// SPDX-License-Identifier: GPL-2.0+
/*
 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
 *
 * Copyright IBM Corporation, 2008
 *
 * Authors: Dipankar Sarma <[email protected]>
 *	    Manfred Spraul <[email protected]>
 *	    Paul E. McKenney <[email protected]>
 *
 * Based on the original work by Paul McKenney <[email protected]>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *	Documentation/RCU
 */

#define pr_fmt(fmt) "rcu: " fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate_wait.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/nmi.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/export.h>
#include <linux/completion.h>
#include <linux/kmemleak.h>
#include <linux/moduleparam.h>
#include <linux/panic.h>
#include <linux/panic_notifier.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/mutex.h>
#include <linux/time.h>
#include <linux/kernel_stat.h>
#include <linux/wait.h>
#include <linux/kthread.h>
#include <uapi/linux/sched/types.h>
#include <linux/prefetch.h>
#include <linux/delay.h>
#include <linux/random.h>
#include <linux/trace_events.h>
#include <linux/suspend.h>
#include <linux/ftrace.h>
#include <linux/tick.h>
#include <linux/sysrq.h>
#include <linux/kprobes.h>
#include <linux/gfp.h>
#include <linux/oom.h>
#include <linux/smpboot.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/sched/isolation.h>
#include <linux/sched/clock.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/kasan.h>
#include <linux/context_tracking.h>
#include "../time/tick-internal.h"

#include "tree.h"
#include "rcu.h"

#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "rcutree."

/* Data structures. */
static void rcu_sr_normal_gp_cleanup_work(struct work_struct *);

static DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_data, rcu_data) = {
	.gpwrap = true,
#ifdef CONFIG_RCU_NOCB_CPU
	.cblist.flags = SEGCBLIST_RCU_CORE,
#endif
};
static struct rcu_state rcu_state = {
	.level = { &rcu_state.node[0] },
	.gp_state = RCU_GP_IDLE,
	.gp_seq = (0UL - 300UL) << RCU_SEQ_CTR_SHIFT,
	.barrier_mutex = __MUTEX_INITIALIZER(rcu_state.barrier_mutex),
	.barrier_lock = __RAW_SPIN_LOCK_UNLOCKED(rcu_state.barrier_lock),
	.name = RCU_NAME,
	.abbr = RCU_ABBR,
	.exp_mutex = __MUTEX_INITIALIZER(rcu_state.exp_mutex),
	.exp_wake_mutex = __MUTEX_INITIALIZER(rcu_state.exp_wake_mutex),
	.ofl_lock = __ARCH_SPIN_LOCK_UNLOCKED,
	.srs_cleanup_work = __WORK_INITIALIZER(rcu_state.srs_cleanup_work,
		rcu_sr_normal_gp_cleanup_work),
	.srs_cleanups_pending = ATOMIC_INIT(0),
};

/* Dump rcu_node combining tree at boot to verify correct setup. */
static bool dump_tree;
module_param(dump_tree, bool, 0444);
/* By default, use RCU_SOFTIRQ instead of rcuc kthreads. */
static bool use_softirq = !IS_ENABLED(CONFIG_PREEMPT_RT);
#ifndef CONFIG_PREEMPT_RT
module_param(use_softirq, bool, 0444);
#endif
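
/*
 * For illustration only: because of the "rcutree." MODULE_PARAM_PREFIX
 * defined above, the module parameters declared in this file are set on
 * the kernel boot command line with that prefix, for example (the values
 * below are arbitrary examples, not recommendations):
 *
 *	rcutree.dump_tree=1 rcutree.use_softirq=0 rcutree.kthread_prio=2
 *
 * Parameters registered with mode 0444 are boot-time only (read-only via
 * /sys/module/rcutree/parameters/), whereas those registered with 0644
 * can also be adjusted at runtime through that sysfs directory.
 */
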
/* Control rcu_node-tree auto-balancing at boot time. */
static bool rcu_fanout_exact;
module_param(rcu_fanout_exact, bool, 0444);
/* Increase (but not decrease) the RCU_FANOUT_LEAF at boot time. */
static int rcu_fanout_leaf = RCU_FANOUT_LEAF;
module_param(rcu_fanout_leaf, int, 0444);
int rcu_num_lvls __read_mostly = RCU_NUM_LVLS;
/* Number of rcu_nodes at specified level. */
int num_rcu_lvl[] = NUM_RCU_LVL_INIT;
int rcu_num_nodes __read_mostly = NUM_RCU_NODES; /* Total # rcu_nodes in use. */

/*
 * The rcu_scheduler_active variable is initialized to the value
 * RCU_SCHEDULER_INACTIVE and transitions to RCU_SCHEDULER_INIT just before
 * the first task is spawned. So when this variable is RCU_SCHEDULER_INACTIVE,
 * RCU can assume that there is but one task, allowing RCU to (for example)
 * optimize synchronize_rcu() to a simple barrier(). When this variable
 * is RCU_SCHEDULER_INIT, RCU must actually do all the hard work required
 * to detect real grace periods. This variable is also used to suppress
 * boot-time false positives from lockdep-RCU error checking. Finally, it
 * transitions from RCU_SCHEDULER_INIT to RCU_SCHEDULER_RUNNING after RCU
 * is fully initialized, including all of its kthreads having been spawned.
 */
int rcu_scheduler_active __read_mostly;
EXPORT_SYMBOL_GPL(rcu_scheduler_active);

/*
 * The rcu_scheduler_fully_active variable transitions from zero to one
 * during the early_initcall() processing, which is after the scheduler
 * is capable of creating new tasks. So RCU processing (for example,
 * creating tasks for RCU priority boosting) must be delayed until after
 * rcu_scheduler_fully_active transitions from zero to one. We also
 * currently delay invocation of any RCU callbacks until after this point.
 *
 * It might later prove better for people registering RCU callbacks during
 * early boot to take responsibility for these callbacks, but one step at
 * a time.
 */
static int rcu_scheduler_fully_active __read_mostly;

static void rcu_report_qs_rnp(unsigned long mask, struct rcu_node *rnp,
			      unsigned long gps, unsigned long flags);
static struct task_struct *rcu_boost_task(struct rcu_node *rnp);
static void invoke_rcu_core(void);
static void rcu_report_exp_rdp(struct rcu_data *rdp);
static void sync_sched_exp_online_cleanup(int cpu);
static void check_cb_ovld_locked(struct rcu_data *rdp, struct rcu_node *rnp);
static bool rcu_rdp_is_offloaded(struct rcu_data *rdp);
static bool rcu_rdp_cpu_online(struct rcu_data *rdp);
static bool rcu_init_invoked(void);
static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf);
static void rcu_init_new_rnp(struct rcu_node *rnp_leaf);

/*
 * rcuc/rcub/rcuop kthread realtime priority. Whether the rcuop kthreads
 * actually run at real-time priority is additionally controlled by the
 * CONFIG_RCU_NOCB_CPU_CB_BOOST Kconfig option.
 */
static int kthread_prio = IS_ENABLED(CONFIG_RCU_BOOST) ? 1 : 0;
module_param(kthread_prio, int, 0444);

/* Delay in jiffies for grace-period initialization delays, debug only. */

static int gp_preinit_delay;
module_param(gp_preinit_delay, int, 0444);
static int gp_init_delay;
module_param(gp_init_delay, int, 0444);
static int gp_cleanup_delay;
module_param(gp_cleanup_delay, int, 0444);
static int nohz_full_patience_delay;
module_param(nohz_full_patience_delay, int, 0444);
static int nohz_full_patience_delay_jiffies;

// Add delay to rcu_read_unlock() for strict grace periods.
static int rcu_unlock_delay;
#ifdef CONFIG_RCU_STRICT_GRACE_PERIOD
module_param(rcu_unlock_delay, int, 0444);
#endif

/*
 * This rcu parameter is runtime-read-only. It reflects
 * a minimum allowed number of objects which can be cached
 * per-CPU. Object size is equal to one page. This value
 * can be changed at boot time.
 */
static int rcu_min_cached_objs = 5;
module_param(rcu_min_cached_objs, int, 0444);

// A page shrinker can ask for pages to be freed to make them
// available for other parts of the system. This usually happens
// under low memory conditions, and in that case we should also
// defer page-cache filling for a short time period.
//
// The default value is 5 seconds, which is long enough to reduce
// interference with the shrinker while it asks other systems to
// drain their caches.
static int rcu_delay_page_cache_fill_msec = 5000;
module_param(rcu_delay_page_cache_fill_msec, int, 0444);

/* Retrieve RCU kthreads priority for rcutorture */
int rcu_get_gp_kthreads_prio(void)
{
	return kthread_prio;
}
EXPORT_SYMBOL_GPL(rcu_get_gp_kthreads_prio);

/*
 * Number of grace periods between delays, normalized by the duration of
 * the delay. The longer the delay, the more the grace periods between
 * each delay. The reason for this normalization is that it means that,
 * for non-zero delays, the overall slowdown of grace periods is constant
 * regardless of the duration of the delay. This arrangement balances
 * the need for long delays to increase some race probabilities with the
 * need for fast grace periods to increase other race probabilities.
 */
#define PER_RCU_NODE_PERIOD 3	/* Number of grace periods between delays for debugging. */
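
/*
 * Worked example of the normalization above (illustrative numbers only):
 * with rcu_num_nodes = 17 rcu_node structures and a debug delay of
 * 4 jiffies (for example via the gp_init_delay parameter above),
 * rcu_gp_slow() sleeps once every 17 * PER_RCU_NODE_PERIOD * 4 = 204
 * grace periods, so the average slowdown per grace period stays near
 * 4/204, roughly 1/51 of a jiffy, no matter how large the configured
 * delay is made.
 */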

/*
 * Return true if an RCU grace period is in progress. The READ_ONCE()s
 * permit this function to be invoked without holding the root rcu_node
 * structure's ->lock, but of course results can be subject to change.
 */
static int rcu_gp_in_progress(void)
{
	return rcu_seq_state(rcu_seq_current(&rcu_state.gp_seq));
}

/*
 * Return the number of callbacks queued on the specified CPU.
 * Handles both the nocbs and normal cases.
 */
static long rcu_get_n_cbs_cpu(int cpu)
{
	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);

	if (rcu_segcblist_is_enabled(&rdp->cblist))
		return rcu_segcblist_n_cbs(&rdp->cblist);
	return 0;
}

/**
 * rcu_softirq_qs - Provide a set of RCU quiescent states in softirq processing
 *
 * Mark a quiescent state for RCU, Tasks RCU, and Tasks Trace RCU.
 * This is a special-purpose function to be used in the softirq
 * infrastructure and perhaps the occasional long-running softirq
 * handler.
 *
 * Note that from RCU's viewpoint, a call to rcu_softirq_qs() is
 * equivalent to momentarily completely enabling preemption. For
 * example, given this code::
 *
 *	local_bh_disable();
 *	do_something();
 *	rcu_softirq_qs();  // A
 *	do_something_else();
 *	local_bh_enable();  // B
 *
 * A call to synchronize_rcu() that began concurrently with the
 * call to do_something() would be guaranteed to wait only until
 * execution reached statement A. Without that rcu_softirq_qs(),
 * that same synchronize_rcu() would instead be guaranteed to wait
 * until execution reached statement B.
 */
void rcu_softirq_qs(void)
{
	RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
			 lock_is_held(&rcu_lock_map) ||
			 lock_is_held(&rcu_sched_lock_map),
			 "Illegal rcu_softirq_qs() in RCU read-side critical section");
	rcu_qs();
	rcu_preempt_deferred_qs(current);
	rcu_tasks_qs(current, false);
}

/*
 * Reset the current CPU's RCU_WATCHING counter to indicate that the
 * newly onlined CPU is no longer in an extended quiescent state.
 * This will either leave the counter unchanged, or increment it
 * to the next non-quiescent value.
 *
 * The non-atomic test/increment sequence works because the upper bits
 * of the ->state variable are manipulated only by the corresponding CPU,
 * or when the corresponding CPU is offline.
 */
static void rcu_watching_online(void)
{
	if (ct_rcu_watching() & CT_RCU_WATCHING)
		return;
	ct_state_inc(CT_RCU_WATCHING);
}

/*
 * Return true if the snapshot returned from ct_rcu_watching()
 * indicates that RCU is in an extended quiescent state.
 */
static bool rcu_watching_snap_in_eqs(int snap)
{
	return !(snap & CT_RCU_WATCHING);
}

/**
 * rcu_watching_snap_stopped_since() - Has RCU stopped watching a given CPU
 * since the specified @snap?
 *
 * @rdp: The rcu_data corresponding to the CPU for which to check EQS.
 * @snap: rcu_watching snapshot taken when the CPU wasn't in an EQS.
 *
 * Returns true if the CPU corresponding to @rdp has spent some time in an
 * extended quiescent state since @snap. Note that this doesn't check if it
 * /still/ is in an EQS, just that it went through one since @snap.
 *
 * This is meant to be used in a loop waiting for a CPU to go through an EQS.
 */
static bool rcu_watching_snap_stopped_since(struct rcu_data *rdp, int snap)
{
	/*
	 * The first failing snapshot is already ordered against the accesses
	 * performed by the remote CPU after it exits idle.
	 *
	 * The second snapshot therefore only needs to order against accesses
	 * performed by the remote CPU prior to entering idle and therefore can
	 * rely solely on acquire semantics.
	 */
	if (WARN_ON_ONCE(rcu_watching_snap_in_eqs(snap)))
		return true;

	return snap != ct_rcu_watching_cpu_acquire(rdp->cpu);
}

/*
 * Return true if the referenced integer is zero while the specified
 * CPU remains within a single extended quiescent state.
 */
bool rcu_dynticks_zero_in_eqs(int cpu, int *vp)
{
	int snap;

	// If not quiescent, force back to earlier extended quiescent state.
	snap = ct_rcu_watching_cpu(cpu) & ~CT_RCU_WATCHING;
	smp_rmb(); // Order CT state and *vp reads.
	if (READ_ONCE(*vp))
		return false;  // Non-zero, so report failure.
	smp_rmb(); // Order *vp read and CT state re-read.

	// If still in the same extended quiescent state, we are good!
	return snap == ct_rcu_watching_cpu(cpu);
}

/*
 * Let the RCU core know that this CPU has gone through the scheduler,
 * which is a quiescent state. This is called when the need for a
 * quiescent state is urgent, so we burn an atomic operation and full
 * memory barriers to let the RCU core know about it, regardless of what
 * this CPU might (or might not) do in the near future.
 *
 * We inform the RCU core by emulating a zero-duration dyntick-idle period.
 *
 * The caller must have disabled interrupts and must not be idle.
 */
notrace void rcu_momentary_dyntick_idle(void)
{
	int seq;

	raw_cpu_write(rcu_data.rcu_need_heavy_qs, false);
	seq = ct_state_inc(2 * CT_RCU_WATCHING);
	/* It is illegal to call this from idle state. */
	WARN_ON_ONCE(!(seq & CT_RCU_WATCHING));
	rcu_preempt_deferred_qs(current);
}
EXPORT_SYMBOL_GPL(rcu_momentary_dyntick_idle);

/**
 * rcu_is_cpu_rrupt_from_idle - see if 'interrupted' from idle
 *
 * If the current CPU is idle and running at a first-level (not nested)
 * interrupt, or directly, from idle, return true.
 *
 * The caller must have at least disabled IRQs.
 */
static int rcu_is_cpu_rrupt_from_idle(void)
{
	long nesting;

	/*
	 * Usually called from the tick; but also used from smp_call_function()
	 * for expedited grace periods. This latter can result in running from
	 * the idle task, instead of an actual IPI.
	 */
	lockdep_assert_irqs_disabled();

	/* Check for counter underflows */
	RCU_LOCKDEP_WARN(ct_nesting() < 0,
			 "RCU nesting counter underflow!");
	RCU_LOCKDEP_WARN(ct_nmi_nesting() <= 0,
			 "RCU nmi_nesting counter underflow/zero!");

	/* Are we at first interrupt nesting level? */
	nesting = ct_nmi_nesting();
	if (nesting > 1)
		return false;

	/*
	 * If we're not in an interrupt, we must be in the idle task!
	 */
	WARN_ON_ONCE(!nesting && !is_idle_task(current));

	/* Does CPU appear to be idle from an RCU standpoint? */
	return ct_nesting() == 0;
}

#define DEFAULT_RCU_BLIMIT (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD) ? 1000 : 10)
				// Maximum callbacks per rcu_do_batch ...
#define DEFAULT_MAX_RCU_BLIMIT 10000	// ... even during callback flood.
static long blimit = DEFAULT_RCU_BLIMIT;
#define DEFAULT_RCU_QHIMARK 10000	// If this many pending, ignore blimit.
static long qhimark = DEFAULT_RCU_QHIMARK;
#define DEFAULT_RCU_QLOMARK 100		// Once only this many pending, use blimit.
static long qlowmark = DEFAULT_RCU_QLOMARK;
#define DEFAULT_RCU_QOVLD_MULT 2
#define DEFAULT_RCU_QOVLD (DEFAULT_RCU_QOVLD_MULT * DEFAULT_RCU_QHIMARK)
static long qovld = DEFAULT_RCU_QOVLD;	// If this many pending, hammer QS.
static long qovld_calc = -1;		// No pre-initialization lock acquisitions!

module_param(blimit, long, 0444);
module_param(qhimark, long, 0444);
module_param(qlowmark, long, 0444);
module_param(qovld, long, 0444);
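
/*
 * Illustrative reading of the defaults above (not additional tuning
 * advice): a CPU with, say, 12000 callbacks pending exceeds
 * DEFAULT_RCU_QHIMARK (10000), so rcu_do_batch() temporarily ignores
 * blimit and keeps invoking callbacks; once the backlog drains below
 * DEFAULT_RCU_QLOMARK (100), batches are again capped at blimit callbacks
 * per invocation. At DEFAULT_RCU_QOVLD (20000) pending callbacks, RCU
 * additionally starts hammering that CPU for quiescent states.
 */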

static ulong jiffies_till_first_fqs = IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD) ? 0 : ULONG_MAX;
static ulong jiffies_till_next_fqs = ULONG_MAX;
static bool rcu_kick_kthreads;
static int rcu_divisor = 7;
module_param(rcu_divisor, int, 0644);

/* Force an exit from rcu_do_batch() after 3 milliseconds. */
static long rcu_resched_ns = 3 * NSEC_PER_MSEC;
module_param(rcu_resched_ns, long, 0644);

/*
 * How long the grace period must be before we start recruiting
 * quiescent-state help from rcu_note_context_switch().
 */
static ulong jiffies_till_sched_qs = ULONG_MAX;
module_param(jiffies_till_sched_qs, ulong, 0444);
static ulong jiffies_to_sched_qs; /* See adjust_jiffies_till_sched_qs(). */
module_param(jiffies_to_sched_qs, ulong, 0444); /* Display only! */

/*
 * Make sure that we give the grace-period kthread time to detect any
 * idle CPUs before taking active measures to force quiescent states.
 * However, don't go below 100 milliseconds, adjusted upwards for really
 * large systems.
 */
static void adjust_jiffies_till_sched_qs(void)
{
	unsigned long j;

	/* If jiffies_till_sched_qs was specified, respect the request. */
	if (jiffies_till_sched_qs != ULONG_MAX) {
		WRITE_ONCE(jiffies_to_sched_qs, jiffies_till_sched_qs);
		return;
	}
	/* Otherwise, set to third fqs scan, but bound below on large systems. */
	j = READ_ONCE(jiffies_till_first_fqs) +
		      2 * READ_ONCE(jiffies_till_next_fqs);
	if (j < HZ / 10 + nr_cpu_ids / RCU_JIFFIES_FQS_DIV)
		j = HZ / 10 + nr_cpu_ids / RCU_JIFFIES_FQS_DIV;
	pr_info("RCU calculated value of scheduler-enlistment delay is %ld jiffies.\n", j);
	WRITE_ONCE(jiffies_to_sched_qs, j);
}

static int param_set_first_fqs_jiffies(const char *val, const struct kernel_param *kp)
{
	ulong j;
	int ret = kstrtoul(val, 0, &j);

	if (!ret) {
		WRITE_ONCE(*(ulong *)kp->arg, (j > HZ) ? HZ : j);
		adjust_jiffies_till_sched_qs();
	}
	return ret;
}

static int param_set_next_fqs_jiffies(const char *val, const struct kernel_param *kp)
{
	ulong j;
	int ret = kstrtoul(val, 0, &j);

	if (!ret) {
		WRITE_ONCE(*(ulong *)kp->arg, (j > HZ) ? HZ : (j ?: 1));
		adjust_jiffies_till_sched_qs();
	}
	return ret;
}

static const struct kernel_param_ops first_fqs_jiffies_ops = {
	.set = param_set_first_fqs_jiffies,
	.get = param_get_ulong,
};

static const struct kernel_param_ops next_fqs_jiffies_ops = {
	.set = param_set_next_fqs_jiffies,
	.get = param_get_ulong,
};

module_param_cb(jiffies_till_first_fqs, &first_fqs_jiffies_ops, &jiffies_till_first_fqs, 0644);
module_param_cb(jiffies_till_next_fqs, &next_fqs_jiffies_ops, &jiffies_till_next_fqs, 0644);
module_param(rcu_kick_kthreads, bool, 0644);

static void force_qs_rnp(int (*f)(struct rcu_data *rdp));
static int rcu_pending(int user);

/*
 * Return the number of RCU GPs completed thus far for debug & stats.
 */
unsigned long rcu_get_gp_seq(void)
{
	return READ_ONCE(rcu_state.gp_seq);
}
EXPORT_SYMBOL_GPL(rcu_get_gp_seq);

/*
 * Return the number of RCU expedited batches completed thus far for
 * debug & stats. Odd numbers mean that a batch is in progress, even
 * numbers mean idle. The value returned will thus be roughly double
 * the cumulative batches since boot.
 */
unsigned long rcu_exp_batches_completed(void)
{
	return rcu_state.expedited_sequence;
}
EXPORT_SYMBOL_GPL(rcu_exp_batches_completed);
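
/*
 * For illustration: jiffies_till_first_fqs and jiffies_till_next_fqs are
 * registered above with mode 0644, so in addition to the boot command line
 * they can be adjusted on a running system, with the clamping and the
 * jiffies_to_sched_qs recomputation applied by the setter callbacks, e.g.
 * (arbitrary example values):
 *
 *	echo 4 > /sys/module/rcutree/parameters/jiffies_till_first_fqs
 *	echo 2 > /sys/module/rcutree/parameters/jiffies_till_next_fqs
 */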

/*
 * Return the root node of the rcu_state structure.
 */
static struct rcu_node *rcu_get_root(void)
{
	return &rcu_state.node[0];
}

/*
 * Send along grace-period-related data for rcutorture diagnostics.
 */
void rcutorture_get_gp_data(int *flags, unsigned long *gp_seq)
{
	*flags = READ_ONCE(rcu_state.gp_flags);
	*gp_seq = rcu_seq_current(&rcu_state.gp_seq);
}
EXPORT_SYMBOL_GPL(rcutorture_get_gp_data);

#if defined(CONFIG_NO_HZ_FULL) && (!defined(CONFIG_GENERIC_ENTRY) || !defined(CONFIG_KVM_XFER_TO_GUEST_WORK))
/*
 * An empty function that will trigger a reschedule on
 * IRQ tail once IRQs get re-enabled on userspace/guest resume.
 */
static void late_wakeup_func(struct irq_work *work)
{
}

static DEFINE_PER_CPU(struct irq_work, late_wakeup_work) =
	IRQ_WORK_INIT(late_wakeup_func);

/*
 * If either:
 *
 * 1) the task is about to enter guest mode and $ARCH doesn't support KVM generic work
 * 2) the task is about to enter user mode and $ARCH doesn't support generic entry.
 *
 * In these cases the late RCU wake ups aren't supported in the resched loops and our
 * last resort is to fire a local irq_work that will trigger a reschedule once IRQs
 * get re-enabled again.
 */
noinstr void rcu_irq_work_resched(void)
{
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);

	if (IS_ENABLED(CONFIG_GENERIC_ENTRY) && !(current->flags & PF_VCPU))
		return;

	if (IS_ENABLED(CONFIG_KVM_XFER_TO_GUEST_WORK) && (current->flags & PF_VCPU))
		return;

	instrumentation_begin();
	if (do_nocb_deferred_wakeup(rdp) && need_resched()) {
		irq_work_queue(this_cpu_ptr(&late_wakeup_work));
	}
	instrumentation_end();
}
#endif /* #if defined(CONFIG_NO_HZ_FULL) && (!defined(CONFIG_GENERIC_ENTRY) || !defined(CONFIG_KVM_XFER_TO_GUEST_WORK)) */

#ifdef CONFIG_PROVE_RCU
/**
 * rcu_irq_exit_check_preempt - Validate that scheduling is possible
 */
void rcu_irq_exit_check_preempt(void)
{
	lockdep_assert_irqs_disabled();

	RCU_LOCKDEP_WARN(ct_nesting() <= 0,
			 "RCU nesting counter underflow/zero!");
	RCU_LOCKDEP_WARN(ct_nmi_nesting() !=
			 CT_NESTING_IRQ_NONIDLE,
			 "Bad RCU nmi_nesting counter\n");
	RCU_LOCKDEP_WARN(!rcu_is_watching_curr_cpu(),
			 "RCU in extended quiescent state!");
}
#endif /* #ifdef CONFIG_PROVE_RCU */

#ifdef CONFIG_NO_HZ_FULL
/**
 * __rcu_irq_enter_check_tick - Enable scheduler tick on CPU if RCU needs it.
 *
 * The scheduler tick is not normally enabled when CPUs enter the kernel
 * from nohz_full userspace execution. After all, nohz_full userspace
 * execution is an RCU quiescent state and the time executing in the kernel
 * is quite short. Except of course when it isn't. And it is not hard to
 * cause a large system to spend tens of seconds or even minutes looping
 * in the kernel, which can cause a number of problems, including RCU CPU
 * stall warnings.
 *
 * Therefore, if a nohz_full CPU fails to report a quiescent state
 * in a timely manner, the RCU grace-period kthread sets that CPU's
 * ->rcu_urgent_qs flag with the expectation that the next interrupt or
 * exception will invoke this function, which will turn on the scheduler
 * tick, which will enable RCU to detect that CPU's quiescent states,
 * for example, due to cond_resched() calls in CONFIG_PREEMPT=n kernels.
 * The tick will be disabled once a quiescent state is reported for
 * this CPU.
 *
 * Of course, in carefully tuned systems, there might never be an
 * interrupt or exception. In that case, the RCU grace-period kthread
 * will eventually cause one to happen. However, in less carefully
 * controlled environments, this function allows RCU to get what it
 * needs without creating otherwise useless interruptions.
 */
void __rcu_irq_enter_check_tick(void)
{
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);

	// If we're here from NMI there's nothing to do.
	if (in_nmi())
		return;

	RCU_LOCKDEP_WARN(!rcu_is_watching_curr_cpu(),
			 "Illegal rcu_irq_enter_check_tick() from extended quiescent state");

	if (!tick_nohz_full_cpu(rdp->cpu) ||
	    !READ_ONCE(rdp->rcu_urgent_qs) ||
	    READ_ONCE(rdp->rcu_forced_tick)) {
		// RCU doesn't need nohz_full help from this CPU, or it is
		// already getting that help.
		return;
	}

	// We get here only when not in an extended quiescent state and
	// from interrupts (as opposed to NMIs). Therefore, (1) RCU is
	// already watching and (2) The fact that we are in an interrupt
	// handler and that the rcu_node lock is an irq-disabled lock
	// prevents self-deadlock. So we can safely recheck under the lock.
	// Note that the nohz_full state currently cannot change.
	raw_spin_lock_rcu_node(rdp->mynode);
	if (READ_ONCE(rdp->rcu_urgent_qs) && !rdp->rcu_forced_tick) {
		// A nohz_full CPU is in the kernel and RCU needs a
		// quiescent state. Turn on the tick!
		WRITE_ONCE(rdp->rcu_forced_tick, true);
		tick_dep_set_cpu(rdp->cpu, TICK_DEP_BIT_RCU);
	}
	raw_spin_unlock_rcu_node(rdp->mynode);
}
NOKPROBE_SYMBOL(__rcu_irq_enter_check_tick);
#endif /* CONFIG_NO_HZ_FULL */

/*
 * Check to see if any future non-offloaded RCU-related work will need
 * to be done by the current CPU, even if none need be done immediately,
 * returning 1 if so. This function is part of the RCU implementation;
 * it is -not- an exported member of the RCU API. This is used by
 * the idle-entry code to figure out whether it is safe to disable the
 * scheduler-clock interrupt.
 *
 * Just check whether or not this CPU has non-offloaded RCU callbacks
 * queued.
 */
int rcu_needs_cpu(void)
{
	return !rcu_segcblist_empty(&this_cpu_ptr(&rcu_data)->cblist) &&
		!rcu_rdp_is_offloaded(this_cpu_ptr(&rcu_data));
}

/*
 * If any sort of urgency was applied to the current CPU (for example,
 * the scheduler-clock interrupt was enabled on a nohz_full CPU) in order
 * to get to a quiescent state, disable it.
 */
static void rcu_disable_urgency_upon_qs(struct rcu_data *rdp)
{
	raw_lockdep_assert_held_rcu_node(rdp->mynode);
	WRITE_ONCE(rdp->rcu_urgent_qs, false);
	WRITE_ONCE(rdp->rcu_need_heavy_qs, false);
	if (tick_nohz_full_cpu(rdp->cpu) && rdp->rcu_forced_tick) {
		tick_dep_clear_cpu(rdp->cpu, TICK_DEP_BIT_RCU);
		WRITE_ONCE(rdp->rcu_forced_tick, false);
	}
}

/**
 * rcu_is_watching - RCU read-side critical sections permitted on current CPU?
 *
 * Return @true if RCU is watching the running CPU and @false otherwise.
 * A @true return means that this CPU can safely enter RCU read-side
 * critical sections.
 *
 * Although calls to rcu_is_watching() from most parts of the kernel
 * will return @true, there are important exceptions. For example, if the
 * current CPU is deep within its idle loop, in kernel entry/exit code,
 * or offline, rcu_is_watching() will return @false.
 *
 * Make notrace because it can be called by the internal functions of
 * ftrace, and making this notrace removes unnecessary recursion calls.
 */
notrace bool rcu_is_watching(void)
{
	bool ret;

	preempt_disable_notrace();
	ret = rcu_is_watching_curr_cpu();
	preempt_enable_notrace();
	return ret;
}
EXPORT_SYMBOL_GPL(rcu_is_watching);

/*
 * If a holdout task is actually running, request an urgent quiescent
 * state from its CPU. This is unsynchronized, so migrations can cause
 * the request to go to the wrong CPU. Which is OK, all that will happen
 * is that the CPU's next context switch will be a bit slower and next
 * time around this task will generate another request.
 */
void rcu_request_urgent_qs_task(struct task_struct *t)
{
	int cpu;

	barrier();
	cpu = task_cpu(t);
	if (!task_curr(t))
		return; /* This task is not running on that CPU. */
	smp_store_release(per_cpu_ptr(&rcu_data.rcu_urgent_qs, cpu), true);
}

/*
 * When trying to report a quiescent state on behalf of some other CPU,
 * it is our responsibility to check for and handle potential overflow
 * of the rcu_node ->gp_seq counter with respect to the rcu_data counters.
 * After all, the CPU might be in deep idle state, and thus executing no
 * code whatsoever.
 */
static void rcu_gpnum_ovf(struct rcu_node *rnp, struct rcu_data *rdp)
{
	raw_lockdep_assert_held_rcu_node(rnp);
	if (ULONG_CMP_LT(rcu_seq_current(&rdp->gp_seq) + ULONG_MAX / 4,
			 rnp->gp_seq))
		WRITE_ONCE(rdp->gpwrap, true);
	if (ULONG_CMP_LT(rdp->rcu_iw_gp_seq + ULONG_MAX / 4, rnp->gp_seq))
		rdp->rcu_iw_gp_seq = rnp->gp_seq + ULONG_MAX / 4;
}

/*
 * Snapshot the specified CPU's dynticks counter so that we can later
 * credit them with an implicit quiescent state. Return 1 if this CPU
 * is in dynticks idle mode, which is an extended quiescent state.
 */
static int dyntick_save_progress_counter(struct rcu_data *rdp)
{
	/*
	 * Full ordering between remote CPU's post idle accesses and updater's
	 * accesses prior to current GP (and also the started GP sequence number)
	 * is enforced by rcu_seq_start() implicit barrier and even further by
	 * smp_mb__after_unlock_lock() barriers chained all the way throughout the
	 * rnp locking tree since rcu_gp_init() and up to the current leaf rnp
	 * locking.
	 *
	 * Ordering between remote CPU's pre idle accesses and post grace period
	 * updater's accesses is enforced by the below acquire semantic.
	 */
	rdp->dynticks_snap = ct_rcu_watching_cpu_acquire(rdp->cpu);
	if (rcu_watching_snap_in_eqs(rdp->dynticks_snap)) {
		trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("dti"));
		rcu_gpnum_ovf(rdp->mynode, rdp);
		return 1;
	}
	return 0;
}

/*
 * Returns positive if the specified CPU has passed through a quiescent state
 * by virtue of being in or having passed through a dynticks idle state since
 * the last call to dyntick_save_progress_counter() for this same CPU, or by
 * virtue of having been offline.
 *
 * Returns negative if the specified CPU needs a force resched.
 *
 * Returns zero otherwise.
 */
static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
{
	unsigned long jtsq;
	int ret = 0;
	struct rcu_node *rnp = rdp->mynode;

	/*
	 * If the CPU passed through or entered a dynticks idle phase with
	 * no active irq/NMI handlers, then we can safely pretend that the CPU
	 * already acknowledged the request to pass through a quiescent
	 * state. Either way, that CPU cannot possibly be in an RCU
	 * read-side critical section that started before the beginning
	 * of the current RCU grace period.
	 */
	if (rcu_watching_snap_stopped_since(rdp, rdp->dynticks_snap)) {
		trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("dti"));
		rcu_gpnum_ovf(rnp, rdp);
		return 1;
	}

	/*
	 * Complain if a CPU that is considered to be offline from RCU's
	 * perspective has not yet reported a quiescent state. After all,
	 * the offline CPU should have reported a quiescent state during
	 * the CPU-offline process, or, failing that, by rcu_gp_init()
	 * if it ran concurrently with either the CPU going offline or the
	 * last task on a leaf rcu_node structure exiting its RCU read-side
	 * critical section while all CPUs corresponding to that structure
	 * are offline. This added warning detects bugs in any of these
	 * code paths.
	 *
	 * The rcu_node structure's ->lock is held here, which excludes
	 * the relevant portions of the CPU-hotplug code, the grace-period
	 * initialization code, and the rcu_read_unlock() code paths.
	 *
	 * For more detail, please refer to the "Hotplug CPU" section
	 * of RCU's Requirements documentation.
	 */
	if (WARN_ON_ONCE(!rcu_rdp_cpu_online(rdp))) {
		struct rcu_node *rnp1;

		pr_info("%s: grp: %d-%d level: %d ->gp_seq %ld ->completedqs %ld\n",
			__func__, rnp->grplo, rnp->grphi, rnp->level,
			(long)rnp->gp_seq, (long)rnp->completedqs);
		for (rnp1 = rnp; rnp1; rnp1 = rnp1->parent)
			pr_info("%s: %d:%d ->qsmask %#lx ->qsmaskinit %#lx ->qsmaskinitnext %#lx ->rcu_gp_init_mask %#lx\n",
				__func__, rnp1->grplo, rnp1->grphi, rnp1->qsmask, rnp1->qsmaskinit, rnp1->qsmaskinitnext, rnp1->rcu_gp_init_mask);
		pr_info("%s %d: %c online: %ld(%d) offline: %ld(%d)\n",
			__func__, rdp->cpu, ".o"[rcu_rdp_cpu_online(rdp)],
			(long)rdp->rcu_onl_gp_seq, rdp->rcu_onl_gp_state,
			(long)rdp->rcu_ofl_gp_seq, rdp->rcu_ofl_gp_state);
		return 1; /* Break things loose after complaining. */
	}

	/*
	 * A CPU running for an extended time within the kernel can
	 * delay RCU grace periods: (1) At age jiffies_to_sched_qs,
	 * set .rcu_urgent_qs, (2) At age 2*jiffies_to_sched_qs, set
	 * both .rcu_need_heavy_qs and .rcu_urgent_qs. Note that the
	 * unsynchronized assignments to the per-CPU rcu_need_heavy_qs
	 * variable are safe because the assignments are repeated if this
	 * CPU failed to pass through a quiescent state. This code
	 * also checks .jiffies_resched in case jiffies_to_sched_qs
	 * is set way high.
	 */
	jtsq = READ_ONCE(jiffies_to_sched_qs);
	if (!READ_ONCE(rdp->rcu_need_heavy_qs) &&
	    (time_after(jiffies, rcu_state.gp_start + jtsq * 2) ||
	     time_after(jiffies, rcu_state.jiffies_resched) ||
	     rcu_state.cbovld)) {
		WRITE_ONCE(rdp->rcu_need_heavy_qs, true);
		/* Store rcu_need_heavy_qs before rcu_urgent_qs. */
		smp_store_release(&rdp->rcu_urgent_qs, true);
	} else if (time_after(jiffies, rcu_state.gp_start + jtsq)) {
		WRITE_ONCE(rdp->rcu_urgent_qs, true);
	}

	/*
	 * NO_HZ_FULL CPUs can run in-kernel without rcu_sched_clock_irq!
	 * The above code handles this, but only for straight cond_resched().
	 * And some in-kernel loops check need_resched() before calling
	 * cond_resched(), which defeats the above code for CPUs that are
	 * running in-kernel with scheduling-clock interrupts disabled.
	 * So hit them over the head with the resched_cpu() hammer!
	 */
	if (tick_nohz_full_cpu(rdp->cpu) &&
	    (time_after(jiffies, READ_ONCE(rdp->last_fqs_resched) + jtsq * 3) ||
	     rcu_state.cbovld)) {
		WRITE_ONCE(rdp->rcu_urgent_qs, true);
		WRITE_ONCE(rdp->last_fqs_resched, jiffies);
		ret = -1;
	}

	/*
	 * If more than halfway to RCU CPU stall-warning time, invoke
	 * resched_cpu() more frequently to try to loosen things up a bit.
	 * Also check to see if the CPU is getting hammered with interrupts,
	 * but only once per grace period, just to keep the IPIs down to
	 * a dull roar.
	 */
	if (time_after(jiffies, rcu_state.jiffies_resched)) {
		if (time_after(jiffies,
			       READ_ONCE(rdp->last_fqs_resched) + jtsq)) {
			WRITE_ONCE(rdp->last_fqs_resched, jiffies);
			ret = -1;
		}
		if (IS_ENABLED(CONFIG_IRQ_WORK) &&
		    !rdp->rcu_iw_pending && rdp->rcu_iw_gp_seq != rnp->gp_seq &&
		    (rnp->ffmask & rdp->grpmask)) {
			rdp->rcu_iw_pending = true;
			rdp->rcu_iw_gp_seq = rnp->gp_seq;
			irq_work_queue_on(&rdp->rcu_iw, rdp->cpu);
		}

		if (rcu_cpu_stall_cputime && rdp->snap_record.gp_seq != rdp->gp_seq) {
			int cpu = rdp->cpu;
			struct rcu_snap_record *rsrp;
			struct kernel_cpustat *kcsp;

			kcsp = &kcpustat_cpu(cpu);

			rsrp = &rdp->snap_record;
			rsrp->cputime_irq     = kcpustat_field(kcsp, CPUTIME_IRQ, cpu);
			rsrp->cputime_softirq = kcpustat_field(kcsp, CPUTIME_SOFTIRQ, cpu);
			rsrp->cputime_system  = kcpustat_field(kcsp, CPUTIME_SYSTEM, cpu);
			rsrp->nr_hardirqs = kstat_cpu_irqs_sum(rdp->cpu);
			rsrp->nr_softirqs = kstat_cpu_softirqs_sum(rdp->cpu);
			rsrp->nr_csw = nr_context_switches_cpu(rdp->cpu);
			rsrp->jiffies = jiffies;
			rsrp->gp_seq = rdp->gp_seq;
		}
	}

	return ret;
}
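
/*
 * Rough timeline implied by the checks above, assuming a grace period
 * started at gp_start and jiffies_to_sched_qs == J (illustration only):
 *
 *	gp_start + J:    .rcu_urgent_qs is set, recruiting quiescent-state
 *	                 help from rcu_note_context_switch() and the tick.
 *	gp_start + 2*J:  .rcu_need_heavy_qs is also set, requesting a
 *	                 heavier-weight quiescent-state report.
 *	jiffies_resched: the negative return value asks the caller to
 *	                 resched_cpu() the holdout, and an irq_work is
 *	                 queued (once per grace period) to check for
 *	                 interrupt flooding; nohz_full CPUs running in the
 *	                 kernel with the tick disabled get the same
 *	                 treatment on the jtsq * 3 schedule above.
 */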

/* Trace-event wrapper function for trace_rcu_future_grace_period. */
static void trace_rcu_this_gp(struct rcu_node *rnp, struct rcu_data *rdp,
			      unsigned long gp_seq_req, const char *s)
{
	trace_rcu_future_grace_period(rcu_state.name, READ_ONCE(rnp->gp_seq),
				      gp_seq_req, rnp->level,
				      rnp->grplo, rnp->grphi, s);
}

/*
 * rcu_start_this_gp - Request the start of a particular grace period
 * @rnp_start: The leaf node of the CPU from which to start.
 * @rdp: The rcu_data corresponding to the CPU from which to start.
 * @gp_seq_req: The gp_seq of the grace period to start.
 *
 * Start the specified grace period, as needed to handle newly arrived
 * callbacks. The required future grace periods are recorded in each
 * rcu_node structure's ->gp_seq_needed field. Returns true if there
 * is reason to awaken the grace-period kthread.
 *
 * The caller must hold the specified rcu_node structure's ->lock, which
 * is why the caller is responsible for waking the grace-period kthread.
 *
 * Returns true if the GP kthread needs to be awakened, else false.
 */
static bool rcu_start_this_gp(struct rcu_node *rnp_start, struct rcu_data *rdp,
			      unsigned long gp_seq_req)
{
	bool ret = false;
	struct rcu_node *rnp;

	/*
	 * Use funnel locking to either acquire the root rcu_node
	 * structure's lock or bail out if the need for this grace period
	 * has already been recorded -- or if that grace period has in
	 * fact already started. If there is already a grace period in
	 * progress in a non-leaf node, no recording is needed because the
	 * end of the grace period will scan the leaf rcu_node structures.
	 * Note that rnp_start->lock must not be released.
	 */
	raw_lockdep_assert_held_rcu_node(rnp_start);
	trace_rcu_this_gp(rnp_start, rdp, gp_seq_req, TPS("Startleaf"));
	for (rnp = rnp_start; 1; rnp = rnp->parent) {
		if (rnp != rnp_start)
			raw_spin_lock_rcu_node(rnp);
		if (ULONG_CMP_GE(rnp->gp_seq_needed, gp_seq_req) ||
		    rcu_seq_started(&rnp->gp_seq, gp_seq_req) ||
		    (rnp != rnp_start &&
		     rcu_seq_state(rcu_seq_current(&rnp->gp_seq)))) {
			trace_rcu_this_gp(rnp, rdp, gp_seq_req,
					  TPS("Prestarted"));
			goto unlock_out;
		}
		WRITE_ONCE(rnp->gp_seq_needed, gp_seq_req);
		if (rcu_seq_state(rcu_seq_current(&rnp->gp_seq))) {
			/*
			 * We just marked the leaf or internal node, and a
			 * grace period is in progress, which means that
			 * rcu_gp_cleanup() will see the marking. Bail to
			 * reduce contention.
			 */
			trace_rcu_this_gp(rnp_start, rdp, gp_seq_req,
					  TPS("Startedleaf"));
			goto unlock_out;
		}
		if (rnp != rnp_start && rnp->parent != NULL)
			raw_spin_unlock_rcu_node(rnp);
		if (!rnp->parent)
			break;  /* At root, and perhaps also leaf. */
	}

	/* If GP already in progress, just leave, otherwise start one. */
	if (rcu_gp_in_progress()) {
		trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("Startedleafroot"));
		goto unlock_out;
	}
	trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("Startedroot"));
	WRITE_ONCE(rcu_state.gp_flags, rcu_state.gp_flags | RCU_GP_FLAG_INIT);
	WRITE_ONCE(rcu_state.gp_req_activity, jiffies);
	if (!READ_ONCE(rcu_state.gp_kthread)) {
		trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("NoGPkthread"));
		goto unlock_out;
	}
	trace_rcu_grace_period(rcu_state.name, data_race(rcu_state.gp_seq), TPS("newreq"));
	ret = true;  /* Caller must wake GP kthread. */
unlock_out:
	/* Push furthest requested GP to leaf node and rcu_data structure. */
	if (ULONG_CMP_LT(gp_seq_req, rnp->gp_seq_needed)) {
		WRITE_ONCE(rnp_start->gp_seq_needed, rnp->gp_seq_needed);
		WRITE_ONCE(rdp->gp_seq_needed, rnp->gp_seq_needed);
	}
	if (rnp != rnp_start)
		raw_spin_unlock_rcu_node(rnp);
	return ret;
}
TPS("CleanupMore") : TPS("Cleanup")); 1061 return needmore; 1062 } 1063 1064 static void swake_up_one_online_ipi(void *arg) 1065 { 1066 struct swait_queue_head *wqh = arg; 1067 1068 swake_up_one(wqh); 1069 } 1070 1071 static void swake_up_one_online(struct swait_queue_head *wqh) 1072 { 1073 int cpu = get_cpu(); 1074 1075 /* 1076 * If called from rcutree_report_cpu_starting(), wake up 1077 * is dangerous that late in the CPU-down hotplug process. The 1078 * scheduler might queue an ignored hrtimer. Defer the wake up 1079 * to an online CPU instead. 1080 */ 1081 if (unlikely(cpu_is_offline(cpu))) { 1082 int target; 1083 1084 target = cpumask_any_and(housekeeping_cpumask(HK_TYPE_RCU), 1085 cpu_online_mask); 1086 1087 smp_call_function_single(target, swake_up_one_online_ipi, 1088 wqh, 0); 1089 put_cpu(); 1090 } else { 1091 put_cpu(); 1092 swake_up_one(wqh); 1093 } 1094 } 1095 1096 /* 1097 * Awaken the grace-period kthread. Don't do a self-awaken (unless in an 1098 * interrupt or softirq handler, in which case we just might immediately 1099 * sleep upon return, resulting in a grace-period hang), and don't bother 1100 * awakening when there is nothing for the grace-period kthread to do 1101 * (as in several CPUs raced to awaken, we lost), and finally don't try 1102 * to awaken a kthread that has not yet been created. If all those checks 1103 * are passed, track some debug information and awaken. 1104 * 1105 * So why do the self-wakeup when in an interrupt or softirq handler 1106 * in the grace-period kthread's context? Because the kthread might have 1107 * been interrupted just as it was going to sleep, and just after the final 1108 * pre-sleep check of the awaken condition. In this case, a wakeup really 1109 * is required, and is therefore supplied. 1110 */ 1111 static void rcu_gp_kthread_wake(void) 1112 { 1113 struct task_struct *t = READ_ONCE(rcu_state.gp_kthread); 1114 1115 if ((current == t && !in_hardirq() && !in_serving_softirq()) || 1116 !READ_ONCE(rcu_state.gp_flags) || !t) 1117 return; 1118 WRITE_ONCE(rcu_state.gp_wake_time, jiffies); 1119 WRITE_ONCE(rcu_state.gp_wake_seq, READ_ONCE(rcu_state.gp_seq)); 1120 swake_up_one_online(&rcu_state.gp_wq); 1121 } 1122 1123 /* 1124 * If there is room, assign a ->gp_seq number to any callbacks on this 1125 * CPU that have not already been assigned. Also accelerate any callbacks 1126 * that were previously assigned a ->gp_seq number that has since proven 1127 * to be too conservative, which can happen if callbacks get assigned a 1128 * ->gp_seq number while RCU is idle, but with reference to a non-root 1129 * rcu_node structure. This function is idempotent, so it does not hurt 1130 * to call it repeatedly. Returns an flag saying that we should awaken 1131 * the RCU grace-period kthread. 1132 * 1133 * The caller must hold rnp->lock with interrupts disabled. 1134 */ 1135 static bool rcu_accelerate_cbs(struct rcu_node *rnp, struct rcu_data *rdp) 1136 { 1137 unsigned long gp_seq_req; 1138 bool ret = false; 1139 1140 rcu_lockdep_assert_cblist_protected(rdp); 1141 raw_lockdep_assert_held_rcu_node(rnp); 1142 1143 /* If no pending (not yet ready to invoke) callbacks, nothing to do. */ 1144 if (!rcu_segcblist_pend_cbs(&rdp->cblist)) 1145 return false; 1146 1147 trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCbPreAcc")); 1148 1149 /* 1150 * Callbacks are often registered with incomplete grace-period 1151 * information. Something about the fact that getting exact 1152 * information requires acquiring a global lock... 
	 * makes a conservative estimate of the grace period number at which
	 * a given callback will become ready to invoke. The following
	 * code checks this estimate and improves it when possible, thus
	 * accelerating callback invocation to an earlier grace-period
	 * number.
	 */
	gp_seq_req = rcu_seq_snap(&rcu_state.gp_seq);
	if (rcu_segcblist_accelerate(&rdp->cblist, gp_seq_req))
		ret = rcu_start_this_gp(rnp, rdp, gp_seq_req);

	/* Trace depending on how much we were able to accelerate. */
	if (rcu_segcblist_restempty(&rdp->cblist, RCU_WAIT_TAIL))
		trace_rcu_grace_period(rcu_state.name, gp_seq_req, TPS("AccWaitCB"));
	else
		trace_rcu_grace_period(rcu_state.name, gp_seq_req, TPS("AccReadyCB"));

	trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCbPostAcc"));

	return ret;
}

/*
 * Similar to rcu_accelerate_cbs(), but does not require that the leaf
 * rcu_node structure's ->lock be held. It consults the cached value
 * of ->gp_seq_needed in the rcu_data structure, and if that indicates
 * that a new grace-period request be made, invokes rcu_accelerate_cbs()
 * while holding the leaf rcu_node structure's ->lock.
 */
static void rcu_accelerate_cbs_unlocked(struct rcu_node *rnp,
					struct rcu_data *rdp)
{
	unsigned long c;
	bool needwake;

	rcu_lockdep_assert_cblist_protected(rdp);
	c = rcu_seq_snap(&rcu_state.gp_seq);
	if (!READ_ONCE(rdp->gpwrap) && ULONG_CMP_GE(rdp->gp_seq_needed, c)) {
		/* Old request still live, so mark recent callbacks. */
		(void)rcu_segcblist_accelerate(&rdp->cblist, c);
		return;
	}
	raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
	needwake = rcu_accelerate_cbs(rnp, rdp);
	raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
	if (needwake)
		rcu_gp_kthread_wake();
}
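
/*
 * Background sketch (see rcu_segcblist.h for the authoritative details):
 * the per-CPU ->cblist manipulated by the acceleration code above and the
 * advancing code below is a segmented list with four sublists --
 * RCU_DONE_TAIL (grace period already elapsed, ready to invoke),
 * RCU_WAIT_TAIL (waiting for the current grace period),
 * RCU_NEXT_READY_TAIL (waiting for the next grace period), and
 * RCU_NEXT_TAIL (not yet associated with any grace period).
 * "Accelerating" assigns ->gp_seq numbers to the later sublists, and
 * "advancing" moves callbacks whose grace period has completed into
 * RCU_DONE_TAIL.
 */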

/*
 * Move any callbacks whose grace period has completed to the
 * RCU_DONE_TAIL sublist, then compact the remaining sublists and
 * assign ->gp_seq numbers to any callbacks in the RCU_NEXT_TAIL
 * sublist. This function is idempotent, so it does not hurt to
 * invoke it repeatedly. As long as it is not invoked -too- often...
 * Returns true if the RCU grace-period kthread needs to be awakened.
 *
 * The caller must hold rnp->lock with interrupts disabled.
 */
static bool rcu_advance_cbs(struct rcu_node *rnp, struct rcu_data *rdp)
{
	rcu_lockdep_assert_cblist_protected(rdp);
	raw_lockdep_assert_held_rcu_node(rnp);

	/* If no pending (not yet ready to invoke) callbacks, nothing to do. */
	if (!rcu_segcblist_pend_cbs(&rdp->cblist))
		return false;

	/*
	 * Find all callbacks whose ->gp_seq numbers indicate that they
	 * are ready to invoke, and put them into the RCU_DONE_TAIL sublist.
	 */
	rcu_segcblist_advance(&rdp->cblist, rnp->gp_seq);

	/* Classify any remaining callbacks. */
	return rcu_accelerate_cbs(rnp, rdp);
}

/*
 * Move and classify callbacks, but only if doing so won't require
 * that the RCU grace-period kthread be awakened.
 */
static void __maybe_unused rcu_advance_cbs_nowake(struct rcu_node *rnp,
						  struct rcu_data *rdp)
{
	rcu_lockdep_assert_cblist_protected(rdp);
	if (!rcu_seq_state(rcu_seq_current(&rnp->gp_seq)) || !raw_spin_trylock_rcu_node(rnp))
		return;
	// The grace period cannot end while we hold the rcu_node lock.
	if (rcu_seq_state(rcu_seq_current(&rnp->gp_seq)))
		WARN_ON_ONCE(rcu_advance_cbs(rnp, rdp));
	raw_spin_unlock_rcu_node(rnp);
}

/*
 * In CONFIG_RCU_STRICT_GRACE_PERIOD=y kernels, attempt to generate a
 * quiescent state. This is intended to be invoked when the CPU notices
 * a new grace period.
 */
static void rcu_strict_gp_check_qs(void)
{
	if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD)) {
		rcu_read_lock();
		rcu_read_unlock();
	}
}

/*
 * Update CPU-local rcu_data state to record the beginnings and ends of
 * grace periods. The caller must hold the ->lock of the leaf rcu_node
 * structure corresponding to the current CPU, and must have irqs disabled.
 * Returns true if the grace-period kthread needs to be awakened.
 */
static bool __note_gp_changes(struct rcu_node *rnp, struct rcu_data *rdp)
{
	bool ret = false;
	bool need_qs;
	const bool offloaded = rcu_rdp_is_offloaded(rdp);

	raw_lockdep_assert_held_rcu_node(rnp);

	if (rdp->gp_seq == rnp->gp_seq)
		return false; /* Nothing to do. */

	/* Handle the ends of any preceding grace periods first. */
	if (rcu_seq_completed_gp(rdp->gp_seq, rnp->gp_seq) ||
	    unlikely(READ_ONCE(rdp->gpwrap))) {
		if (!offloaded)
			ret = rcu_advance_cbs(rnp, rdp); /* Advance CBs. */
		rdp->core_needs_qs = false;
		trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("cpuend"));
	} else {
		if (!offloaded)
			ret = rcu_accelerate_cbs(rnp, rdp); /* Recent CBs. */
		if (rdp->core_needs_qs)
			rdp->core_needs_qs = !!(rnp->qsmask & rdp->grpmask);
	}

	/* Now handle the beginnings of any new-to-this-CPU grace periods. */
	if (rcu_seq_new_gp(rdp->gp_seq, rnp->gp_seq) ||
	    unlikely(READ_ONCE(rdp->gpwrap))) {
		/*
		 * If the current grace period is waiting for this CPU,
		 * set up to detect a quiescent state, otherwise don't
		 * go looking for one.
		 */
		trace_rcu_grace_period(rcu_state.name, rnp->gp_seq, TPS("cpustart"));
		need_qs = !!(rnp->qsmask & rdp->grpmask);
		rdp->cpu_no_qs.b.norm = need_qs;
		rdp->core_needs_qs = need_qs;
		zero_cpu_stall_ticks(rdp);
	}
	rdp->gp_seq = rnp->gp_seq; /* Remember new grace-period state. */
	if (ULONG_CMP_LT(rdp->gp_seq_needed, rnp->gp_seq_needed) || rdp->gpwrap)
		WRITE_ONCE(rdp->gp_seq_needed, rnp->gp_seq_needed);
	if (IS_ENABLED(CONFIG_PROVE_RCU) && READ_ONCE(rdp->gpwrap))
		WRITE_ONCE(rdp->last_sched_clock, jiffies);
	WRITE_ONCE(rdp->gpwrap, false);
	rcu_gpnum_ovf(rnp, rdp);
	return ret;
}

static void note_gp_changes(struct rcu_data *rdp)
{
	unsigned long flags;
	bool needwake;
	struct rcu_node *rnp;

	local_irq_save(flags);
	rnp = rdp->mynode;
	if ((rdp->gp_seq == rcu_seq_current(&rnp->gp_seq) &&
	     !unlikely(READ_ONCE(rdp->gpwrap))) || /* w/out lock. */
	    !raw_spin_trylock_rcu_node(rnp)) { /* irqs already off, so later. */
		local_irq_restore(flags);
		return;
	}
	needwake = __note_gp_changes(rnp, rdp);
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	rcu_strict_gp_check_qs();
	if (needwake)
		rcu_gp_kthread_wake();
}

static atomic_t *rcu_gp_slow_suppress;

/* Register a counter to suppress debugging grace-period delays. */
void rcu_gp_slow_register(atomic_t *rgssp)
{
	WARN_ON_ONCE(rcu_gp_slow_suppress);

	WRITE_ONCE(rcu_gp_slow_suppress, rgssp);
}
EXPORT_SYMBOL_GPL(rcu_gp_slow_register);

/* Unregister a counter, with NULL for not caring which. */
void rcu_gp_slow_unregister(atomic_t *rgssp)
{
	WARN_ON_ONCE(rgssp && rgssp != rcu_gp_slow_suppress && rcu_gp_slow_suppress != NULL);

	WRITE_ONCE(rcu_gp_slow_suppress, NULL);
}
EXPORT_SYMBOL_GPL(rcu_gp_slow_unregister);

static bool rcu_gp_slow_is_suppressed(void)
{
	atomic_t *rgssp = READ_ONCE(rcu_gp_slow_suppress);

	return rgssp && atomic_read(rgssp);
}

static void rcu_gp_slow(int delay)
{
	if (!rcu_gp_slow_is_suppressed() && delay > 0 &&
	    !(rcu_seq_ctr(rcu_state.gp_seq) % (rcu_num_nodes * PER_RCU_NODE_PERIOD * delay)))
		schedule_timeout_idle(delay);
}

static unsigned long sleep_duration;

/* Allow rcutorture to stall the grace-period kthread. */
void rcu_gp_set_torture_wait(int duration)
{
	if (IS_ENABLED(CONFIG_RCU_TORTURE_TEST) && duration > 0)
		WRITE_ONCE(sleep_duration, duration);
}
EXPORT_SYMBOL_GPL(rcu_gp_set_torture_wait);

/* Actually implement the aforementioned wait. */
static void rcu_gp_torture_wait(void)
{
	unsigned long duration;

	if (!IS_ENABLED(CONFIG_RCU_TORTURE_TEST))
		return;
	duration = xchg(&sleep_duration, 0UL);
	if (duration > 0) {
		pr_alert("%s: Waiting %lu jiffies\n", __func__, duration);
		schedule_timeout_idle(duration);
		pr_alert("%s: Wait complete\n", __func__);
	}
}

/*
 * Handler for on_each_cpu() to invoke the target CPU's RCU core
 * processing.
 */
static void rcu_strict_gp_boundary(void *unused)
{
	invoke_rcu_core();
}

// Make the polled API aware of the beginning of a grace period.
static void rcu_poll_gp_seq_start(unsigned long *snap)
{
	struct rcu_node *rnp = rcu_get_root();

	if (rcu_scheduler_active != RCU_SCHEDULER_INACTIVE)
		raw_lockdep_assert_held_rcu_node(rnp);

	// If RCU was idle, note beginning of GP.
	if (!rcu_seq_state(rcu_state.gp_seq_polled))
		rcu_seq_start(&rcu_state.gp_seq_polled);

	// Either way, record current state.
	*snap = rcu_state.gp_seq_polled;
}

// Make the polled API aware of the end of a grace period.
static void rcu_poll_gp_seq_end(unsigned long *snap)
{
	struct rcu_node *rnp = rcu_get_root();

	if (rcu_scheduler_active != RCU_SCHEDULER_INACTIVE)
		raw_lockdep_assert_held_rcu_node(rnp);

	// If the previously noted GP is still in effect, record the
	// end of that GP. Either way, zero counter to avoid counter-wrap
	// problems.
	if (*snap && *snap == rcu_state.gp_seq_polled) {
		rcu_seq_end(&rcu_state.gp_seq_polled);
		rcu_state.gp_seq_polled_snap = 0;
		rcu_state.gp_seq_polled_exp_snap = 0;
	} else {
		*snap = 0;
	}
}
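
/*
 * The gp_seq_polled bookkeeping above backs RCU's polled grace-period
 * interfaces (see get_state_synchronize_rcu() and friends later in this
 * file). A minimal usage sketch of that public API, shown here only for
 * illustration and not as code belonging to this file:
 *
 *	unsigned long cookie;
 *
 *	cookie = get_state_synchronize_rcu();
 *	// ... remove the data structure from readers' view ...
 *	if (!poll_state_synchronize_rcu(cookie))
 *		synchronize_rcu();	// or poll the cookie again later
 *	// A full grace period has now elapsed since the cookie was taken.
 */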

// Make the polled API aware of the beginning of a grace period, but
// where caller does not hold the root rcu_node structure's lock.
static void rcu_poll_gp_seq_start_unlocked(unsigned long *snap)
{
	unsigned long flags;
	struct rcu_node *rnp = rcu_get_root();

	if (rcu_init_invoked()) {
		if (rcu_scheduler_active != RCU_SCHEDULER_INACTIVE)
			lockdep_assert_irqs_enabled();
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
	}
	rcu_poll_gp_seq_start(snap);
	if (rcu_init_invoked())
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
}

// Make the polled API aware of the end of a grace period, but where
// caller does not hold the root rcu_node structure's lock.
static void rcu_poll_gp_seq_end_unlocked(unsigned long *snap)
{
	unsigned long flags;
	struct rcu_node *rnp = rcu_get_root();

	if (rcu_init_invoked()) {
		if (rcu_scheduler_active != RCU_SCHEDULER_INACTIVE)
			lockdep_assert_irqs_enabled();
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
	}
	rcu_poll_gp_seq_end(snap);
	if (rcu_init_invoked())
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
}

/*
 * There is a single llist, which is used for handling
 * synchronize_rcu() users' enqueued rcu_synchronize nodes.
 * Within this llist, there are two tail pointers:
 *
 * wait tail: Tracks the set of nodes, which need to
 *            wait for the current GP to complete.
 * done tail: Tracks the set of nodes, for which grace
 *            period has elapsed. Processing of these nodes
 *            is done as part of the cleanup work execution
 *            by a kworker.
 *
 * At every grace period init, a new wait node is added
 * to the llist. This wait node is used as wait tail
 * for this new grace period. Given that there is a fixed
 * number of wait nodes, all of them may be in use (which
 * can happen when kworker callback processing is delayed)
 * while an additional grace period is requested. This
 * means that the system is slow in processing callbacks.
 *
 * TODO: If a slow processing is detected, a first node
 * in the llist should be used as a wait-tail for this
 * grace period, therefore users which should wait due
 * to a slow process are handled by _this_ grace period
 * and not next.
 *
 * Below is an illustration of how the done and wait
 * tail pointers move from one set of rcu_synchronize nodes
 * to the other, as grace periods start and finish and
 * nodes are processed by kworker.
 *
 *
 * a. Initial llist callbacks list:
 *
 * +----------+           +--------+          +-------+
 * |          |           |        |          |       |
 * |   head   |---------> |   cb2  |--------->| cb1   |
 * |          |           |        |          |       |
 * +----------+           +--------+          +-------+
 *
 *
 *
 * b. New GP1 Start:
 *
 *                    WAIT TAIL
 *                      |
 *                      |
 *                      v
 * +----------+     +--------+      +--------+        +-------+
 * |          |     |        |      |        |        |       |
 * |   head   ------> wait   |------>   cb2  |------> |  cb1  |
 * |          |     | head1  |      |        |        |       |
 * +----------+     +--------+      +--------+        +-------+
 *
 *
 *
 * c. GP completion:
 *
 * WAIT_TAIL == DONE_TAIL
 *
 *                   DONE TAIL
 *                     |
 *                     |
 *                     v
 * +----------+     +--------+      +--------+        +-------+
 * |          |     |        |      |        |        |       |
 * |   head   ------> wait   |------>   cb2  |------> |  cb1  |
 * |          |     | head1  |      |        |        |       |
 * +----------+     +--------+      +--------+        +-------+
 *
 *
 *
 * d. New callbacks and GP2 start:
New callbacks and GP2 start: 1546 * 1547 * WAIT TAIL DONE TAIL 1548 * | | 1549 * | | 1550 * v v 1551 * +----------+ +------+ +------+ +------+ +-----+ +-----+ +-----+ 1552 * | | | | | | | | | | | | | | 1553 * | head ------> wait |--->| cb4 |--->| cb3 |--->|wait |--->| cb2 |--->| cb1 | 1554 * | | | head2| | | | | |head1| | | | | 1555 * +----------+ +------+ +------+ +------+ +-----+ +-----+ +-----+ 1556 * 1557 * 1558 * 1559 * e. GP2 completion: 1560 * 1561 * WAIT_TAIL == DONE_TAIL 1562 * DONE TAIL 1563 * | 1564 * | 1565 * v 1566 * +----------+ +------+ +------+ +------+ +-----+ +-----+ +-----+ 1567 * | | | | | | | | | | | | | | 1568 * | head ------> wait |--->| cb4 |--->| cb3 |--->|wait |--->| cb2 |--->| cb1 | 1569 * | | | head2| | | | | |head1| | | | | 1570 * +----------+ +------+ +------+ +------+ +-----+ +-----+ +-----+ 1571 * 1572 * 1573 * While the llist state transitions from d to e, a kworker 1574 * can start executing rcu_sr_normal_gp_cleanup_work() and 1575 * can observe either the old done tail (@c) or the new 1576 * done tail (@e). So, done tail updates and reads need 1577 * to use the rel-acq semantics. If the concurrent kworker 1578 * observes the old done tail, the newly queued work 1579 * execution will process the updated done tail. If the 1580 * concurrent kworker observes the new done tail, then 1581 * the newly queued work will skip processing the done 1582 * tail, as workqueue semantics guarantees that the new 1583 * work is executed only after the previous one completes. 1584 * 1585 * f. kworker callbacks processing complete: 1586 * 1587 * 1588 * DONE TAIL 1589 * | 1590 * | 1591 * v 1592 * +----------+ +--------+ 1593 * | | | | 1594 * | head ------> wait | 1595 * | | | head2 | 1596 * +----------+ +--------+ 1597 * 1598 */ 1599 static bool rcu_sr_is_wait_head(struct llist_node *node) 1600 { 1601 return &(rcu_state.srs_wait_nodes)[0].node <= node && 1602 node <= &(rcu_state.srs_wait_nodes)[SR_NORMAL_GP_WAIT_HEAD_MAX - 1].node; 1603 } 1604 1605 static struct llist_node *rcu_sr_get_wait_head(void) 1606 { 1607 struct sr_wait_node *sr_wn; 1608 int i; 1609 1610 for (i = 0; i < SR_NORMAL_GP_WAIT_HEAD_MAX; i++) { 1611 sr_wn = &(rcu_state.srs_wait_nodes)[i]; 1612 1613 if (!atomic_cmpxchg_acquire(&sr_wn->inuse, 0, 1)) 1614 return &sr_wn->node; 1615 } 1616 1617 return NULL; 1618 } 1619 1620 static void rcu_sr_put_wait_head(struct llist_node *node) 1621 { 1622 struct sr_wait_node *sr_wn = container_of(node, struct sr_wait_node, node); 1623 1624 atomic_set_release(&sr_wn->inuse, 0); 1625 } 1626 1627 /* Disabled by default. */ 1628 static int rcu_normal_wake_from_gp; 1629 module_param(rcu_normal_wake_from_gp, int, 0644); 1630 static struct workqueue_struct *sync_wq; 1631 1632 static void rcu_sr_normal_complete(struct llist_node *node) 1633 { 1634 struct rcu_synchronize *rs = container_of( 1635 (struct rcu_head *) node, struct rcu_synchronize, head); 1636 unsigned long oldstate = (unsigned long) rs->head.func; 1637 1638 WARN_ONCE(IS_ENABLED(CONFIG_PROVE_RCU) && 1639 !poll_state_synchronize_rcu(oldstate), 1640 "A full grace period is not passed yet: %lu", 1641 rcu_seq_diff(get_state_synchronize_rcu(), oldstate)); 1642 1643 /* Finally. */ 1644 complete(&rs->completion); 1645 } 1646 1647 static void rcu_sr_normal_gp_cleanup_work(struct work_struct *work) 1648 { 1649 struct llist_node *done, *rcu, *next, *head; 1650 1651 /* 1652 * This work execution can potentially execute 1653 * while a new done tail is being updated by 1654 * grace period kthread in rcu_sr_normal_gp_cleanup(). 
1655 * So, reads and updates of the done tail need to
1656 * follow acquire-release semantics.
1657 *
1658 * Given that workqueue semantics guarantee that a single
1659 * work item cannot be executed concurrently by multiple
1660 * kworkers, the done-tail list manipulations are protected here.
1661 */
1662 done = smp_load_acquire(&rcu_state.srs_done_tail);
1663 if (!done)
1664 return;
1665
1666 WARN_ON_ONCE(!rcu_sr_is_wait_head(done));
1667 head = done->next;
1668 done->next = NULL;
1669
1670 /*
1671 * The dummy node, which the acquire-loaded done tail
1672 * points to, is not removed here. This allows
1673 * lockless additions of new rcu_synchronize nodes in
1674 * rcu_sr_normal_add_req(), while the cleanup work
1675 * executes. The dummy node is removed in the
1676 * next round of cleanup-work
1677 * execution.
1678 */
1679 llist_for_each_safe(rcu, next, head) {
1680 if (!rcu_sr_is_wait_head(rcu)) {
1681 rcu_sr_normal_complete(rcu);
1682 continue;
1683 }
1684
1685 rcu_sr_put_wait_head(rcu);
1686 }
1687
1688 /* Order list manipulations with atomic access. */
1689 atomic_dec_return_release(&rcu_state.srs_cleanups_pending);
1690 }
1691
1692 /*
1693 * Helper function for rcu_gp_cleanup().
1694 */
1695 static void rcu_sr_normal_gp_cleanup(void)
1696 {
1697 struct llist_node *wait_tail, *next = NULL, *rcu = NULL;
1698 int done = 0;
1699
1700 wait_tail = rcu_state.srs_wait_tail;
1701 if (wait_tail == NULL)
1702 return;
1703
1704 rcu_state.srs_wait_tail = NULL;
1705 ASSERT_EXCLUSIVE_WRITER(rcu_state.srs_wait_tail);
1706 WARN_ON_ONCE(!rcu_sr_is_wait_head(wait_tail));
1707
1708 /*
1709 * Process (a) and (d) cases. See the illustration above.
1710 */
1711 llist_for_each_safe(rcu, next, wait_tail->next) {
1712 if (rcu_sr_is_wait_head(rcu))
1713 break;
1714
1715 rcu_sr_normal_complete(rcu);
1716 // This node may be the last one, so update the next pointer at this step.
1717 wait_tail->next = next;
1718
1719 if (++done == SR_MAX_USERS_WAKE_FROM_GP)
1720 break;
1721 }
1722
1723 /*
1724 * Fast path: no more users to process except putting the second-last
1725 * wait head if there are no in-flight workers. If there are in-flight
1726 * workers, they will remove the last wait head.
1727 *
1728 * Note that the ACQUIRE orders atomic access with list manipulation.
1729 */
1730 if (wait_tail->next && wait_tail->next->next == NULL &&
1731 rcu_sr_is_wait_head(wait_tail->next) &&
1732 !atomic_read_acquire(&rcu_state.srs_cleanups_pending)) {
1733 rcu_sr_put_wait_head(wait_tail->next);
1734 wait_tail->next = NULL;
1735 }
1736
1737 /* Concurrent sr_normal_gp_cleanup work might observe this update. */
1738 ASSERT_EXCLUSIVE_WRITER(rcu_state.srs_done_tail);
1739 smp_store_release(&rcu_state.srs_done_tail, wait_tail);
1740
1741 /*
1742 * Schedule a work item to perform final processing of any
1743 * outstanding users and to release the wait heads added by
1744 * the rcu_sr_normal_gp_init() call.
1745 */
1746 if (wait_tail->next) {
1747 atomic_inc(&rcu_state.srs_cleanups_pending);
1748 if (!queue_work(sync_wq, &rcu_state.srs_cleanup_work))
1749 atomic_dec(&rcu_state.srs_cleanups_pending);
1750 }
1751 }
1752
1753 /*
1754 * Helper function for rcu_gp_init().
1755 */
1756 static bool rcu_sr_normal_gp_init(void)
1757 {
1758 struct llist_node *first;
1759 struct llist_node *wait_head;
1760 bool start_new_poll = false;
1761
1762 first = READ_ONCE(rcu_state.srs_next.first);
1763 if (!first || rcu_sr_is_wait_head(first))
1764 return start_new_poll;
1765
1766 wait_head = rcu_sr_get_wait_head();
1767 if (!wait_head) {
1768 // Kick another GP to retry.
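// All fixed wait heads are in use, meaning cleanup is lagging behind;
// return true so that the caller starts a polled grace period and this
// request is retried on a later GP.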
1769 start_new_poll = true;
1770 return start_new_poll;
1771 }
1772
1773 /* Inject a wait-dummy-node. */
1774 llist_add(wait_head, &rcu_state.srs_next);
1775
1776 /*
1777 * The waiting list of rcu_synchronize nodes should be empty at
1778 * this step, since the GP kthread, rcu_gp_init() -> gp_cleanup(),
1779 * rolls it over. If not, it is a BUG; warn the user.
1780 */
1781 WARN_ON_ONCE(rcu_state.srs_wait_tail != NULL);
1782 rcu_state.srs_wait_tail = wait_head;
1783 ASSERT_EXCLUSIVE_WRITER(rcu_state.srs_wait_tail);
1784
1785 return start_new_poll;
1786 }
1787
1788 static void rcu_sr_normal_add_req(struct rcu_synchronize *rs)
1789 {
1790 llist_add((struct llist_node *) &rs->head, &rcu_state.srs_next);
1791 }
1792
1793 /*
1794 * Initialize a new grace period. Return false if no grace period required.
1795 */
1796 static noinline_for_stack bool rcu_gp_init(void)
1797 {
1798 unsigned long flags;
1799 unsigned long oldmask;
1800 unsigned long mask;
1801 struct rcu_data *rdp;
1802 struct rcu_node *rnp = rcu_get_root();
1803 bool start_new_poll;
1804
1805 WRITE_ONCE(rcu_state.gp_activity, jiffies);
1806 raw_spin_lock_irq_rcu_node(rnp);
1807 if (!rcu_state.gp_flags) {
1808 /* Spurious wakeup, tell caller to go back to sleep. */
1809 raw_spin_unlock_irq_rcu_node(rnp);
1810 return false;
1811 }
1812 WRITE_ONCE(rcu_state.gp_flags, 0); /* Clear all flags: New GP. */
1813
1814 if (WARN_ON_ONCE(rcu_gp_in_progress())) {
1815 /*
1816 * Grace period already in progress, don't start another.
1817 * Not supposed to be able to happen.
1818 */
1819 raw_spin_unlock_irq_rcu_node(rnp);
1820 return false;
1821 }
1822
1823 /* Advance to a new grace period and initialize state. */
1824 record_gp_stall_check_time();
1825 /* Record GP times before starting GP, hence rcu_seq_start(). */
1826 rcu_seq_start(&rcu_state.gp_seq);
1827 ASSERT_EXCLUSIVE_WRITER(rcu_state.gp_seq);
1828 start_new_poll = rcu_sr_normal_gp_init();
1829 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, TPS("start"));
1830 rcu_poll_gp_seq_start(&rcu_state.gp_seq_polled_snap);
1831 raw_spin_unlock_irq_rcu_node(rnp);
1832
1833 /*
1834 * The "start_new_poll" flag is set to true only when this GP is not
1835 * able to handle anything and there are outstanding users. That happens
1836 * when the rcu_sr_normal_gp_init() function was not able to insert a
1837 * dummy separator into the llist because no dummy nodes were left.
1838 *
1839 * The number of dummy nodes is fixed, so it is possible to run out of
1840 * them; if so, start a new poll request to retry. This is rare and
1841 * means that the system is slow in processing callbacks.
1842 */
1843 if (start_new_poll)
1844 (void) start_poll_synchronize_rcu();
1845
1846 /*
1847 * Apply per-leaf buffered online and offline operations to
1848 * the rcu_node tree. Note that this new grace period need not
1849 * wait for subsequent online CPUs, and that RCU hooks in the CPU
1850 * offlining path, when combined with checks in this function,
1851 * will handle CPUs that are currently going offline or that will
1852 * go offline later. Please also refer to "Hotplug CPU" section
1853 * of RCU's Requirements documentation.
1854 */
1855 WRITE_ONCE(rcu_state.gp_state, RCU_GP_ONOFF);
1856 /* Exclude CPU hotplug operations. */
1857 rcu_for_each_leaf_node(rnp) {
1858 local_irq_disable();
1859 arch_spin_lock(&rcu_state.ofl_lock);
1860 raw_spin_lock_rcu_node(rnp);
1861 if (rnp->qsmaskinit == rnp->qsmaskinitnext &&
1862 !rnp->wait_blkd_tasks) {
1863 /* Nothing to do on this leaf rcu_node structure.
*/ 1864 raw_spin_unlock_rcu_node(rnp); 1865 arch_spin_unlock(&rcu_state.ofl_lock); 1866 local_irq_enable(); 1867 continue; 1868 } 1869 1870 /* Record old state, apply changes to ->qsmaskinit field. */ 1871 oldmask = rnp->qsmaskinit; 1872 rnp->qsmaskinit = rnp->qsmaskinitnext; 1873 1874 /* If zero-ness of ->qsmaskinit changed, propagate up tree. */ 1875 if (!oldmask != !rnp->qsmaskinit) { 1876 if (!oldmask) { /* First online CPU for rcu_node. */ 1877 if (!rnp->wait_blkd_tasks) /* Ever offline? */ 1878 rcu_init_new_rnp(rnp); 1879 } else if (rcu_preempt_has_tasks(rnp)) { 1880 rnp->wait_blkd_tasks = true; /* blocked tasks */ 1881 } else { /* Last offline CPU and can propagate. */ 1882 rcu_cleanup_dead_rnp(rnp); 1883 } 1884 } 1885 1886 /* 1887 * If all waited-on tasks from prior grace period are 1888 * done, and if all this rcu_node structure's CPUs are 1889 * still offline, propagate up the rcu_node tree and 1890 * clear ->wait_blkd_tasks. Otherwise, if one of this 1891 * rcu_node structure's CPUs has since come back online, 1892 * simply clear ->wait_blkd_tasks. 1893 */ 1894 if (rnp->wait_blkd_tasks && 1895 (!rcu_preempt_has_tasks(rnp) || rnp->qsmaskinit)) { 1896 rnp->wait_blkd_tasks = false; 1897 if (!rnp->qsmaskinit) 1898 rcu_cleanup_dead_rnp(rnp); 1899 } 1900 1901 raw_spin_unlock_rcu_node(rnp); 1902 arch_spin_unlock(&rcu_state.ofl_lock); 1903 local_irq_enable(); 1904 } 1905 rcu_gp_slow(gp_preinit_delay); /* Races with CPU hotplug. */ 1906 1907 /* 1908 * Set the quiescent-state-needed bits in all the rcu_node 1909 * structures for all currently online CPUs in breadth-first 1910 * order, starting from the root rcu_node structure, relying on the 1911 * layout of the tree within the rcu_state.node[] array. Note that 1912 * other CPUs will access only the leaves of the hierarchy, thus 1913 * seeing that no grace period is in progress, at least until the 1914 * corresponding leaf node has been initialized. 1915 * 1916 * The grace period cannot complete until the initialization 1917 * process finishes, because this kthread handles both. 1918 */ 1919 WRITE_ONCE(rcu_state.gp_state, RCU_GP_INIT); 1920 rcu_for_each_node_breadth_first(rnp) { 1921 rcu_gp_slow(gp_init_delay); 1922 raw_spin_lock_irqsave_rcu_node(rnp, flags); 1923 rdp = this_cpu_ptr(&rcu_data); 1924 rcu_preempt_check_blocked_tasks(rnp); 1925 rnp->qsmask = rnp->qsmaskinit; 1926 WRITE_ONCE(rnp->gp_seq, rcu_state.gp_seq); 1927 if (rnp == rdp->mynode) 1928 (void)__note_gp_changes(rnp, rdp); 1929 rcu_preempt_boost_start_gp(rnp); 1930 trace_rcu_grace_period_init(rcu_state.name, rnp->gp_seq, 1931 rnp->level, rnp->grplo, 1932 rnp->grphi, rnp->qsmask); 1933 /* Quiescent states for tasks on any now-offline CPUs. */ 1934 mask = rnp->qsmask & ~rnp->qsmaskinitnext; 1935 rnp->rcu_gp_init_mask = mask; 1936 if ((mask || rnp->wait_blkd_tasks) && rcu_is_leaf_node(rnp)) 1937 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags); 1938 else 1939 raw_spin_unlock_irq_rcu_node(rnp); 1940 cond_resched_tasks_rcu_qs(); 1941 WRITE_ONCE(rcu_state.gp_activity, jiffies); 1942 } 1943 1944 // If strict, make all CPUs aware of new grace period. 1945 if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD)) 1946 on_each_cpu(rcu_strict_gp_boundary, NULL, 0); 1947 1948 return true; 1949 } 1950 1951 /* 1952 * Helper function for swait_event_idle_exclusive() wakeup at force-quiescent-state 1953 * time. 1954 */ 1955 static bool rcu_gp_fqs_check_wake(int *gfp) 1956 { 1957 struct rcu_node *rnp = rcu_get_root(); 1958 1959 // If under overload conditions, force an immediate FQS scan. 
1960 if (*gfp & RCU_GP_FLAG_OVLD) 1961 return true; 1962 1963 // Someone like call_rcu() requested a force-quiescent-state scan. 1964 *gfp = READ_ONCE(rcu_state.gp_flags); 1965 if (*gfp & RCU_GP_FLAG_FQS) 1966 return true; 1967 1968 // The current grace period has completed. 1969 if (!READ_ONCE(rnp->qsmask) && !rcu_preempt_blocked_readers_cgp(rnp)) 1970 return true; 1971 1972 return false; 1973 } 1974 1975 /* 1976 * Do one round of quiescent-state forcing. 1977 */ 1978 static void rcu_gp_fqs(bool first_time) 1979 { 1980 int nr_fqs = READ_ONCE(rcu_state.nr_fqs_jiffies_stall); 1981 struct rcu_node *rnp = rcu_get_root(); 1982 1983 WRITE_ONCE(rcu_state.gp_activity, jiffies); 1984 WRITE_ONCE(rcu_state.n_force_qs, rcu_state.n_force_qs + 1); 1985 1986 WARN_ON_ONCE(nr_fqs > 3); 1987 /* Only countdown nr_fqs for stall purposes if jiffies moves. */ 1988 if (nr_fqs) { 1989 if (nr_fqs == 1) { 1990 WRITE_ONCE(rcu_state.jiffies_stall, 1991 jiffies + rcu_jiffies_till_stall_check()); 1992 } 1993 WRITE_ONCE(rcu_state.nr_fqs_jiffies_stall, --nr_fqs); 1994 } 1995 1996 if (first_time) { 1997 /* Collect dyntick-idle snapshots. */ 1998 force_qs_rnp(dyntick_save_progress_counter); 1999 } else { 2000 /* Handle dyntick-idle and offline CPUs. */ 2001 force_qs_rnp(rcu_implicit_dynticks_qs); 2002 } 2003 /* Clear flag to prevent immediate re-entry. */ 2004 if (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) { 2005 raw_spin_lock_irq_rcu_node(rnp); 2006 WRITE_ONCE(rcu_state.gp_flags, rcu_state.gp_flags & ~RCU_GP_FLAG_FQS); 2007 raw_spin_unlock_irq_rcu_node(rnp); 2008 } 2009 } 2010 2011 /* 2012 * Loop doing repeated quiescent-state forcing until the grace period ends. 2013 */ 2014 static noinline_for_stack void rcu_gp_fqs_loop(void) 2015 { 2016 bool first_gp_fqs = true; 2017 int gf = 0; 2018 unsigned long j; 2019 int ret; 2020 struct rcu_node *rnp = rcu_get_root(); 2021 2022 j = READ_ONCE(jiffies_till_first_fqs); 2023 if (rcu_state.cbovld) 2024 gf = RCU_GP_FLAG_OVLD; 2025 ret = 0; 2026 for (;;) { 2027 if (rcu_state.cbovld) { 2028 j = (j + 2) / 3; 2029 if (j <= 0) 2030 j = 1; 2031 } 2032 if (!ret || time_before(jiffies + j, rcu_state.jiffies_force_qs)) { 2033 WRITE_ONCE(rcu_state.jiffies_force_qs, jiffies + j); 2034 /* 2035 * jiffies_force_qs before RCU_GP_WAIT_FQS state 2036 * update; required for stall checks. 2037 */ 2038 smp_wmb(); 2039 WRITE_ONCE(rcu_state.jiffies_kick_kthreads, 2040 jiffies + (j ? 3 * j : 2)); 2041 } 2042 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, 2043 TPS("fqswait")); 2044 WRITE_ONCE(rcu_state.gp_state, RCU_GP_WAIT_FQS); 2045 (void)swait_event_idle_timeout_exclusive(rcu_state.gp_wq, 2046 rcu_gp_fqs_check_wake(&gf), j); 2047 rcu_gp_torture_wait(); 2048 WRITE_ONCE(rcu_state.gp_state, RCU_GP_DOING_FQS); 2049 /* Locking provides needed memory barriers. */ 2050 /* 2051 * Exit the loop if the root rcu_node structure indicates that the grace period 2052 * has ended, leave the loop. The rcu_preempt_blocked_readers_cgp(rnp) check 2053 * is required only for single-node rcu_node trees because readers blocking 2054 * the current grace period are queued only on leaf rcu_node structures. 2055 * For multi-node trees, checking the root node's ->qsmask suffices, because a 2056 * given root node's ->qsmask bit is cleared only when all CPUs and tasks from 2057 * the corresponding leaf nodes have passed through their quiescent state. 2058 */ 2059 if (!READ_ONCE(rnp->qsmask) && 2060 !rcu_preempt_blocked_readers_cgp(rnp)) 2061 break; 2062 /* If time for quiescent-state forcing, do it. 
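 * This is the case either when the jiffies_force_qs deadline has passed
 * or when the FQS or OVLD flag has been set.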
*/ 2063 if (!time_after(rcu_state.jiffies_force_qs, jiffies) || 2064 (gf & (RCU_GP_FLAG_FQS | RCU_GP_FLAG_OVLD))) { 2065 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, 2066 TPS("fqsstart")); 2067 rcu_gp_fqs(first_gp_fqs); 2068 gf = 0; 2069 if (first_gp_fqs) { 2070 first_gp_fqs = false; 2071 gf = rcu_state.cbovld ? RCU_GP_FLAG_OVLD : 0; 2072 } 2073 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, 2074 TPS("fqsend")); 2075 cond_resched_tasks_rcu_qs(); 2076 WRITE_ONCE(rcu_state.gp_activity, jiffies); 2077 ret = 0; /* Force full wait till next FQS. */ 2078 j = READ_ONCE(jiffies_till_next_fqs); 2079 } else { 2080 /* Deal with stray signal. */ 2081 cond_resched_tasks_rcu_qs(); 2082 WRITE_ONCE(rcu_state.gp_activity, jiffies); 2083 WARN_ON(signal_pending(current)); 2084 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, 2085 TPS("fqswaitsig")); 2086 ret = 1; /* Keep old FQS timing. */ 2087 j = jiffies; 2088 if (time_after(jiffies, rcu_state.jiffies_force_qs)) 2089 j = 1; 2090 else 2091 j = rcu_state.jiffies_force_qs - j; 2092 gf = 0; 2093 } 2094 } 2095 } 2096 2097 /* 2098 * Clean up after the old grace period. 2099 */ 2100 static noinline void rcu_gp_cleanup(void) 2101 { 2102 int cpu; 2103 bool needgp = false; 2104 unsigned long gp_duration; 2105 unsigned long new_gp_seq; 2106 bool offloaded; 2107 struct rcu_data *rdp; 2108 struct rcu_node *rnp = rcu_get_root(); 2109 struct swait_queue_head *sq; 2110 2111 WRITE_ONCE(rcu_state.gp_activity, jiffies); 2112 raw_spin_lock_irq_rcu_node(rnp); 2113 rcu_state.gp_end = jiffies; 2114 gp_duration = rcu_state.gp_end - rcu_state.gp_start; 2115 if (gp_duration > rcu_state.gp_max) 2116 rcu_state.gp_max = gp_duration; 2117 2118 /* 2119 * We know the grace period is complete, but to everyone else 2120 * it appears to still be ongoing. But it is also the case 2121 * that to everyone else it looks like there is nothing that 2122 * they can do to advance the grace period. It is therefore 2123 * safe for us to drop the lock in order to mark the grace 2124 * period as completed in all of the rcu_node structures. 2125 */ 2126 rcu_poll_gp_seq_end(&rcu_state.gp_seq_polled_snap); 2127 raw_spin_unlock_irq_rcu_node(rnp); 2128 2129 /* 2130 * Propagate new ->gp_seq value to rcu_node structures so that 2131 * other CPUs don't have to wait until the start of the next grace 2132 * period to process their callbacks. This also avoids some nasty 2133 * RCU grace-period initialization races by forcing the end of 2134 * the current grace period to be completely recorded in all of 2135 * the rcu_node structures before the beginning of the next grace 2136 * period is recorded in any of the rcu_node structures. 2137 */ 2138 new_gp_seq = rcu_state.gp_seq; 2139 rcu_seq_end(&new_gp_seq); 2140 rcu_for_each_node_breadth_first(rnp) { 2141 raw_spin_lock_irq_rcu_node(rnp); 2142 if (WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp))) 2143 dump_blkd_tasks(rnp, 10); 2144 WARN_ON_ONCE(rnp->qsmask); 2145 WRITE_ONCE(rnp->gp_seq, new_gp_seq); 2146 if (!rnp->parent) 2147 smp_mb(); // Order against failing poll_state_synchronize_rcu_full(). 2148 rdp = this_cpu_ptr(&rcu_data); 2149 if (rnp == rdp->mynode) 2150 needgp = __note_gp_changes(rnp, rdp) || needgp; 2151 /* smp_mb() provided by prior unlock-lock pair. 
*/
2152 needgp = rcu_future_gp_cleanup(rnp) || needgp;
2153 // Reset overload indication for CPUs no longer overloaded
2154 if (rcu_is_leaf_node(rnp))
2155 for_each_leaf_node_cpu_mask(rnp, cpu, rnp->cbovldmask) {
2156 rdp = per_cpu_ptr(&rcu_data, cpu);
2157 check_cb_ovld_locked(rdp, rnp);
2158 }
2159 sq = rcu_nocb_gp_get(rnp);
2160 raw_spin_unlock_irq_rcu_node(rnp);
2161 rcu_nocb_gp_cleanup(sq);
2162 cond_resched_tasks_rcu_qs();
2163 WRITE_ONCE(rcu_state.gp_activity, jiffies);
2164 rcu_gp_slow(gp_cleanup_delay);
2165 }
2166 rnp = rcu_get_root();
2167 raw_spin_lock_irq_rcu_node(rnp); /* GP before ->gp_seq update. */
2168
2169 /* Declare grace period done, trace first to use old GP number. */
2170 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, TPS("end"));
2171 rcu_seq_end(&rcu_state.gp_seq);
2172 ASSERT_EXCLUSIVE_WRITER(rcu_state.gp_seq);
2173 WRITE_ONCE(rcu_state.gp_state, RCU_GP_IDLE);
2174 /* Check for GP requests since above loop. */
2175 rdp = this_cpu_ptr(&rcu_data);
2176 if (!needgp && ULONG_CMP_LT(rnp->gp_seq, rnp->gp_seq_needed)) {
2177 trace_rcu_this_gp(rnp, rdp, rnp->gp_seq_needed,
2178 TPS("CleanupMore"));
2179 needgp = true;
2180 }
2181 /* Advance CBs to reduce false positives below. */
2182 offloaded = rcu_rdp_is_offloaded(rdp);
2183 if ((offloaded || !rcu_accelerate_cbs(rnp, rdp)) && needgp) {
2184
2185 // We get here if a grace period was needed ("needgp")
2186 // and the above call to rcu_accelerate_cbs() did not set
2187 // the RCU_GP_FLAG_INIT bit in ->gp_flags (which records
2188 // the need for another grace period). The purpose
2189 // of the "offloaded" check is to avoid invoking
2190 // rcu_accelerate_cbs() on an offloaded CPU because we do not
2191 // hold the ->nocb_lock needed to safely access an offloaded
2192 // ->cblist. We do not want to acquire that lock because
2193 // it can be heavily contended during callback floods.
2194
2195 WRITE_ONCE(rcu_state.gp_flags, RCU_GP_FLAG_INIT);
2196 WRITE_ONCE(rcu_state.gp_req_activity, jiffies);
2197 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, TPS("newreq"));
2198 } else {
2199
2200 // We get here either if there is no need for an
2201 // additional grace period or if rcu_accelerate_cbs() has
2202 // already set the RCU_GP_FLAG_INIT bit in ->gp_flags.
2203 // So all we need to do is to clear all of the other
2204 // ->gp_flags bits.
2205
2206 WRITE_ONCE(rcu_state.gp_flags, rcu_state.gp_flags & RCU_GP_FLAG_INIT);
2207 }
2208 raw_spin_unlock_irq_rcu_node(rnp);
2209
2210 // Make synchronize_rcu() users aware of the end of the old grace period.
2211 rcu_sr_normal_gp_cleanup();
2212
2213 // If strict, make all CPUs aware of the end of the old grace period.
2214 if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD))
2215 on_each_cpu(rcu_strict_gp_boundary, NULL, 0);
2216 }
2217
2218 /*
2219 * Body of kthread that handles grace periods.
2220 */
2221 static int __noreturn rcu_gp_kthread(void *unused)
2222 {
2223 rcu_bind_gp_kthread();
2224 for (;;) {
2225
2226 /* Handle grace-period start. */
2227 for (;;) {
2228 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
2229 TPS("reqwait"));
2230 WRITE_ONCE(rcu_state.gp_state, RCU_GP_WAIT_GPS);
2231 swait_event_idle_exclusive(rcu_state.gp_wq,
2232 READ_ONCE(rcu_state.gp_flags) &
2233 RCU_GP_FLAG_INIT);
2234 rcu_gp_torture_wait();
2235 WRITE_ONCE(rcu_state.gp_state, RCU_GP_DONE_GPS);
2236 /* Locking provides needed memory barrier.
*/ 2237 if (rcu_gp_init()) 2238 break; 2239 cond_resched_tasks_rcu_qs(); 2240 WRITE_ONCE(rcu_state.gp_activity, jiffies); 2241 WARN_ON(signal_pending(current)); 2242 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, 2243 TPS("reqwaitsig")); 2244 } 2245 2246 /* Handle quiescent-state forcing. */ 2247 rcu_gp_fqs_loop(); 2248 2249 /* Handle grace-period end. */ 2250 WRITE_ONCE(rcu_state.gp_state, RCU_GP_CLEANUP); 2251 rcu_gp_cleanup(); 2252 WRITE_ONCE(rcu_state.gp_state, RCU_GP_CLEANED); 2253 } 2254 } 2255 2256 /* 2257 * Report a full set of quiescent states to the rcu_state data structure. 2258 * Invoke rcu_gp_kthread_wake() to awaken the grace-period kthread if 2259 * another grace period is required. Whether we wake the grace-period 2260 * kthread or it awakens itself for the next round of quiescent-state 2261 * forcing, that kthread will clean up after the just-completed grace 2262 * period. Note that the caller must hold rnp->lock, which is released 2263 * before return. 2264 */ 2265 static void rcu_report_qs_rsp(unsigned long flags) 2266 __releases(rcu_get_root()->lock) 2267 { 2268 raw_lockdep_assert_held_rcu_node(rcu_get_root()); 2269 WARN_ON_ONCE(!rcu_gp_in_progress()); 2270 WRITE_ONCE(rcu_state.gp_flags, rcu_state.gp_flags | RCU_GP_FLAG_FQS); 2271 raw_spin_unlock_irqrestore_rcu_node(rcu_get_root(), flags); 2272 rcu_gp_kthread_wake(); 2273 } 2274 2275 /* 2276 * Similar to rcu_report_qs_rdp(), for which it is a helper function. 2277 * Allows quiescent states for a group of CPUs to be reported at one go 2278 * to the specified rcu_node structure, though all the CPUs in the group 2279 * must be represented by the same rcu_node structure (which need not be a 2280 * leaf rcu_node structure, though it often will be). The gps parameter 2281 * is the grace-period snapshot, which means that the quiescent states 2282 * are valid only if rnp->gp_seq is equal to gps. That structure's lock 2283 * must be held upon entry, and it is released before return. 2284 * 2285 * As a special case, if mask is zero, the bit-already-cleared check is 2286 * disabled. This allows propagating quiescent state due to resumed tasks 2287 * during grace-period initialization. 2288 */ 2289 static void rcu_report_qs_rnp(unsigned long mask, struct rcu_node *rnp, 2290 unsigned long gps, unsigned long flags) 2291 __releases(rnp->lock) 2292 { 2293 unsigned long oldmask = 0; 2294 struct rcu_node *rnp_c; 2295 2296 raw_lockdep_assert_held_rcu_node(rnp); 2297 2298 /* Walk up the rcu_node hierarchy. */ 2299 for (;;) { 2300 if ((!(rnp->qsmask & mask) && mask) || rnp->gp_seq != gps) { 2301 2302 /* 2303 * Our bit has already been cleared, or the 2304 * relevant grace period is already over, so done. 2305 */ 2306 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 2307 return; 2308 } 2309 WARN_ON_ONCE(oldmask); /* Any child must be all zeroed! */ 2310 WARN_ON_ONCE(!rcu_is_leaf_node(rnp) && 2311 rcu_preempt_blocked_readers_cgp(rnp)); 2312 WRITE_ONCE(rnp->qsmask, rnp->qsmask & ~mask); 2313 trace_rcu_quiescent_state_report(rcu_state.name, rnp->gp_seq, 2314 mask, rnp->qsmask, rnp->level, 2315 rnp->grplo, rnp->grphi, 2316 !!rnp->gp_tasks); 2317 if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) { 2318 2319 /* Other bits still set at this level, so done. */ 2320 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 2321 return; 2322 } 2323 rnp->completedqs = rnp->gp_seq; 2324 mask = rnp->grpmask; 2325 if (rnp->parent == NULL) { 2326 2327 /* No more levels. Exit loop holding root lock. 
*/ 2328 2329 break; 2330 } 2331 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 2332 rnp_c = rnp; 2333 rnp = rnp->parent; 2334 raw_spin_lock_irqsave_rcu_node(rnp, flags); 2335 oldmask = READ_ONCE(rnp_c->qsmask); 2336 } 2337 2338 /* 2339 * Get here if we are the last CPU to pass through a quiescent 2340 * state for this grace period. Invoke rcu_report_qs_rsp() 2341 * to clean up and start the next grace period if one is needed. 2342 */ 2343 rcu_report_qs_rsp(flags); /* releases rnp->lock. */ 2344 } 2345 2346 /* 2347 * Record a quiescent state for all tasks that were previously queued 2348 * on the specified rcu_node structure and that were blocking the current 2349 * RCU grace period. The caller must hold the corresponding rnp->lock with 2350 * irqs disabled, and this lock is released upon return, but irqs remain 2351 * disabled. 2352 */ 2353 static void __maybe_unused 2354 rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags) 2355 __releases(rnp->lock) 2356 { 2357 unsigned long gps; 2358 unsigned long mask; 2359 struct rcu_node *rnp_p; 2360 2361 raw_lockdep_assert_held_rcu_node(rnp); 2362 if (WARN_ON_ONCE(!IS_ENABLED(CONFIG_PREEMPT_RCU)) || 2363 WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)) || 2364 rnp->qsmask != 0) { 2365 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 2366 return; /* Still need more quiescent states! */ 2367 } 2368 2369 rnp->completedqs = rnp->gp_seq; 2370 rnp_p = rnp->parent; 2371 if (rnp_p == NULL) { 2372 /* 2373 * Only one rcu_node structure in the tree, so don't 2374 * try to report up to its nonexistent parent! 2375 */ 2376 rcu_report_qs_rsp(flags); 2377 return; 2378 } 2379 2380 /* Report up the rest of the hierarchy, tracking current ->gp_seq. */ 2381 gps = rnp->gp_seq; 2382 mask = rnp->grpmask; 2383 raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */ 2384 raw_spin_lock_rcu_node(rnp_p); /* irqs already disabled. */ 2385 rcu_report_qs_rnp(mask, rnp_p, gps, flags); 2386 } 2387 2388 /* 2389 * Record a quiescent state for the specified CPU to that CPU's rcu_data 2390 * structure. This must be called from the specified CPU. 2391 */ 2392 static void 2393 rcu_report_qs_rdp(struct rcu_data *rdp) 2394 { 2395 unsigned long flags; 2396 unsigned long mask; 2397 bool needacc = false; 2398 struct rcu_node *rnp; 2399 2400 WARN_ON_ONCE(rdp->cpu != smp_processor_id()); 2401 rnp = rdp->mynode; 2402 raw_spin_lock_irqsave_rcu_node(rnp, flags); 2403 if (rdp->cpu_no_qs.b.norm || rdp->gp_seq != rnp->gp_seq || 2404 rdp->gpwrap) { 2405 2406 /* 2407 * The grace period in which this quiescent state was 2408 * recorded has ended, so don't report it upwards. 2409 * We will instead need a new quiescent state that lies 2410 * within the current grace period. 2411 */ 2412 rdp->cpu_no_qs.b.norm = true; /* need qs for new gp. */ 2413 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 2414 return; 2415 } 2416 mask = rdp->grpmask; 2417 rdp->core_needs_qs = false; 2418 if ((rnp->qsmask & mask) == 0) { 2419 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 2420 } else { 2421 /* 2422 * This GP can't end until cpu checks in, so all of our 2423 * callbacks can be processed during the next GP. 2424 * 2425 * NOCB kthreads have their own way to deal with that... 2426 */ 2427 if (!rcu_rdp_is_offloaded(rdp)) { 2428 /* 2429 * The current GP has not yet ended, so it 2430 * should not be possible for rcu_accelerate_cbs() 2431 * to return true. So complain, but don't awaken. 
2432 */ 2433 WARN_ON_ONCE(rcu_accelerate_cbs(rnp, rdp)); 2434 } else if (!rcu_segcblist_completely_offloaded(&rdp->cblist)) { 2435 /* 2436 * ...but NOCB kthreads may miss or delay callbacks acceleration 2437 * if in the middle of a (de-)offloading process. 2438 */ 2439 needacc = true; 2440 } 2441 2442 rcu_disable_urgency_upon_qs(rdp); 2443 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags); 2444 /* ^^^ Released rnp->lock */ 2445 2446 if (needacc) { 2447 rcu_nocb_lock_irqsave(rdp, flags); 2448 rcu_accelerate_cbs_unlocked(rnp, rdp); 2449 rcu_nocb_unlock_irqrestore(rdp, flags); 2450 } 2451 } 2452 } 2453 2454 /* 2455 * Check to see if there is a new grace period of which this CPU 2456 * is not yet aware, and if so, set up local rcu_data state for it. 2457 * Otherwise, see if this CPU has just passed through its first 2458 * quiescent state for this grace period, and record that fact if so. 2459 */ 2460 static void 2461 rcu_check_quiescent_state(struct rcu_data *rdp) 2462 { 2463 /* Check for grace-period ends and beginnings. */ 2464 note_gp_changes(rdp); 2465 2466 /* 2467 * Does this CPU still need to do its part for current grace period? 2468 * If no, return and let the other CPUs do their part as well. 2469 */ 2470 if (!rdp->core_needs_qs) 2471 return; 2472 2473 /* 2474 * Was there a quiescent state since the beginning of the grace 2475 * period? If no, then exit and wait for the next call. 2476 */ 2477 if (rdp->cpu_no_qs.b.norm) 2478 return; 2479 2480 /* 2481 * Tell RCU we are done (but rcu_report_qs_rdp() will be the 2482 * judge of that). 2483 */ 2484 rcu_report_qs_rdp(rdp); 2485 } 2486 2487 /* Return true if callback-invocation time limit exceeded. */ 2488 static bool rcu_do_batch_check_time(long count, long tlimit, 2489 bool jlimit_check, unsigned long jlimit) 2490 { 2491 // Invoke local_clock() only once per 32 consecutive callbacks. 2492 return unlikely(tlimit) && 2493 (!likely(count & 31) || 2494 (IS_ENABLED(CONFIG_RCU_DOUBLE_CHECK_CB_TIME) && 2495 jlimit_check && time_after(jiffies, jlimit))) && 2496 local_clock() >= tlimit; 2497 } 2498 2499 /* 2500 * Invoke any RCU callbacks that have made it to the end of their grace 2501 * period. Throttle as specified by rdp->blimit. 2502 */ 2503 static void rcu_do_batch(struct rcu_data *rdp) 2504 { 2505 long bl; 2506 long count = 0; 2507 int div; 2508 bool __maybe_unused empty; 2509 unsigned long flags; 2510 unsigned long jlimit; 2511 bool jlimit_check = false; 2512 long pending; 2513 struct rcu_cblist rcl = RCU_CBLIST_INITIALIZER(rcl); 2514 struct rcu_head *rhp; 2515 long tlimit = 0; 2516 2517 /* If no callbacks are ready, just return. */ 2518 if (!rcu_segcblist_ready_cbs(&rdp->cblist)) { 2519 trace_rcu_batch_start(rcu_state.name, 2520 rcu_segcblist_n_cbs(&rdp->cblist), 0); 2521 trace_rcu_batch_end(rcu_state.name, 0, 2522 !rcu_segcblist_empty(&rdp->cblist), 2523 need_resched(), is_idle_task(current), 2524 rcu_is_callbacks_kthread(rdp)); 2525 return; 2526 } 2527 2528 /* 2529 * Extract the list of ready callbacks, disabling IRQs to prevent 2530 * races with call_rcu() from interrupt handlers. Leave the 2531 * callback counts, as rcu_barrier() needs to be conservative. 2532 * 2533 * Callbacks execution is fully ordered against preceding grace period 2534 * completion (materialized by rnp->gp_seq update) thanks to the 2535 * smp_mb__after_unlock_lock() upon node locking required for callbacks 2536 * advancing. In NOCB mode this ordering is then further relayed through 2537 * the nocb locking that protects both callbacks advancing and extraction. 
2538 */ 2539 rcu_nocb_lock_irqsave(rdp, flags); 2540 WARN_ON_ONCE(cpu_is_offline(smp_processor_id())); 2541 pending = rcu_segcblist_get_seglen(&rdp->cblist, RCU_DONE_TAIL); 2542 div = READ_ONCE(rcu_divisor); 2543 div = div < 0 ? 7 : div > sizeof(long) * 8 - 2 ? sizeof(long) * 8 - 2 : div; 2544 bl = max(rdp->blimit, pending >> div); 2545 if ((in_serving_softirq() || rdp->rcu_cpu_kthread_status == RCU_KTHREAD_RUNNING) && 2546 (IS_ENABLED(CONFIG_RCU_DOUBLE_CHECK_CB_TIME) || unlikely(bl > 100))) { 2547 const long npj = NSEC_PER_SEC / HZ; 2548 long rrn = READ_ONCE(rcu_resched_ns); 2549 2550 rrn = rrn < NSEC_PER_MSEC ? NSEC_PER_MSEC : rrn > NSEC_PER_SEC ? NSEC_PER_SEC : rrn; 2551 tlimit = local_clock() + rrn; 2552 jlimit = jiffies + (rrn + npj + 1) / npj; 2553 jlimit_check = true; 2554 } 2555 trace_rcu_batch_start(rcu_state.name, 2556 rcu_segcblist_n_cbs(&rdp->cblist), bl); 2557 rcu_segcblist_extract_done_cbs(&rdp->cblist, &rcl); 2558 if (rcu_rdp_is_offloaded(rdp)) 2559 rdp->qlen_last_fqs_check = rcu_segcblist_n_cbs(&rdp->cblist); 2560 2561 trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCbDequeued")); 2562 rcu_nocb_unlock_irqrestore(rdp, flags); 2563 2564 /* Invoke callbacks. */ 2565 tick_dep_set_task(current, TICK_DEP_BIT_RCU); 2566 rhp = rcu_cblist_dequeue(&rcl); 2567 2568 for (; rhp; rhp = rcu_cblist_dequeue(&rcl)) { 2569 rcu_callback_t f; 2570 2571 count++; 2572 debug_rcu_head_unqueue(rhp); 2573 2574 rcu_lock_acquire(&rcu_callback_map); 2575 trace_rcu_invoke_callback(rcu_state.name, rhp); 2576 2577 f = rhp->func; 2578 debug_rcu_head_callback(rhp); 2579 WRITE_ONCE(rhp->func, (rcu_callback_t)0L); 2580 f(rhp); 2581 2582 rcu_lock_release(&rcu_callback_map); 2583 2584 /* 2585 * Stop only if limit reached and CPU has something to do. 2586 */ 2587 if (in_serving_softirq()) { 2588 if (count >= bl && (need_resched() || !is_idle_task(current))) 2589 break; 2590 /* 2591 * Make sure we don't spend too much time here and deprive other 2592 * softirq vectors of CPU cycles. 2593 */ 2594 if (rcu_do_batch_check_time(count, tlimit, jlimit_check, jlimit)) 2595 break; 2596 } else { 2597 // In rcuc/rcuoc context, so no worries about 2598 // depriving other softirq vectors of CPU cycles. 2599 local_bh_enable(); 2600 lockdep_assert_irqs_enabled(); 2601 cond_resched_tasks_rcu_qs(); 2602 lockdep_assert_irqs_enabled(); 2603 local_bh_disable(); 2604 // But rcuc kthreads can delay quiescent-state 2605 // reporting, so check time limits for them. 2606 if (rdp->rcu_cpu_kthread_status == RCU_KTHREAD_RUNNING && 2607 rcu_do_batch_check_time(count, tlimit, jlimit_check, jlimit)) { 2608 rdp->rcu_cpu_has_work = 1; 2609 break; 2610 } 2611 } 2612 } 2613 2614 rcu_nocb_lock_irqsave(rdp, flags); 2615 rdp->n_cbs_invoked += count; 2616 trace_rcu_batch_end(rcu_state.name, count, !!rcl.head, need_resched(), 2617 is_idle_task(current), rcu_is_callbacks_kthread(rdp)); 2618 2619 /* Update counts and requeue any remaining callbacks. */ 2620 rcu_segcblist_insert_done_cbs(&rdp->cblist, &rcl); 2621 rcu_segcblist_add_len(&rdp->cblist, -count); 2622 2623 /* Reinstate batch limit if we have worked down the excess. */ 2624 count = rcu_segcblist_n_cbs(&rdp->cblist); 2625 if (rdp->blimit >= DEFAULT_MAX_RCU_BLIMIT && count <= qlowmark) 2626 rdp->blimit = blimit; 2627 2628 /* Reset ->qlen_last_fqs_check trigger if enough CBs have drained. 
*/ 2629 if (count == 0 && rdp->qlen_last_fqs_check != 0) { 2630 rdp->qlen_last_fqs_check = 0; 2631 rdp->n_force_qs_snap = READ_ONCE(rcu_state.n_force_qs); 2632 } else if (count < rdp->qlen_last_fqs_check - qhimark) 2633 rdp->qlen_last_fqs_check = count; 2634 2635 /* 2636 * The following usually indicates a double call_rcu(). To track 2637 * this down, try building with CONFIG_DEBUG_OBJECTS_RCU_HEAD=y. 2638 */ 2639 empty = rcu_segcblist_empty(&rdp->cblist); 2640 WARN_ON_ONCE(count == 0 && !empty); 2641 WARN_ON_ONCE(!IS_ENABLED(CONFIG_RCU_NOCB_CPU) && 2642 count != 0 && empty); 2643 WARN_ON_ONCE(count == 0 && rcu_segcblist_n_segment_cbs(&rdp->cblist) != 0); 2644 WARN_ON_ONCE(!empty && rcu_segcblist_n_segment_cbs(&rdp->cblist) == 0); 2645 2646 rcu_nocb_unlock_irqrestore(rdp, flags); 2647 2648 tick_dep_clear_task(current, TICK_DEP_BIT_RCU); 2649 } 2650 2651 /* 2652 * This function is invoked from each scheduling-clock interrupt, 2653 * and checks to see if this CPU is in a non-context-switch quiescent 2654 * state, for example, user mode or idle loop. It also schedules RCU 2655 * core processing. If the current grace period has gone on too long, 2656 * it will ask the scheduler to manufacture a context switch for the sole 2657 * purpose of providing the needed quiescent state. 2658 */ 2659 void rcu_sched_clock_irq(int user) 2660 { 2661 unsigned long j; 2662 2663 if (IS_ENABLED(CONFIG_PROVE_RCU)) { 2664 j = jiffies; 2665 WARN_ON_ONCE(time_before(j, __this_cpu_read(rcu_data.last_sched_clock))); 2666 __this_cpu_write(rcu_data.last_sched_clock, j); 2667 } 2668 trace_rcu_utilization(TPS("Start scheduler-tick")); 2669 lockdep_assert_irqs_disabled(); 2670 raw_cpu_inc(rcu_data.ticks_this_gp); 2671 /* The load-acquire pairs with the store-release setting to true. */ 2672 if (smp_load_acquire(this_cpu_ptr(&rcu_data.rcu_urgent_qs))) { 2673 /* Idle and userspace execution already are quiescent states. */ 2674 if (!rcu_is_cpu_rrupt_from_idle() && !user) { 2675 set_tsk_need_resched(current); 2676 set_preempt_need_resched(); 2677 } 2678 __this_cpu_write(rcu_data.rcu_urgent_qs, false); 2679 } 2680 rcu_flavor_sched_clock_irq(user); 2681 if (rcu_pending(user)) 2682 invoke_rcu_core(); 2683 if (user || rcu_is_cpu_rrupt_from_idle()) 2684 rcu_note_voluntary_context_switch(current); 2685 lockdep_assert_irqs_disabled(); 2686 2687 trace_rcu_utilization(TPS("End scheduler-tick")); 2688 } 2689 2690 /* 2691 * Scan the leaf rcu_node structures. For each structure on which all 2692 * CPUs have reported a quiescent state and on which there are tasks 2693 * blocking the current grace period, initiate RCU priority boosting. 2694 * Otherwise, invoke the specified function to check dyntick state for 2695 * each CPU that has not yet reported a quiescent state. 2696 */ 2697 static void force_qs_rnp(int (*f)(struct rcu_data *rdp)) 2698 { 2699 int cpu; 2700 unsigned long flags; 2701 struct rcu_node *rnp; 2702 2703 rcu_state.cbovld = rcu_state.cbovldnext; 2704 rcu_state.cbovldnext = false; 2705 rcu_for_each_leaf_node(rnp) { 2706 unsigned long mask = 0; 2707 unsigned long rsmask = 0; 2708 2709 cond_resched_tasks_rcu_qs(); 2710 raw_spin_lock_irqsave_rcu_node(rnp, flags); 2711 rcu_state.cbovldnext |= !!rnp->cbovldmask; 2712 if (rnp->qsmask == 0) { 2713 if (rcu_preempt_blocked_readers_cgp(rnp)) { 2714 /* 2715 * No point in scanning bits because they 2716 * are all zero. But we might need to 2717 * priority-boost blocked readers. 
2718 */ 2719 rcu_initiate_boost(rnp, flags); 2720 /* rcu_initiate_boost() releases rnp->lock */ 2721 continue; 2722 } 2723 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 2724 continue; 2725 } 2726 for_each_leaf_node_cpu_mask(rnp, cpu, rnp->qsmask) { 2727 struct rcu_data *rdp; 2728 int ret; 2729 2730 rdp = per_cpu_ptr(&rcu_data, cpu); 2731 ret = f(rdp); 2732 if (ret > 0) { 2733 mask |= rdp->grpmask; 2734 rcu_disable_urgency_upon_qs(rdp); 2735 } 2736 if (ret < 0) 2737 rsmask |= rdp->grpmask; 2738 } 2739 if (mask != 0) { 2740 /* Idle/offline CPUs, report (releases rnp->lock). */ 2741 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags); 2742 } else { 2743 /* Nothing to do here, so just drop the lock. */ 2744 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 2745 } 2746 2747 for_each_leaf_node_cpu_mask(rnp, cpu, rsmask) 2748 resched_cpu(cpu); 2749 } 2750 } 2751 2752 /* 2753 * Force quiescent states on reluctant CPUs, and also detect which 2754 * CPUs are in dyntick-idle mode. 2755 */ 2756 void rcu_force_quiescent_state(void) 2757 { 2758 unsigned long flags; 2759 bool ret; 2760 struct rcu_node *rnp; 2761 struct rcu_node *rnp_old = NULL; 2762 2763 if (!rcu_gp_in_progress()) 2764 return; 2765 /* Funnel through hierarchy to reduce memory contention. */ 2766 rnp = raw_cpu_read(rcu_data.mynode); 2767 for (; rnp != NULL; rnp = rnp->parent) { 2768 ret = (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) || 2769 !raw_spin_trylock(&rnp->fqslock); 2770 if (rnp_old != NULL) 2771 raw_spin_unlock(&rnp_old->fqslock); 2772 if (ret) 2773 return; 2774 rnp_old = rnp; 2775 } 2776 /* rnp_old == rcu_get_root(), rnp == NULL. */ 2777 2778 /* Reached the root of the rcu_node tree, acquire lock. */ 2779 raw_spin_lock_irqsave_rcu_node(rnp_old, flags); 2780 raw_spin_unlock(&rnp_old->fqslock); 2781 if (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) { 2782 raw_spin_unlock_irqrestore_rcu_node(rnp_old, flags); 2783 return; /* Someone beat us to it. */ 2784 } 2785 WRITE_ONCE(rcu_state.gp_flags, rcu_state.gp_flags | RCU_GP_FLAG_FQS); 2786 raw_spin_unlock_irqrestore_rcu_node(rnp_old, flags); 2787 rcu_gp_kthread_wake(); 2788 } 2789 EXPORT_SYMBOL_GPL(rcu_force_quiescent_state); 2790 2791 // Workqueue handler for an RCU reader for kernels enforcing struct RCU 2792 // grace periods. 2793 static void strict_work_handler(struct work_struct *work) 2794 { 2795 rcu_read_lock(); 2796 rcu_read_unlock(); 2797 } 2798 2799 /* Perform RCU core processing work for the current CPU. */ 2800 static __latent_entropy void rcu_core(void) 2801 { 2802 unsigned long flags; 2803 struct rcu_data *rdp = raw_cpu_ptr(&rcu_data); 2804 struct rcu_node *rnp = rdp->mynode; 2805 /* 2806 * On RT rcu_core() can be preempted when IRQs aren't disabled. 2807 * Therefore this function can race with concurrent NOCB (de-)offloading 2808 * on this CPU and the below condition must be considered volatile. 2809 * However if we race with: 2810 * 2811 * _ Offloading: In the worst case we accelerate or process callbacks 2812 * concurrently with NOCB kthreads. We are guaranteed to 2813 * call rcu_nocb_lock() if that happens. 2814 * 2815 * _ Deoffloading: In the worst case we miss callbacks acceleration or 2816 * processing. This is fine because the early stage 2817 * of deoffloading invokes rcu_core() after setting 2818 * SEGCBLIST_RCU_CORE. So we guarantee that we'll process 2819 * what could have been dismissed without the need to wait 2820 * for the next rcu_pending() check in the next jiffy. 
2821 */ 2822 const bool do_batch = !rcu_segcblist_completely_offloaded(&rdp->cblist); 2823 2824 if (cpu_is_offline(smp_processor_id())) 2825 return; 2826 trace_rcu_utilization(TPS("Start RCU core")); 2827 WARN_ON_ONCE(!rdp->beenonline); 2828 2829 /* Report any deferred quiescent states if preemption enabled. */ 2830 if (IS_ENABLED(CONFIG_PREEMPT_COUNT) && (!(preempt_count() & PREEMPT_MASK))) { 2831 rcu_preempt_deferred_qs(current); 2832 } else if (rcu_preempt_need_deferred_qs(current)) { 2833 set_tsk_need_resched(current); 2834 set_preempt_need_resched(); 2835 } 2836 2837 /* Update RCU state based on any recent quiescent states. */ 2838 rcu_check_quiescent_state(rdp); 2839 2840 /* No grace period and unregistered callbacks? */ 2841 if (!rcu_gp_in_progress() && 2842 rcu_segcblist_is_enabled(&rdp->cblist) && do_batch) { 2843 rcu_nocb_lock_irqsave(rdp, flags); 2844 if (!rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL)) 2845 rcu_accelerate_cbs_unlocked(rnp, rdp); 2846 rcu_nocb_unlock_irqrestore(rdp, flags); 2847 } 2848 2849 rcu_check_gp_start_stall(rnp, rdp, rcu_jiffies_till_stall_check()); 2850 2851 /* If there are callbacks ready, invoke them. */ 2852 if (do_batch && rcu_segcblist_ready_cbs(&rdp->cblist) && 2853 likely(READ_ONCE(rcu_scheduler_fully_active))) { 2854 rcu_do_batch(rdp); 2855 /* Re-invoke RCU core processing if there are callbacks remaining. */ 2856 if (rcu_segcblist_ready_cbs(&rdp->cblist)) 2857 invoke_rcu_core(); 2858 } 2859 2860 /* Do any needed deferred wakeups of rcuo kthreads. */ 2861 do_nocb_deferred_wakeup(rdp); 2862 trace_rcu_utilization(TPS("End RCU core")); 2863 2864 // If strict GPs, schedule an RCU reader in a clean environment. 2865 if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD)) 2866 queue_work_on(rdp->cpu, rcu_gp_wq, &rdp->strict_work); 2867 } 2868 2869 static void rcu_core_si(struct softirq_action *h) 2870 { 2871 rcu_core(); 2872 } 2873 2874 static void rcu_wake_cond(struct task_struct *t, int status) 2875 { 2876 /* 2877 * If the thread is yielding, only wake it when this 2878 * is invoked from idle 2879 */ 2880 if (t && (status != RCU_KTHREAD_YIELDING || is_idle_task(current))) 2881 wake_up_process(t); 2882 } 2883 2884 static void invoke_rcu_core_kthread(void) 2885 { 2886 struct task_struct *t; 2887 unsigned long flags; 2888 2889 local_irq_save(flags); 2890 __this_cpu_write(rcu_data.rcu_cpu_has_work, 1); 2891 t = __this_cpu_read(rcu_data.rcu_cpu_kthread_task); 2892 if (t != NULL && t != current) 2893 rcu_wake_cond(t, __this_cpu_read(rcu_data.rcu_cpu_kthread_status)); 2894 local_irq_restore(flags); 2895 } 2896 2897 /* 2898 * Wake up this CPU's rcuc kthread to do RCU core processing. 2899 */ 2900 static void invoke_rcu_core(void) 2901 { 2902 if (!cpu_online(smp_processor_id())) 2903 return; 2904 if (use_softirq) 2905 raise_softirq(RCU_SOFTIRQ); 2906 else 2907 invoke_rcu_core_kthread(); 2908 } 2909 2910 static void rcu_cpu_kthread_park(unsigned int cpu) 2911 { 2912 per_cpu(rcu_data.rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU; 2913 } 2914 2915 static int rcu_cpu_kthread_should_run(unsigned int cpu) 2916 { 2917 return __this_cpu_read(rcu_data.rcu_cpu_has_work); 2918 } 2919 2920 /* 2921 * Per-CPU kernel thread that invokes RCU callbacks. This replaces 2922 * the RCU softirq used in configurations of RCU that do not support RCU 2923 * priority boosting. 
2924 */ 2925 static void rcu_cpu_kthread(unsigned int cpu) 2926 { 2927 unsigned int *statusp = this_cpu_ptr(&rcu_data.rcu_cpu_kthread_status); 2928 char work, *workp = this_cpu_ptr(&rcu_data.rcu_cpu_has_work); 2929 unsigned long *j = this_cpu_ptr(&rcu_data.rcuc_activity); 2930 int spincnt; 2931 2932 trace_rcu_utilization(TPS("Start CPU kthread@rcu_run")); 2933 for (spincnt = 0; spincnt < 10; spincnt++) { 2934 WRITE_ONCE(*j, jiffies); 2935 local_bh_disable(); 2936 *statusp = RCU_KTHREAD_RUNNING; 2937 local_irq_disable(); 2938 work = *workp; 2939 WRITE_ONCE(*workp, 0); 2940 local_irq_enable(); 2941 if (work) 2942 rcu_core(); 2943 local_bh_enable(); 2944 if (!READ_ONCE(*workp)) { 2945 trace_rcu_utilization(TPS("End CPU kthread@rcu_wait")); 2946 *statusp = RCU_KTHREAD_WAITING; 2947 return; 2948 } 2949 } 2950 *statusp = RCU_KTHREAD_YIELDING; 2951 trace_rcu_utilization(TPS("Start CPU kthread@rcu_yield")); 2952 schedule_timeout_idle(2); 2953 trace_rcu_utilization(TPS("End CPU kthread@rcu_yield")); 2954 *statusp = RCU_KTHREAD_WAITING; 2955 WRITE_ONCE(*j, jiffies); 2956 } 2957 2958 static struct smp_hotplug_thread rcu_cpu_thread_spec = { 2959 .store = &rcu_data.rcu_cpu_kthread_task, 2960 .thread_should_run = rcu_cpu_kthread_should_run, 2961 .thread_fn = rcu_cpu_kthread, 2962 .thread_comm = "rcuc/%u", 2963 .setup = rcu_cpu_kthread_setup, 2964 .park = rcu_cpu_kthread_park, 2965 }; 2966 2967 /* 2968 * Spawn per-CPU RCU core processing kthreads. 2969 */ 2970 static int __init rcu_spawn_core_kthreads(void) 2971 { 2972 int cpu; 2973 2974 for_each_possible_cpu(cpu) 2975 per_cpu(rcu_data.rcu_cpu_has_work, cpu) = 0; 2976 if (use_softirq) 2977 return 0; 2978 WARN_ONCE(smpboot_register_percpu_thread(&rcu_cpu_thread_spec), 2979 "%s: Could not start rcuc kthread, OOM is now expected behavior\n", __func__); 2980 return 0; 2981 } 2982 2983 static void rcutree_enqueue(struct rcu_data *rdp, struct rcu_head *head, rcu_callback_t func) 2984 { 2985 rcu_segcblist_enqueue(&rdp->cblist, head); 2986 if (__is_kvfree_rcu_offset((unsigned long)func)) 2987 trace_rcu_kvfree_callback(rcu_state.name, head, 2988 (unsigned long)func, 2989 rcu_segcblist_n_cbs(&rdp->cblist)); 2990 else 2991 trace_rcu_callback(rcu_state.name, head, 2992 rcu_segcblist_n_cbs(&rdp->cblist)); 2993 trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCBQueued")); 2994 } 2995 2996 /* 2997 * Handle any core-RCU processing required by a call_rcu() invocation. 2998 */ 2999 static void call_rcu_core(struct rcu_data *rdp, struct rcu_head *head, 3000 rcu_callback_t func, unsigned long flags) 3001 { 3002 rcutree_enqueue(rdp, head, func); 3003 /* 3004 * If called from an extended quiescent state, invoke the RCU 3005 * core in order to force a re-evaluation of RCU's idleness. 3006 */ 3007 if (!rcu_is_watching()) 3008 invoke_rcu_core(); 3009 3010 /* If interrupts were disabled or CPU offline, don't invoke RCU core. */ 3011 if (irqs_disabled_flags(flags) || cpu_is_offline(smp_processor_id())) 3012 return; 3013 3014 /* 3015 * Force the grace period if too many callbacks or too long waiting. 3016 * Enforce hysteresis, and don't invoke rcu_force_quiescent_state() 3017 * if some other CPU has recently done so. Also, don't bother 3018 * invoking rcu_force_quiescent_state() if the newly enqueued callback 3019 * is the only one waiting for a grace period to complete. 3020 */ 3021 if (unlikely(rcu_segcblist_n_cbs(&rdp->cblist) > 3022 rdp->qlen_last_fqs_check + qhimark)) { 3023 3024 /* Are we ignoring a completed grace period? 
*/ 3025 note_gp_changes(rdp); 3026 3027 /* Start a new grace period if one not already started. */ 3028 if (!rcu_gp_in_progress()) { 3029 rcu_accelerate_cbs_unlocked(rdp->mynode, rdp); 3030 } else { 3031 /* Give the grace period a kick. */ 3032 rdp->blimit = DEFAULT_MAX_RCU_BLIMIT; 3033 if (READ_ONCE(rcu_state.n_force_qs) == rdp->n_force_qs_snap && 3034 rcu_segcblist_first_pend_cb(&rdp->cblist) != head) 3035 rcu_force_quiescent_state(); 3036 rdp->n_force_qs_snap = READ_ONCE(rcu_state.n_force_qs); 3037 rdp->qlen_last_fqs_check = rcu_segcblist_n_cbs(&rdp->cblist); 3038 } 3039 } 3040 } 3041 3042 /* 3043 * RCU callback function to leak a callback. 3044 */ 3045 static void rcu_leak_callback(struct rcu_head *rhp) 3046 { 3047 } 3048 3049 /* 3050 * Check and if necessary update the leaf rcu_node structure's 3051 * ->cbovldmask bit corresponding to the current CPU based on that CPU's 3052 * number of queued RCU callbacks. The caller must hold the leaf rcu_node 3053 * structure's ->lock. 3054 */ 3055 static void check_cb_ovld_locked(struct rcu_data *rdp, struct rcu_node *rnp) 3056 { 3057 raw_lockdep_assert_held_rcu_node(rnp); 3058 if (qovld_calc <= 0) 3059 return; // Early boot and wildcard value set. 3060 if (rcu_segcblist_n_cbs(&rdp->cblist) >= qovld_calc) 3061 WRITE_ONCE(rnp->cbovldmask, rnp->cbovldmask | rdp->grpmask); 3062 else 3063 WRITE_ONCE(rnp->cbovldmask, rnp->cbovldmask & ~rdp->grpmask); 3064 } 3065 3066 /* 3067 * Check and if necessary update the leaf rcu_node structure's 3068 * ->cbovldmask bit corresponding to the current CPU based on that CPU's 3069 * number of queued RCU callbacks. No locks need be held, but the 3070 * caller must have disabled interrupts. 3071 * 3072 * Note that this function ignores the possibility that there are a lot 3073 * of callbacks all of which have already seen the end of their respective 3074 * grace periods. This omission is due to the need for no-CBs CPUs to 3075 * be holding ->nocb_lock to do this check, which is too heavy for a 3076 * common-case operation. 3077 */ 3078 static void check_cb_ovld(struct rcu_data *rdp) 3079 { 3080 struct rcu_node *const rnp = rdp->mynode; 3081 3082 if (qovld_calc <= 0 || 3083 ((rcu_segcblist_n_cbs(&rdp->cblist) >= qovld_calc) == 3084 !!(READ_ONCE(rnp->cbovldmask) & rdp->grpmask))) 3085 return; // Early boot wildcard value or already set correctly. 3086 raw_spin_lock_rcu_node(rnp); 3087 check_cb_ovld_locked(rdp, rnp); 3088 raw_spin_unlock_rcu_node(rnp); 3089 } 3090 3091 static void 3092 __call_rcu_common(struct rcu_head *head, rcu_callback_t func, bool lazy_in) 3093 { 3094 static atomic_t doublefrees; 3095 unsigned long flags; 3096 bool lazy; 3097 struct rcu_data *rdp; 3098 3099 /* Misaligned rcu_head! */ 3100 WARN_ON_ONCE((unsigned long)head & (sizeof(void *) - 1)); 3101 3102 if (debug_rcu_head_queue(head)) { 3103 /* 3104 * Probable double call_rcu(), so leak the callback. 3105 * Use rcu:rcu_callback trace event to find the previous 3106 * time callback was passed to call_rcu(). 3107 */ 3108 if (atomic_inc_return(&doublefrees) < 4) { 3109 pr_err("%s(): Double-freed CB %p->%pS()!!! ", __func__, head, head->func); 3110 mem_dump_obj(head); 3111 } 3112 WRITE_ONCE(head->func, rcu_leak_callback); 3113 return; 3114 } 3115 head->func = func; 3116 head->next = NULL; 3117 kasan_record_aux_stack_noalloc(head); 3118 local_irq_save(flags); 3119 rdp = this_cpu_ptr(&rcu_data); 3120 lazy = lazy_in && !rcu_async_should_hurry(); 3121 3122 /* Add the callback to our list. 
*/ 3123 if (unlikely(!rcu_segcblist_is_enabled(&rdp->cblist))) { 3124 // This can trigger due to call_rcu() from offline CPU: 3125 WARN_ON_ONCE(rcu_scheduler_active != RCU_SCHEDULER_INACTIVE); 3126 WARN_ON_ONCE(!rcu_is_watching()); 3127 // Very early boot, before rcu_init(). Initialize if needed 3128 // and then drop through to queue the callback. 3129 if (rcu_segcblist_empty(&rdp->cblist)) 3130 rcu_segcblist_init(&rdp->cblist); 3131 } 3132 3133 check_cb_ovld(rdp); 3134 3135 if (unlikely(rcu_rdp_is_offloaded(rdp))) 3136 call_rcu_nocb(rdp, head, func, flags, lazy); 3137 else 3138 call_rcu_core(rdp, head, func, flags); 3139 local_irq_restore(flags); 3140 } 3141 3142 #ifdef CONFIG_RCU_LAZY 3143 static bool enable_rcu_lazy __read_mostly = !IS_ENABLED(CONFIG_RCU_LAZY_DEFAULT_OFF); 3144 module_param(enable_rcu_lazy, bool, 0444); 3145 3146 /** 3147 * call_rcu_hurry() - Queue RCU callback for invocation after grace period, and 3148 * flush all lazy callbacks (including the new one) to the main ->cblist while 3149 * doing so. 3150 * 3151 * @head: structure to be used for queueing the RCU updates. 3152 * @func: actual callback function to be invoked after the grace period 3153 * 3154 * The callback function will be invoked some time after a full grace 3155 * period elapses, in other words after all pre-existing RCU read-side 3156 * critical sections have completed. 3157 * 3158 * Use this API instead of call_rcu() if you don't want the callback to be 3159 * invoked after very long periods of time, which can happen on systems without 3160 * memory pressure and on systems which are lightly loaded or mostly idle. 3161 * This function will cause callbacks to be invoked sooner than later at the 3162 * expense of extra power. Other than that, this function is identical to, and 3163 * reuses call_rcu()'s logic. Refer to call_rcu() for more details about memory 3164 * ordering and other functionality. 3165 */ 3166 void call_rcu_hurry(struct rcu_head *head, rcu_callback_t func) 3167 { 3168 __call_rcu_common(head, func, false); 3169 } 3170 EXPORT_SYMBOL_GPL(call_rcu_hurry); 3171 #else 3172 #define enable_rcu_lazy false 3173 #endif 3174 3175 /** 3176 * call_rcu() - Queue an RCU callback for invocation after a grace period. 3177 * By default the callbacks are 'lazy' and are kept hidden from the main 3178 * ->cblist to prevent starting of grace periods too soon. 3179 * If you desire grace periods to start very soon, use call_rcu_hurry(). 3180 * 3181 * @head: structure to be used for queueing the RCU updates. 3182 * @func: actual callback function to be invoked after the grace period 3183 * 3184 * The callback function will be invoked some time after a full grace 3185 * period elapses, in other words after all pre-existing RCU read-side 3186 * critical sections have completed. However, the callback function 3187 * might well execute concurrently with RCU read-side critical sections 3188 * that started after call_rcu() was invoked. 3189 * 3190 * RCU read-side critical sections are delimited by rcu_read_lock() 3191 * and rcu_read_unlock(), and may be nested. In addition, but only in 3192 * v5.0 and later, regions of code across which interrupts, preemption, 3193 * or softirqs have been disabled also serve as RCU read-side critical 3194 * sections. This includes hardware interrupt handlers, softirq handlers, 3195 * and NMI handlers. 3196 * 3197 * Note that all CPUs must agree that the grace period extended beyond 3198 * all pre-existing RCU read-side critical section. 
On systems with more 3199 * than one CPU, this means that when "func()" is invoked, each CPU is 3200 * guaranteed to have executed a full memory barrier since the end of its 3201 * last RCU read-side critical section whose beginning preceded the call 3202 * to call_rcu(). It also means that each CPU executing an RCU read-side 3203 * critical section that continues beyond the start of "func()" must have 3204 * executed a memory barrier after the call_rcu() but before the beginning 3205 * of that RCU read-side critical section. Note that these guarantees 3206 * include CPUs that are offline, idle, or executing in user mode, as 3207 * well as CPUs that are executing in the kernel. 3208 * 3209 * Furthermore, if CPU A invoked call_rcu() and CPU B invoked the 3210 * resulting RCU callback function "func()", then both CPU A and CPU B are 3211 * guaranteed to execute a full memory barrier during the time interval 3212 * between the call to call_rcu() and the invocation of "func()" -- even 3213 * if CPU A and CPU B are the same CPU (but again only if the system has 3214 * more than one CPU). 3215 * 3216 * Implementation of these memory-ordering guarantees is described here: 3217 * Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.rst. 3218 */ 3219 void call_rcu(struct rcu_head *head, rcu_callback_t func) 3220 { 3221 __call_rcu_common(head, func, enable_rcu_lazy); 3222 } 3223 EXPORT_SYMBOL_GPL(call_rcu); 3224 3225 /* Maximum number of jiffies to wait before draining a batch. */ 3226 #define KFREE_DRAIN_JIFFIES (5 * HZ) 3227 #define KFREE_N_BATCHES 2 3228 #define FREE_N_CHANNELS 2 3229 3230 /** 3231 * struct kvfree_rcu_bulk_data - single block to store kvfree_rcu() pointers 3232 * @list: List node. All blocks are linked between each other 3233 * @gp_snap: Snapshot of RCU state for objects placed to this bulk 3234 * @nr_records: Number of active pointers in the array 3235 * @records: Array of the kvfree_rcu() pointers 3236 */ 3237 struct kvfree_rcu_bulk_data { 3238 struct list_head list; 3239 struct rcu_gp_oldstate gp_snap; 3240 unsigned long nr_records; 3241 void *records[]; 3242 }; 3243 3244 /* 3245 * This macro defines how many entries the "records" array 3246 * will contain. It is based on the fact that the size of 3247 * kvfree_rcu_bulk_data structure becomes exactly one page. 3248 */ 3249 #define KVFREE_BULK_MAX_ENTR \ 3250 ((PAGE_SIZE - sizeof(struct kvfree_rcu_bulk_data)) / sizeof(void *)) 3251 3252 /** 3253 * struct kfree_rcu_cpu_work - single batch of kfree_rcu() requests 3254 * @rcu_work: Let queue_rcu_work() invoke workqueue handler after grace period 3255 * @head_free: List of kfree_rcu() objects waiting for a grace period 3256 * @head_free_gp_snap: Grace-period snapshot to check for attempted premature frees. 
3257 * @bulk_head_free: Bulk-List of kvfree_rcu() objects waiting for a grace period 3258 * @krcp: Pointer to @kfree_rcu_cpu structure 3259 */ 3260 3261 struct kfree_rcu_cpu_work { 3262 struct rcu_work rcu_work; 3263 struct rcu_head *head_free; 3264 struct rcu_gp_oldstate head_free_gp_snap; 3265 struct list_head bulk_head_free[FREE_N_CHANNELS]; 3266 struct kfree_rcu_cpu *krcp; 3267 }; 3268 3269 /** 3270 * struct kfree_rcu_cpu - batch up kfree_rcu() requests for RCU grace period 3271 * @head: List of kfree_rcu() objects not yet waiting for a grace period 3272 * @head_gp_snap: Snapshot of RCU state for objects placed to "@head" 3273 * @bulk_head: Bulk-List of kvfree_rcu() objects not yet waiting for a grace period 3274 * @krw_arr: Array of batches of kfree_rcu() objects waiting for a grace period 3275 * @lock: Synchronize access to this structure 3276 * @monitor_work: Promote @head to @head_free after KFREE_DRAIN_JIFFIES 3277 * @initialized: The @rcu_work fields have been initialized 3278 * @head_count: Number of objects in rcu_head singular list 3279 * @bulk_count: Number of objects in bulk-list 3280 * @bkvcache: 3281 * A simple cache list that contains objects for reuse purpose. 3282 * In order to save some per-cpu space the list is singular. 3283 * Even though it is lockless an access has to be protected by the 3284 * per-cpu lock. 3285 * @page_cache_work: A work to refill the cache when it is empty 3286 * @backoff_page_cache_fill: Delay cache refills 3287 * @work_in_progress: Indicates that page_cache_work is running 3288 * @hrtimer: A hrtimer for scheduling a page_cache_work 3289 * @nr_bkv_objs: number of allocated objects at @bkvcache. 3290 * 3291 * This is a per-CPU structure. The reason that it is not included in 3292 * the rcu_data structure is to permit this code to be extracted from 3293 * the RCU files. Such extraction could allow further optimization of 3294 * the interactions with the slab allocators. 3295 */ 3296 struct kfree_rcu_cpu { 3297 // Objects queued on a linked list 3298 // through their rcu_head structures. 3299 struct rcu_head *head; 3300 unsigned long head_gp_snap; 3301 atomic_t head_count; 3302 3303 // Objects queued on a bulk-list. 3304 struct list_head bulk_head[FREE_N_CHANNELS]; 3305 atomic_t bulk_count[FREE_N_CHANNELS]; 3306 3307 struct kfree_rcu_cpu_work krw_arr[KFREE_N_BATCHES]; 3308 raw_spinlock_t lock; 3309 struct delayed_work monitor_work; 3310 bool initialized; 3311 3312 struct delayed_work page_cache_work; 3313 atomic_t backoff_page_cache_fill; 3314 atomic_t work_in_progress; 3315 struct hrtimer hrtimer; 3316 3317 struct llist_head bkvcache; 3318 int nr_bkv_objs; 3319 }; 3320 3321 static DEFINE_PER_CPU(struct kfree_rcu_cpu, krc) = { 3322 .lock = __RAW_SPIN_LOCK_UNLOCKED(krc.lock), 3323 }; 3324 3325 static __always_inline void 3326 debug_rcu_bhead_unqueue(struct kvfree_rcu_bulk_data *bhead) 3327 { 3328 #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD 3329 int i; 3330 3331 for (i = 0; i < bhead->nr_records; i++) 3332 debug_rcu_head_unqueue((struct rcu_head *)(bhead->records[i])); 3333 #endif 3334 } 3335 3336 static inline struct kfree_rcu_cpu * 3337 krc_this_cpu_lock(unsigned long *flags) 3338 { 3339 struct kfree_rcu_cpu *krcp; 3340 3341 local_irq_save(*flags); // For safely calling this_cpu_ptr(). 
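// With interrupts off, this CPU cannot migrate, so the krc instance
// selected below remains this CPU's own until krc_this_cpu_unlock().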
3342 krcp = this_cpu_ptr(&krc); 3343 raw_spin_lock(&krcp->lock); 3344 3345 return krcp; 3346 } 3347 3348 static inline void 3349 krc_this_cpu_unlock(struct kfree_rcu_cpu *krcp, unsigned long flags) 3350 { 3351 raw_spin_unlock_irqrestore(&krcp->lock, flags); 3352 } 3353 3354 static inline struct kvfree_rcu_bulk_data * 3355 get_cached_bnode(struct kfree_rcu_cpu *krcp) 3356 { 3357 if (!krcp->nr_bkv_objs) 3358 return NULL; 3359 3360 WRITE_ONCE(krcp->nr_bkv_objs, krcp->nr_bkv_objs - 1); 3361 return (struct kvfree_rcu_bulk_data *) 3362 llist_del_first(&krcp->bkvcache); 3363 } 3364 3365 static inline bool 3366 put_cached_bnode(struct kfree_rcu_cpu *krcp, 3367 struct kvfree_rcu_bulk_data *bnode) 3368 { 3369 // Check the limit. 3370 if (krcp->nr_bkv_objs >= rcu_min_cached_objs) 3371 return false; 3372 3373 llist_add((struct llist_node *) bnode, &krcp->bkvcache); 3374 WRITE_ONCE(krcp->nr_bkv_objs, krcp->nr_bkv_objs + 1); 3375 return true; 3376 } 3377 3378 static int 3379 drain_page_cache(struct kfree_rcu_cpu *krcp) 3380 { 3381 unsigned long flags; 3382 struct llist_node *page_list, *pos, *n; 3383 int freed = 0; 3384 3385 if (!rcu_min_cached_objs) 3386 return 0; 3387 3388 raw_spin_lock_irqsave(&krcp->lock, flags); 3389 page_list = llist_del_all(&krcp->bkvcache); 3390 WRITE_ONCE(krcp->nr_bkv_objs, 0); 3391 raw_spin_unlock_irqrestore(&krcp->lock, flags); 3392 3393 llist_for_each_safe(pos, n, page_list) { 3394 free_page((unsigned long)pos); 3395 freed++; 3396 } 3397 3398 return freed; 3399 } 3400 3401 static void 3402 kvfree_rcu_bulk(struct kfree_rcu_cpu *krcp, 3403 struct kvfree_rcu_bulk_data *bnode, int idx) 3404 { 3405 unsigned long flags; 3406 int i; 3407 3408 if (!WARN_ON_ONCE(!poll_state_synchronize_rcu_full(&bnode->gp_snap))) { 3409 debug_rcu_bhead_unqueue(bnode); 3410 rcu_lock_acquire(&rcu_callback_map); 3411 if (idx == 0) { // kmalloc() / kfree(). 3412 trace_rcu_invoke_kfree_bulk_callback( 3413 rcu_state.name, bnode->nr_records, 3414 bnode->records); 3415 3416 kfree_bulk(bnode->nr_records, bnode->records); 3417 } else { // vmalloc() / vfree(). 3418 for (i = 0; i < bnode->nr_records; i++) { 3419 trace_rcu_invoke_kvfree_callback( 3420 rcu_state.name, bnode->records[i], 0); 3421 3422 vfree(bnode->records[i]); 3423 } 3424 } 3425 rcu_lock_release(&rcu_callback_map); 3426 } 3427 3428 raw_spin_lock_irqsave(&krcp->lock, flags); 3429 if (put_cached_bnode(krcp, bnode)) 3430 bnode = NULL; 3431 raw_spin_unlock_irqrestore(&krcp->lock, flags); 3432 3433 if (bnode) 3434 free_page((unsigned long) bnode); 3435 3436 cond_resched_tasks_rcu_qs(); 3437 } 3438 3439 static void 3440 kvfree_rcu_list(struct rcu_head *head) 3441 { 3442 struct rcu_head *next; 3443 3444 for (; head; head = next) { 3445 void *ptr = (void *) head->func; 3446 unsigned long offset = (void *) head - ptr; 3447 3448 next = head->next; 3449 debug_rcu_head_unqueue((struct rcu_head *)ptr); 3450 rcu_lock_acquire(&rcu_callback_map); 3451 trace_rcu_invoke_kvfree_callback(rcu_state.name, head, offset); 3452 3453 if (!WARN_ON_ONCE(!__is_kvfree_rcu_offset(offset))) 3454 kvfree(ptr); 3455 3456 rcu_lock_release(&rcu_callback_map); 3457 cond_resched_tasks_rcu_qs(); 3458 } 3459 } 3460 3461 /* 3462 * This function is invoked in workqueue context after a grace period. 3463 * It frees all the objects queued on ->bulk_head_free or ->head_free. 
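 * The two bulk channels hold page-sized arrays of kmalloc()'ed
 * (channel 1) and vmalloc()'ed (channel 2) pointers, while ->head_free
 * ("channel 3") chains objects through their embedded rcu_head and is
 * used only when no array page could be obtained.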
3464 */ 3465 static void kfree_rcu_work(struct work_struct *work) 3466 { 3467 unsigned long flags; 3468 struct kvfree_rcu_bulk_data *bnode, *n; 3469 struct list_head bulk_head[FREE_N_CHANNELS]; 3470 struct rcu_head *head; 3471 struct kfree_rcu_cpu *krcp; 3472 struct kfree_rcu_cpu_work *krwp; 3473 struct rcu_gp_oldstate head_gp_snap; 3474 int i; 3475 3476 krwp = container_of(to_rcu_work(work), 3477 struct kfree_rcu_cpu_work, rcu_work); 3478 krcp = krwp->krcp; 3479 3480 raw_spin_lock_irqsave(&krcp->lock, flags); 3481 // Channels 1 and 2. 3482 for (i = 0; i < FREE_N_CHANNELS; i++) 3483 list_replace_init(&krwp->bulk_head_free[i], &bulk_head[i]); 3484 3485 // Channel 3. 3486 head = krwp->head_free; 3487 krwp->head_free = NULL; 3488 head_gp_snap = krwp->head_free_gp_snap; 3489 raw_spin_unlock_irqrestore(&krcp->lock, flags); 3490 3491 // Handle the first two channels. 3492 for (i = 0; i < FREE_N_CHANNELS; i++) { 3493 // Start from the tail page, so a GP is likely passed for it. 3494 list_for_each_entry_safe(bnode, n, &bulk_head[i], list) 3495 kvfree_rcu_bulk(krcp, bnode, i); 3496 } 3497 3498 /* 3499 * This is used when the "bulk" path can not be used for the 3500 * double-argument of kvfree_rcu(). This happens when the 3501 * page-cache is empty, which means that objects are instead 3502 * queued on a linked list through their rcu_head structures. 3503 * This list is named "Channel 3". 3504 */ 3505 if (head && !WARN_ON_ONCE(!poll_state_synchronize_rcu_full(&head_gp_snap))) 3506 kvfree_rcu_list(head); 3507 } 3508 3509 static bool 3510 need_offload_krc(struct kfree_rcu_cpu *krcp) 3511 { 3512 int i; 3513 3514 for (i = 0; i < FREE_N_CHANNELS; i++) 3515 if (!list_empty(&krcp->bulk_head[i])) 3516 return true; 3517 3518 return !!READ_ONCE(krcp->head); 3519 } 3520 3521 static bool 3522 need_wait_for_krwp_work(struct kfree_rcu_cpu_work *krwp) 3523 { 3524 int i; 3525 3526 for (i = 0; i < FREE_N_CHANNELS; i++) 3527 if (!list_empty(&krwp->bulk_head_free[i])) 3528 return true; 3529 3530 return !!krwp->head_free; 3531 } 3532 3533 static int krc_count(struct kfree_rcu_cpu *krcp) 3534 { 3535 int sum = atomic_read(&krcp->head_count); 3536 int i; 3537 3538 for (i = 0; i < FREE_N_CHANNELS; i++) 3539 sum += atomic_read(&krcp->bulk_count[i]); 3540 3541 return sum; 3542 } 3543 3544 static void 3545 schedule_delayed_monitor_work(struct kfree_rcu_cpu *krcp) 3546 { 3547 long delay, delay_left; 3548 3549 delay = krc_count(krcp) >= KVFREE_BULK_MAX_ENTR ? 
1:KFREE_DRAIN_JIFFIES; 3550 if (delayed_work_pending(&krcp->monitor_work)) { 3551 delay_left = krcp->monitor_work.timer.expires - jiffies; 3552 if (delay < delay_left) 3553 mod_delayed_work(system_wq, &krcp->monitor_work, delay); 3554 return; 3555 } 3556 queue_delayed_work(system_wq, &krcp->monitor_work, delay); 3557 } 3558 3559 static void 3560 kvfree_rcu_drain_ready(struct kfree_rcu_cpu *krcp) 3561 { 3562 struct list_head bulk_ready[FREE_N_CHANNELS]; 3563 struct kvfree_rcu_bulk_data *bnode, *n; 3564 struct rcu_head *head_ready = NULL; 3565 unsigned long flags; 3566 int i; 3567 3568 raw_spin_lock_irqsave(&krcp->lock, flags); 3569 for (i = 0; i < FREE_N_CHANNELS; i++) { 3570 INIT_LIST_HEAD(&bulk_ready[i]); 3571 3572 list_for_each_entry_safe_reverse(bnode, n, &krcp->bulk_head[i], list) { 3573 if (!poll_state_synchronize_rcu_full(&bnode->gp_snap)) 3574 break; 3575 3576 atomic_sub(bnode->nr_records, &krcp->bulk_count[i]); 3577 list_move(&bnode->list, &bulk_ready[i]); 3578 } 3579 } 3580 3581 if (krcp->head && poll_state_synchronize_rcu(krcp->head_gp_snap)) { 3582 head_ready = krcp->head; 3583 atomic_set(&krcp->head_count, 0); 3584 WRITE_ONCE(krcp->head, NULL); 3585 } 3586 raw_spin_unlock_irqrestore(&krcp->lock, flags); 3587 3588 for (i = 0; i < FREE_N_CHANNELS; i++) { 3589 list_for_each_entry_safe(bnode, n, &bulk_ready[i], list) 3590 kvfree_rcu_bulk(krcp, bnode, i); 3591 } 3592 3593 if (head_ready) 3594 kvfree_rcu_list(head_ready); 3595 } 3596 3597 /* 3598 * This function is invoked after the KFREE_DRAIN_JIFFIES timeout. 3599 */ 3600 static void kfree_rcu_monitor(struct work_struct *work) 3601 { 3602 struct kfree_rcu_cpu *krcp = container_of(work, 3603 struct kfree_rcu_cpu, monitor_work.work); 3604 unsigned long flags; 3605 int i, j; 3606 3607 // Drain ready for reclaim. 3608 kvfree_rcu_drain_ready(krcp); 3609 3610 raw_spin_lock_irqsave(&krcp->lock, flags); 3611 3612 // Attempt to start a new batch. 3613 for (i = 0; i < KFREE_N_BATCHES; i++) { 3614 struct kfree_rcu_cpu_work *krwp = &(krcp->krw_arr[i]); 3615 3616 // Try to detach bulk_head or head and attach it, only when 3617 // all channels are free. Any channel is not free means at krwp 3618 // there is on-going rcu work to handle krwp's free business. 3619 if (need_wait_for_krwp_work(krwp)) 3620 continue; 3621 3622 // kvfree_rcu_drain_ready() might handle this krcp, if so give up. 3623 if (need_offload_krc(krcp)) { 3624 // Channel 1 corresponds to the SLAB-pointer bulk path. 3625 // Channel 2 corresponds to vmalloc-pointer bulk path. 3626 for (j = 0; j < FREE_N_CHANNELS; j++) { 3627 if (list_empty(&krwp->bulk_head_free[j])) { 3628 atomic_set(&krcp->bulk_count[j], 0); 3629 list_replace_init(&krcp->bulk_head[j], 3630 &krwp->bulk_head_free[j]); 3631 } 3632 } 3633 3634 // Channel 3 corresponds to both SLAB and vmalloc 3635 // objects queued on the linked list. 3636 if (!krwp->head_free) { 3637 krwp->head_free = krcp->head; 3638 get_state_synchronize_rcu_full(&krwp->head_free_gp_snap); 3639 atomic_set(&krcp->head_count, 0); 3640 WRITE_ONCE(krcp->head, NULL); 3641 } 3642 3643 // One work is per one batch, so there are three 3644 // "free channels", the batch can handle. It can 3645 // be that the work is in the pending state when 3646 // channels have been detached following by each 3647 // other. 3648 queue_rcu_work(system_wq, &krwp->rcu_work); 3649 } 3650 } 3651 3652 raw_spin_unlock_irqrestore(&krcp->lock, flags); 3653 3654 // If there is nothing to detach, it means that our job is 3655 // successfully done here. 
In case of having at least one 3656 // of the channels that is still busy we should rearm the 3657 // work to repeat an attempt. Because previous batches are 3658 // still in progress. 3659 if (need_offload_krc(krcp)) 3660 schedule_delayed_monitor_work(krcp); 3661 } 3662 3663 static enum hrtimer_restart 3664 schedule_page_work_fn(struct hrtimer *t) 3665 { 3666 struct kfree_rcu_cpu *krcp = 3667 container_of(t, struct kfree_rcu_cpu, hrtimer); 3668 3669 queue_delayed_work(system_highpri_wq, &krcp->page_cache_work, 0); 3670 return HRTIMER_NORESTART; 3671 } 3672 3673 static void fill_page_cache_func(struct work_struct *work) 3674 { 3675 struct kvfree_rcu_bulk_data *bnode; 3676 struct kfree_rcu_cpu *krcp = 3677 container_of(work, struct kfree_rcu_cpu, 3678 page_cache_work.work); 3679 unsigned long flags; 3680 int nr_pages; 3681 bool pushed; 3682 int i; 3683 3684 nr_pages = atomic_read(&krcp->backoff_page_cache_fill) ? 3685 1 : rcu_min_cached_objs; 3686 3687 for (i = READ_ONCE(krcp->nr_bkv_objs); i < nr_pages; i++) { 3688 bnode = (struct kvfree_rcu_bulk_data *) 3689 __get_free_page(GFP_KERNEL | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN); 3690 3691 if (!bnode) 3692 break; 3693 3694 raw_spin_lock_irqsave(&krcp->lock, flags); 3695 pushed = put_cached_bnode(krcp, bnode); 3696 raw_spin_unlock_irqrestore(&krcp->lock, flags); 3697 3698 if (!pushed) { 3699 free_page((unsigned long) bnode); 3700 break; 3701 } 3702 } 3703 3704 atomic_set(&krcp->work_in_progress, 0); 3705 atomic_set(&krcp->backoff_page_cache_fill, 0); 3706 } 3707 3708 static void 3709 run_page_cache_worker(struct kfree_rcu_cpu *krcp) 3710 { 3711 // If cache disabled, bail out. 3712 if (!rcu_min_cached_objs) 3713 return; 3714 3715 if (rcu_scheduler_active == RCU_SCHEDULER_RUNNING && 3716 !atomic_xchg(&krcp->work_in_progress, 1)) { 3717 if (atomic_read(&krcp->backoff_page_cache_fill)) { 3718 queue_delayed_work(system_wq, 3719 &krcp->page_cache_work, 3720 msecs_to_jiffies(rcu_delay_page_cache_fill_msec)); 3721 } else { 3722 hrtimer_init(&krcp->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 3723 krcp->hrtimer.function = schedule_page_work_fn; 3724 hrtimer_start(&krcp->hrtimer, 0, HRTIMER_MODE_REL); 3725 } 3726 } 3727 } 3728 3729 // Record ptr in a page managed by krcp, with the pre-krc_this_cpu_lock() 3730 // state specified by flags. If can_alloc is true, the caller must 3731 // be schedulable and not be holding any locks or mutexes that might be 3732 // acquired by the memory allocator or anything that it might invoke. 3733 // Returns true if ptr was successfully recorded, else the caller must 3734 // use a fallback. 3735 static inline bool 3736 add_ptr_to_bulk_krc_lock(struct kfree_rcu_cpu **krcp, 3737 unsigned long *flags, void *ptr, bool can_alloc) 3738 { 3739 struct kvfree_rcu_bulk_data *bnode; 3740 int idx; 3741 3742 *krcp = krc_this_cpu_lock(flags); 3743 if (unlikely(!(*krcp)->initialized)) 3744 return false; 3745 3746 idx = !!is_vmalloc_addr(ptr); 3747 bnode = list_first_entry_or_null(&(*krcp)->bulk_head[idx], 3748 struct kvfree_rcu_bulk_data, list); 3749 3750 /* Check if a new block is required. */ 3751 if (!bnode || bnode->nr_records == KVFREE_BULK_MAX_ENTR) { 3752 bnode = get_cached_bnode(*krcp); 3753 if (!bnode && can_alloc) { 3754 krc_this_cpu_unlock(*krcp, *flags); 3755 3756 // __GFP_NORETRY - allows a light-weight direct reclaim 3757 // what is OK from minimizing of fallback hitting point of 3758 // view. Apart of that it forbids any OOM invoking what is 3759 // also beneficial since we are about to release memory soon. 
3760 //
3761 // __GFP_NOMEMALLOC - prevents consuming all of the memory
3762 // reserves. Note that there is a fallback path.
3763 //
3764 // __GFP_NOWARN - allocation failures are expected here under
3765 // low-memory or high memory-pressure conditions, so do not
3766 // warn about them.
3767 bnode = (struct kvfree_rcu_bulk_data *)
3768 __get_free_page(GFP_KERNEL | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
3769 raw_spin_lock_irqsave(&(*krcp)->lock, *flags);
3770 }
3771
3772 if (!bnode)
3773 return false;
3774
3775 // Initialize the new block and attach it.
3776 bnode->nr_records = 0;
3777 list_add(&bnode->list, &(*krcp)->bulk_head[idx]);
3778 }
3779
3780 // Finally insert and update the GP for this page.
3781 bnode->records[bnode->nr_records++] = ptr;
3782 get_state_synchronize_rcu_full(&bnode->gp_snap);
3783 atomic_inc(&(*krcp)->bulk_count[idx]);
3784
3785 return true;
3786 }
3787
3788 /*
3789 * Queue a request for lazy invocation of the appropriate free routine
3790 * after a grace period. Please note that three paths are maintained,
3791 * two for the common case using arrays of pointers and a third one that
3792 * is used only when the main paths cannot be used, for example, due to
3793 * memory pressure.
3794 *
3795 * Each kvfree_call_rcu() request is added to a batch. The batch is drained
3796 * every KFREE_DRAIN_JIFFIES jiffies, and all objects in the batch are freed
3797 * in workqueue context. Batching requests together reduces the number of
3798 * grace periods needed during heavy kfree_rcu()/kvfree_rcu() load.
3799 */
3800 void kvfree_call_rcu(struct rcu_head *head, void *ptr)
3801 {
3802 unsigned long flags;
3803 struct kfree_rcu_cpu *krcp;
3804 bool success;
3805
3806 /*
3807 * The head-less variant comes with a limitation: it may be
3808 * used only from contexts that are allowed to sleep, hence
3809 * the might_sleep() below. For all other call sites, please
3810 * embed an rcu_head in your data structure and pass it in
3811 * as @head.
3812 */
3813 if (!head)
3814 might_sleep();
3815
3816 // Queue the object but don't yet schedule the batch.
3817 if (debug_rcu_head_queue(ptr)) {
3818 // Probable double kfree_rcu(), just leak.
3819 WARN_ONCE(1, "%s(): Double-freed call. rcu_head %p\n",
3820 __func__, head);
3821
3822 // Nothing more can be done, just leave.
3823 return;
3824 }
3825
3826 kasan_record_aux_stack_noalloc(ptr);
3827 success = add_ptr_to_bulk_krc_lock(&krcp, &flags, ptr, !head);
3828 if (!success) {
3829 run_page_cache_worker(krcp);
3830
3831 if (head == NULL)
3832 // Single-argument kvfree_rcu(): fall back to inline freeing.
3833 goto unlock_return;
3834
3835 head->func = ptr;
3836 head->next = krcp->head;
3837 WRITE_ONCE(krcp->head, head);
3838 atomic_inc(&krcp->head_count);
3839
3840 // Take a snapshot for this krcp.
3841 krcp->head_gp_snap = get_state_synchronize_rcu();
3842 success = true;
3843 }
3844
3845 /*
3846 * The kvfree_rcu() caller considers the pointer freed at this point
3847 * and likely removes any references to it. Since the actual slab
3848 * freeing (and kmemleak_free()) is deferred, tell kmemleak to ignore
3849 * this object (no scanning or false positives reporting).
3850 */
3851 kmemleak_ignore(ptr);
3852
3853 // Set timer to drain after KFREE_DRAIN_JIFFIES.
3854 if (rcu_scheduler_active == RCU_SCHEDULER_RUNNING)
3855 schedule_delayed_monitor_work(krcp);
3856
3857 unlock_return:
3858 krc_this_cpu_unlock(krcp, flags);
3859
3860 /*
3861 * Inline kvfree() after synchronize_rcu().
We can do 3862 * it from might_sleep() context only, so the current 3863 * CPU can pass the QS state. 3864 */ 3865 if (!success) { 3866 debug_rcu_head_unqueue((struct rcu_head *) ptr); 3867 synchronize_rcu(); 3868 kvfree(ptr); 3869 } 3870 } 3871 EXPORT_SYMBOL_GPL(kvfree_call_rcu); 3872 3873 static unsigned long 3874 kfree_rcu_shrink_count(struct shrinker *shrink, struct shrink_control *sc) 3875 { 3876 int cpu; 3877 unsigned long count = 0; 3878 3879 /* Snapshot count of all CPUs */ 3880 for_each_possible_cpu(cpu) { 3881 struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu); 3882 3883 count += krc_count(krcp); 3884 count += READ_ONCE(krcp->nr_bkv_objs); 3885 atomic_set(&krcp->backoff_page_cache_fill, 1); 3886 } 3887 3888 return count == 0 ? SHRINK_EMPTY : count; 3889 } 3890 3891 static unsigned long 3892 kfree_rcu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc) 3893 { 3894 int cpu, freed = 0; 3895 3896 for_each_possible_cpu(cpu) { 3897 int count; 3898 struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu); 3899 3900 count = krc_count(krcp); 3901 count += drain_page_cache(krcp); 3902 kfree_rcu_monitor(&krcp->monitor_work.work); 3903 3904 sc->nr_to_scan -= count; 3905 freed += count; 3906 3907 if (sc->nr_to_scan <= 0) 3908 break; 3909 } 3910 3911 return freed == 0 ? SHRINK_STOP : freed; 3912 } 3913 3914 void __init kfree_rcu_scheduler_running(void) 3915 { 3916 int cpu; 3917 3918 for_each_possible_cpu(cpu) { 3919 struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu); 3920 3921 if (need_offload_krc(krcp)) 3922 schedule_delayed_monitor_work(krcp); 3923 } 3924 } 3925 3926 /* 3927 * During early boot, any blocking grace-period wait automatically 3928 * implies a grace period. 3929 * 3930 * Later on, this could in theory be the case for kernels built with 3931 * CONFIG_SMP=y && CONFIG_PREEMPTION=y running on a single CPU, but this 3932 * is not a common case. Furthermore, this optimization would cause 3933 * the rcu_gp_oldstate structure to expand by 50%, so this potential 3934 * grace-period optimization is ignored once the scheduler is running. 3935 */ 3936 static int rcu_blocking_is_gp(void) 3937 { 3938 if (rcu_scheduler_active != RCU_SCHEDULER_INACTIVE) { 3939 might_sleep(); 3940 return false; 3941 } 3942 return true; 3943 } 3944 3945 /* 3946 * Helper function for the synchronize_rcu() API. 3947 */ 3948 static void synchronize_rcu_normal(void) 3949 { 3950 struct rcu_synchronize rs; 3951 3952 trace_rcu_sr_normal(rcu_state.name, &rs.head, TPS("request")); 3953 3954 if (!READ_ONCE(rcu_normal_wake_from_gp)) { 3955 wait_rcu_gp(call_rcu_hurry); 3956 goto trace_complete_out; 3957 } 3958 3959 init_rcu_head_on_stack(&rs.head); 3960 init_completion(&rs.completion); 3961 3962 /* 3963 * This code might be preempted, therefore take a GP 3964 * snapshot before adding a request. 3965 */ 3966 if (IS_ENABLED(CONFIG_PROVE_RCU)) 3967 rs.head.func = (void *) get_state_synchronize_rcu(); 3968 3969 rcu_sr_normal_add_req(&rs); 3970 3971 /* Kick a GP and start waiting. */ 3972 (void) start_poll_synchronize_rcu(); 3973 3974 /* Now we can wait. */ 3975 wait_for_completion(&rs.completion); 3976 destroy_rcu_head_on_stack(&rs.head); 3977 3978 trace_complete_out: 3979 trace_rcu_sr_normal(rcu_state.name, &rs.head, TPS("complete")); 3980 } 3981 3982 /** 3983 * synchronize_rcu - wait until a grace period has elapsed. 3984 * 3985 * Control will return to the caller some time after a full grace 3986 * period has elapsed, in other words after all currently executing RCU 3987 * read-side critical sections have completed. 
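 *
 * As an illustrative sketch only (the pointer "gp", its update-side lock,
 * and "struct foo" are hypothetical), the classic update-side pattern is:
 *
 *	old = rcu_dereference_protected(gp, lockdep_is_held(&gp_lock));
 *	rcu_assign_pointer(gp, new_foo);
 *	synchronize_rcu();	// Wait for pre-existing readers to finish.
 *	kfree(old);
 *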
Note, however, that 3988 * upon return from synchronize_rcu(), the caller might well be executing 3989 * concurrently with new RCU read-side critical sections that began while 3990 * synchronize_rcu() was waiting. 3991 * 3992 * RCU read-side critical sections are delimited by rcu_read_lock() 3993 * and rcu_read_unlock(), and may be nested. In addition, but only in 3994 * v5.0 and later, regions of code across which interrupts, preemption, 3995 * or softirqs have been disabled also serve as RCU read-side critical 3996 * sections. This includes hardware interrupt handlers, softirq handlers, 3997 * and NMI handlers. 3998 * 3999 * Note that this guarantee implies further memory-ordering guarantees. 4000 * On systems with more than one CPU, when synchronize_rcu() returns, 4001 * each CPU is guaranteed to have executed a full memory barrier since 4002 * the end of its last RCU read-side critical section whose beginning 4003 * preceded the call to synchronize_rcu(). In addition, each CPU having 4004 * an RCU read-side critical section that extends beyond the return from 4005 * synchronize_rcu() is guaranteed to have executed a full memory barrier 4006 * after the beginning of synchronize_rcu() and before the beginning of 4007 * that RCU read-side critical section. Note that these guarantees include 4008 * CPUs that are offline, idle, or executing in user mode, as well as CPUs 4009 * that are executing in the kernel. 4010 * 4011 * Furthermore, if CPU A invoked synchronize_rcu(), which returned 4012 * to its caller on CPU B, then both CPU A and CPU B are guaranteed 4013 * to have executed a full memory barrier during the execution of 4014 * synchronize_rcu() -- even if CPU A and CPU B are the same CPU (but 4015 * again only if the system has more than one CPU). 4016 * 4017 * Implementation of these memory-ordering guarantees is described here: 4018 * Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.rst. 4019 */ 4020 void synchronize_rcu(void) 4021 { 4022 unsigned long flags; 4023 struct rcu_node *rnp; 4024 4025 RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) || 4026 lock_is_held(&rcu_lock_map) || 4027 lock_is_held(&rcu_sched_lock_map), 4028 "Illegal synchronize_rcu() in RCU read-side critical section"); 4029 if (!rcu_blocking_is_gp()) { 4030 if (rcu_gp_is_expedited()) 4031 synchronize_rcu_expedited(); 4032 else 4033 synchronize_rcu_normal(); 4034 return; 4035 } 4036 4037 // Context allows vacuous grace periods. 4038 // Note well that this code runs with !PREEMPT && !SMP. 4039 // In addition, all code that advances grace periods runs at 4040 // process level. Therefore, this normal GP overlaps with other 4041 // normal GPs only by being fully nested within them, which allows 4042 // reuse of ->gp_seq_polled_snap. 4043 rcu_poll_gp_seq_start_unlocked(&rcu_state.gp_seq_polled_snap); 4044 rcu_poll_gp_seq_end_unlocked(&rcu_state.gp_seq_polled_snap); 4045 4046 // Update the normal grace-period counters to record 4047 // this grace period, but only those used by the boot CPU. 4048 // The rcu_scheduler_starting() will take care of the rest of 4049 // these counters. 
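// Only the boot CPU is online here (see the WARN_ON_ONCE() below),
// so updating the ->gp_seq fields along its leaf-to-root rcu_node
// chain is all that is needed.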
4050 local_irq_save(flags); 4051 WARN_ON_ONCE(num_online_cpus() > 1); 4052 rcu_state.gp_seq += (1 << RCU_SEQ_CTR_SHIFT); 4053 for (rnp = this_cpu_ptr(&rcu_data)->mynode; rnp; rnp = rnp->parent) 4054 rnp->gp_seq_needed = rnp->gp_seq = rcu_state.gp_seq; 4055 local_irq_restore(flags); 4056 } 4057 EXPORT_SYMBOL_GPL(synchronize_rcu); 4058 4059 /** 4060 * get_completed_synchronize_rcu_full - Return a full pre-completed polled state cookie 4061 * @rgosp: Place to put state cookie 4062 * 4063 * Stores into @rgosp a value that will always be treated by functions 4064 * like poll_state_synchronize_rcu_full() as a cookie whose grace period 4065 * has already completed. 4066 */ 4067 void get_completed_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp) 4068 { 4069 rgosp->rgos_norm = RCU_GET_STATE_COMPLETED; 4070 rgosp->rgos_exp = RCU_GET_STATE_COMPLETED; 4071 } 4072 EXPORT_SYMBOL_GPL(get_completed_synchronize_rcu_full); 4073 4074 /** 4075 * get_state_synchronize_rcu - Snapshot current RCU state 4076 * 4077 * Returns a cookie that is used by a later call to cond_synchronize_rcu() 4078 * or poll_state_synchronize_rcu() to determine whether or not a full 4079 * grace period has elapsed in the meantime. 4080 */ 4081 unsigned long get_state_synchronize_rcu(void) 4082 { 4083 /* 4084 * Any prior manipulation of RCU-protected data must happen 4085 * before the load from ->gp_seq. 4086 */ 4087 smp_mb(); /* ^^^ */ 4088 return rcu_seq_snap(&rcu_state.gp_seq_polled); 4089 } 4090 EXPORT_SYMBOL_GPL(get_state_synchronize_rcu); 4091 4092 /** 4093 * get_state_synchronize_rcu_full - Snapshot RCU state, both normal and expedited 4094 * @rgosp: location to place combined normal/expedited grace-period state 4095 * 4096 * Places the normal and expedited grace-period states in @rgosp. This 4097 * state value can be passed to a later call to cond_synchronize_rcu_full() 4098 * or poll_state_synchronize_rcu_full() to determine whether or not a 4099 * grace period (whether normal or expedited) has elapsed in the meantime. 4100 * The rcu_gp_oldstate structure takes up twice the memory of an unsigned 4101 * long, but is guaranteed to see all grace periods. In contrast, the 4102 * combined state occupies less memory, but can sometimes fail to take 4103 * grace periods into account. 4104 * 4105 * This does not guarantee that the needed grace period will actually 4106 * start. 4107 */ 4108 void get_state_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp) 4109 { 4110 struct rcu_node *rnp = rcu_get_root(); 4111 4112 /* 4113 * Any prior manipulation of RCU-protected data must happen 4114 * before the loads from ->gp_seq and ->expedited_sequence. 4115 */ 4116 smp_mb(); /* ^^^ */ 4117 rgosp->rgos_norm = rcu_seq_snap(&rnp->gp_seq); 4118 rgosp->rgos_exp = rcu_seq_snap(&rcu_state.expedited_sequence); 4119 } 4120 EXPORT_SYMBOL_GPL(get_state_synchronize_rcu_full); 4121 4122 /* 4123 * Helper function for start_poll_synchronize_rcu() and 4124 * start_poll_synchronize_rcu_full(). 4125 */ 4126 static void start_poll_synchronize_rcu_common(void) 4127 { 4128 unsigned long flags; 4129 bool needwake; 4130 struct rcu_data *rdp; 4131 struct rcu_node *rnp; 4132 4133 lockdep_assert_irqs_enabled(); 4134 local_irq_save(flags); 4135 rdp = this_cpu_ptr(&rcu_data); 4136 rnp = rdp->mynode; 4137 raw_spin_lock_rcu_node(rnp); // irqs already disabled. 4138 // Note it is possible for a grace period to have elapsed between 4139 // the above call to get_state_synchronize_rcu() and the below call 4140 // to rcu_seq_snap. 
This is OK, the worst that happens is that we 4141 // get a grace period that no one needed. These accesses are ordered 4142 // by smp_mb(), and we are accessing them in the opposite order 4143 // from which they are updated at grace-period start, as required. 4144 needwake = rcu_start_this_gp(rnp, rdp, rcu_seq_snap(&rcu_state.gp_seq)); 4145 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 4146 if (needwake) 4147 rcu_gp_kthread_wake(); 4148 } 4149 4150 /** 4151 * start_poll_synchronize_rcu - Snapshot and start RCU grace period 4152 * 4153 * Returns a cookie that is used by a later call to cond_synchronize_rcu() 4154 * or poll_state_synchronize_rcu() to determine whether or not a full 4155 * grace period has elapsed in the meantime. If the needed grace period 4156 * is not already slated to start, notifies RCU core of the need for that 4157 * grace period. 4158 * 4159 * Interrupts must be enabled for the case where it is necessary to awaken 4160 * the grace-period kthread. 4161 */ 4162 unsigned long start_poll_synchronize_rcu(void) 4163 { 4164 unsigned long gp_seq = get_state_synchronize_rcu(); 4165 4166 start_poll_synchronize_rcu_common(); 4167 return gp_seq; 4168 } 4169 EXPORT_SYMBOL_GPL(start_poll_synchronize_rcu); 4170 4171 /** 4172 * start_poll_synchronize_rcu_full - Take a full snapshot and start RCU grace period 4173 * @rgosp: value from get_state_synchronize_rcu_full() or start_poll_synchronize_rcu_full() 4174 * 4175 * Places the normal and expedited grace-period states in *@rgos. This 4176 * state value can be passed to a later call to cond_synchronize_rcu_full() 4177 * or poll_state_synchronize_rcu_full() to determine whether or not a 4178 * grace period (whether normal or expedited) has elapsed in the meantime. 4179 * If the needed grace period is not already slated to start, notifies 4180 * RCU core of the need for that grace period. 4181 * 4182 * Interrupts must be enabled for the case where it is necessary to awaken 4183 * the grace-period kthread. 4184 */ 4185 void start_poll_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp) 4186 { 4187 get_state_synchronize_rcu_full(rgosp); 4188 4189 start_poll_synchronize_rcu_common(); 4190 } 4191 EXPORT_SYMBOL_GPL(start_poll_synchronize_rcu_full); 4192 4193 /** 4194 * poll_state_synchronize_rcu - Has the specified RCU grace period completed? 4195 * @oldstate: value from get_state_synchronize_rcu() or start_poll_synchronize_rcu() 4196 * 4197 * If a full RCU grace period has elapsed since the earlier call from 4198 * which @oldstate was obtained, return @true, otherwise return @false. 4199 * If @false is returned, it is the caller's responsibility to invoke this 4200 * function later on until it does return @true. Alternatively, the caller 4201 * can explicitly wait for a grace period, for example, by passing @oldstate 4202 * to either cond_synchronize_rcu() or cond_synchronize_rcu_expedited() 4203 * on the one hand or by directly invoking either synchronize_rcu() or 4204 * synchronize_rcu_expedited() on the other. 4205 * 4206 * Yes, this function does not take counter wrap into account. 4207 * But counter wrap is harmless. If the counter wraps, we have waited for 4208 * more than a billion grace periods (and way more on a 64-bit system!). 4209 * Those needing to keep old state values for very long time periods 4210 * (many hours even on 32-bit systems) should check them occasionally and 4211 * either refresh them or set a flag indicating that the grace period has 4212 * completed. 
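 *
 * As an illustrative sketch only (the "old_entry" object and defer_free()
 * are hypothetical), the usual polling pattern looks like this:
 *
 *	... remove old_entry from all reader-visible paths ...
 *	cookie = start_poll_synchronize_rcu();
 *	...
 *	if (poll_state_synchronize_rcu(cookie))
 *		kfree(old_entry);	// A full grace period has elapsed.
 *	else
 *		defer_free(old_entry);	// Poll again later.
 *
 * Again, holders of very old cookies should refresh them from time to time.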
Alternatively, they can use get_completed_synchronize_rcu() 4213 * to get a guaranteed-completed grace-period state. 4214 * 4215 * In addition, because oldstate compresses the grace-period state for 4216 * both normal and expedited grace periods into a single unsigned long, 4217 * it can miss a grace period when synchronize_rcu() runs concurrently 4218 * with synchronize_rcu_expedited(). If this is unacceptable, please 4219 * instead use the _full() variant of these polling APIs. 4220 * 4221 * This function provides the same memory-ordering guarantees that 4222 * would be provided by a synchronize_rcu() that was invoked at the call 4223 * to the function that provided @oldstate, and that returned at the end 4224 * of this function. 4225 */ 4226 bool poll_state_synchronize_rcu(unsigned long oldstate) 4227 { 4228 if (oldstate == RCU_GET_STATE_COMPLETED || 4229 rcu_seq_done_exact(&rcu_state.gp_seq_polled, oldstate)) { 4230 smp_mb(); /* Ensure GP ends before subsequent accesses. */ 4231 return true; 4232 } 4233 return false; 4234 } 4235 EXPORT_SYMBOL_GPL(poll_state_synchronize_rcu); 4236 4237 /** 4238 * poll_state_synchronize_rcu_full - Has the specified RCU grace period completed? 4239 * @rgosp: value from get_state_synchronize_rcu_full() or start_poll_synchronize_rcu_full() 4240 * 4241 * If a full RCU grace period has elapsed since the earlier call from 4242 * which *rgosp was obtained, return @true, otherwise return @false. 4243 * If @false is returned, it is the caller's responsibility to invoke this 4244 * function later on until it does return @true. Alternatively, the caller 4245 * can explicitly wait for a grace period, for example, by passing @rgosp 4246 * to cond_synchronize_rcu() or by directly invoking synchronize_rcu(). 4247 * 4248 * Yes, this function does not take counter wrap into account. 4249 * But counter wrap is harmless. If the counter wraps, we have waited 4250 * for more than a billion grace periods (and way more on a 64-bit 4251 * system!). Those needing to keep rcu_gp_oldstate values for very 4252 * long time periods (many hours even on 32-bit systems) should check 4253 * them occasionally and either refresh them or set a flag indicating 4254 * that the grace period has completed. Alternatively, they can use 4255 * get_completed_synchronize_rcu_full() to get a guaranteed-completed 4256 * grace-period state. 4257 * 4258 * This function provides the same memory-ordering guarantees that would 4259 * be provided by a synchronize_rcu() that was invoked at the call to 4260 * the function that provided @rgosp, and that returned at the end of this 4261 * function. And this guarantee requires that the root rcu_node structure's 4262 * ->gp_seq field be checked instead of that of the rcu_state structure. 4263 * The problem is that the just-ending grace-period's callbacks can be 4264 * invoked between the time that the root rcu_node structure's ->gp_seq 4265 * field is updated and the time that the rcu_state structure's ->gp_seq 4266 * field is updated. Therefore, if a single synchronize_rcu() is to 4267 * cause a subsequent poll_state_synchronize_rcu_full() to return @true, 4268 * then the root rcu_node structure is the one that needs to be polled. 4269 */ 4270 bool poll_state_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp) 4271 { 4272 struct rcu_node *rnp = rcu_get_root(); 4273 4274 smp_mb(); // Order against root rcu_node structure grace-period cleanup. 
4275 if (rgosp->rgos_norm == RCU_GET_STATE_COMPLETED || 4276 rcu_seq_done_exact(&rnp->gp_seq, rgosp->rgos_norm) || 4277 rgosp->rgos_exp == RCU_GET_STATE_COMPLETED || 4278 rcu_seq_done_exact(&rcu_state.expedited_sequence, rgosp->rgos_exp)) { 4279 smp_mb(); /* Ensure GP ends before subsequent accesses. */ 4280 return true; 4281 } 4282 return false; 4283 } 4284 EXPORT_SYMBOL_GPL(poll_state_synchronize_rcu_full); 4285 4286 /** 4287 * cond_synchronize_rcu - Conditionally wait for an RCU grace period 4288 * @oldstate: value from get_state_synchronize_rcu(), start_poll_synchronize_rcu(), or start_poll_synchronize_rcu_expedited() 4289 * 4290 * If a full RCU grace period has elapsed since the earlier call to 4291 * get_state_synchronize_rcu() or start_poll_synchronize_rcu(), just return. 4292 * Otherwise, invoke synchronize_rcu() to wait for a full grace period. 4293 * 4294 * Yes, this function does not take counter wrap into account. 4295 * But counter wrap is harmless. If the counter wraps, we have waited for 4296 * more than 2 billion grace periods (and way more on a 64-bit system!), 4297 * so waiting for a couple of additional grace periods should be just fine. 4298 * 4299 * This function provides the same memory-ordering guarantees that 4300 * would be provided by a synchronize_rcu() that was invoked at the call 4301 * to the function that provided @oldstate and that returned at the end 4302 * of this function. 4303 */ 4304 void cond_synchronize_rcu(unsigned long oldstate) 4305 { 4306 if (!poll_state_synchronize_rcu(oldstate)) 4307 synchronize_rcu(); 4308 } 4309 EXPORT_SYMBOL_GPL(cond_synchronize_rcu); 4310 4311 /** 4312 * cond_synchronize_rcu_full - Conditionally wait for an RCU grace period 4313 * @rgosp: value from get_state_synchronize_rcu_full(), start_poll_synchronize_rcu_full(), or start_poll_synchronize_rcu_expedited_full() 4314 * 4315 * If a full RCU grace period has elapsed since the call to 4316 * get_state_synchronize_rcu_full(), start_poll_synchronize_rcu_full(), 4317 * or start_poll_synchronize_rcu_expedited_full() from which @rgosp was 4318 * obtained, just return. Otherwise, invoke synchronize_rcu() to wait 4319 * for a full grace period. 4320 * 4321 * Yes, this function does not take counter wrap into account. 4322 * But counter wrap is harmless. If the counter wraps, we have waited for 4323 * more than 2 billion grace periods (and way more on a 64-bit system!), 4324 * so waiting for a couple of additional grace periods should be just fine. 4325 * 4326 * This function provides the same memory-ordering guarantees that 4327 * would be provided by a synchronize_rcu() that was invoked at the call 4328 * to the function that provided @rgosp and that returned at the end of 4329 * this function. 4330 */ 4331 void cond_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp) 4332 { 4333 if (!poll_state_synchronize_rcu_full(rgosp)) 4334 synchronize_rcu(); 4335 } 4336 EXPORT_SYMBOL_GPL(cond_synchronize_rcu_full); 4337 4338 /* 4339 * Check to see if there is any immediate RCU-related work to be done by 4340 * the current CPU, returning 1 if so and zero otherwise. The checks are 4341 * in order of increasing expense: checks that can be carried out against 4342 * CPU-local state are performed first. However, we must check for CPU 4343 * stalls first, else we might not get a chance. 
4344 */ 4345 static int rcu_pending(int user) 4346 { 4347 bool gp_in_progress; 4348 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); 4349 struct rcu_node *rnp = rdp->mynode; 4350 4351 lockdep_assert_irqs_disabled(); 4352 4353 /* Check for CPU stalls, if enabled. */ 4354 check_cpu_stall(rdp); 4355 4356 /* Does this CPU need a deferred NOCB wakeup? */ 4357 if (rcu_nocb_need_deferred_wakeup(rdp, RCU_NOCB_WAKE)) 4358 return 1; 4359 4360 /* Is this a nohz_full CPU in userspace or idle? (Ignore RCU if so.) */ 4361 gp_in_progress = rcu_gp_in_progress(); 4362 if ((user || rcu_is_cpu_rrupt_from_idle() || 4363 (gp_in_progress && 4364 time_before(jiffies, READ_ONCE(rcu_state.gp_start) + 4365 nohz_full_patience_delay_jiffies))) && 4366 rcu_nohz_full_cpu()) 4367 return 0; 4368 4369 /* Is the RCU core waiting for a quiescent state from this CPU? */ 4370 if (rdp->core_needs_qs && !rdp->cpu_no_qs.b.norm && gp_in_progress) 4371 return 1; 4372 4373 /* Does this CPU have callbacks ready to invoke? */ 4374 if (!rcu_rdp_is_offloaded(rdp) && 4375 rcu_segcblist_ready_cbs(&rdp->cblist)) 4376 return 1; 4377 4378 /* Has RCU gone idle with this CPU needing another grace period? */ 4379 if (!gp_in_progress && rcu_segcblist_is_enabled(&rdp->cblist) && 4380 !rcu_rdp_is_offloaded(rdp) && 4381 !rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL)) 4382 return 1; 4383 4384 /* Have RCU grace period completed or started? */ 4385 if (rcu_seq_current(&rnp->gp_seq) != rdp->gp_seq || 4386 unlikely(READ_ONCE(rdp->gpwrap))) /* outside lock */ 4387 return 1; 4388 4389 /* nothing to do */ 4390 return 0; 4391 } 4392 4393 /* 4394 * Helper function for rcu_barrier() tracing. If tracing is disabled, 4395 * the compiler is expected to optimize this away. 4396 */ 4397 static void rcu_barrier_trace(const char *s, int cpu, unsigned long done) 4398 { 4399 trace_rcu_barrier(rcu_state.name, s, cpu, 4400 atomic_read(&rcu_state.barrier_cpu_count), done); 4401 } 4402 4403 /* 4404 * RCU callback function for rcu_barrier(). If we are last, wake 4405 * up the task executing rcu_barrier(). 4406 * 4407 * Note that the value of rcu_state.barrier_sequence must be captured 4408 * before the atomic_dec_and_test(). Otherwise, if this CPU is not last, 4409 * other CPUs might count the value down to zero before this CPU gets 4410 * around to invoking rcu_barrier_trace(), which might result in bogus 4411 * data from the next instance of rcu_barrier(). 4412 */ 4413 static void rcu_barrier_callback(struct rcu_head *rhp) 4414 { 4415 unsigned long __maybe_unused s = rcu_state.barrier_sequence; 4416 4417 if (atomic_dec_and_test(&rcu_state.barrier_cpu_count)) { 4418 rcu_barrier_trace(TPS("LastCB"), -1, s); 4419 complete(&rcu_state.barrier_completion); 4420 } else { 4421 rcu_barrier_trace(TPS("CB"), -1, s); 4422 } 4423 } 4424 4425 /* 4426 * If needed, entrain an rcu_barrier() callback on rdp->cblist. 
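 * Entraining means appending the callback behind everything already
 * queued on this CPU, so that it is invoked only once all of those
 * earlier callbacks have been invoked.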
4427 */ 4428 static void rcu_barrier_entrain(struct rcu_data *rdp) 4429 { 4430 unsigned long gseq = READ_ONCE(rcu_state.barrier_sequence); 4431 unsigned long lseq = READ_ONCE(rdp->barrier_seq_snap); 4432 bool wake_nocb = false; 4433 bool was_alldone = false; 4434 4435 lockdep_assert_held(&rcu_state.barrier_lock); 4436 if (rcu_seq_state(lseq) || !rcu_seq_state(gseq) || rcu_seq_ctr(lseq) != rcu_seq_ctr(gseq)) 4437 return; 4438 rcu_barrier_trace(TPS("IRQ"), -1, rcu_state.barrier_sequence); 4439 rdp->barrier_head.func = rcu_barrier_callback; 4440 debug_rcu_head_queue(&rdp->barrier_head); 4441 rcu_nocb_lock(rdp); 4442 /* 4443 * Flush bypass and wakeup rcuog if we add callbacks to an empty regular 4444 * queue. This way we don't wait for bypass timer that can reach seconds 4445 * if it's fully lazy. 4446 */ 4447 was_alldone = rcu_rdp_is_offloaded(rdp) && !rcu_segcblist_pend_cbs(&rdp->cblist); 4448 WARN_ON_ONCE(!rcu_nocb_flush_bypass(rdp, NULL, jiffies, false)); 4449 wake_nocb = was_alldone && rcu_segcblist_pend_cbs(&rdp->cblist); 4450 if (rcu_segcblist_entrain(&rdp->cblist, &rdp->barrier_head)) { 4451 atomic_inc(&rcu_state.barrier_cpu_count); 4452 } else { 4453 debug_rcu_head_unqueue(&rdp->barrier_head); 4454 rcu_barrier_trace(TPS("IRQNQ"), -1, rcu_state.barrier_sequence); 4455 } 4456 rcu_nocb_unlock(rdp); 4457 if (wake_nocb) 4458 wake_nocb_gp(rdp, false); 4459 smp_store_release(&rdp->barrier_seq_snap, gseq); 4460 } 4461 4462 /* 4463 * Called with preemption disabled, and from cross-cpu IRQ context. 4464 */ 4465 static void rcu_barrier_handler(void *cpu_in) 4466 { 4467 uintptr_t cpu = (uintptr_t)cpu_in; 4468 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); 4469 4470 lockdep_assert_irqs_disabled(); 4471 WARN_ON_ONCE(cpu != rdp->cpu); 4472 WARN_ON_ONCE(cpu != smp_processor_id()); 4473 raw_spin_lock(&rcu_state.barrier_lock); 4474 rcu_barrier_entrain(rdp); 4475 raw_spin_unlock(&rcu_state.barrier_lock); 4476 } 4477 4478 /** 4479 * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete. 4480 * 4481 * Note that this primitive does not necessarily wait for an RCU grace period 4482 * to complete. For example, if there are no RCU callbacks queued anywhere 4483 * in the system, then rcu_barrier() is within its rights to return 4484 * immediately, without waiting for anything, much less an RCU grace period. 4485 */ 4486 void rcu_barrier(void) 4487 { 4488 uintptr_t cpu; 4489 unsigned long flags; 4490 unsigned long gseq; 4491 struct rcu_data *rdp; 4492 unsigned long s = rcu_seq_snap(&rcu_state.barrier_sequence); 4493 4494 rcu_barrier_trace(TPS("Begin"), -1, s); 4495 4496 /* Take mutex to serialize concurrent rcu_barrier() requests. */ 4497 mutex_lock(&rcu_state.barrier_mutex); 4498 4499 /* Did someone else do our work for us? */ 4500 if (rcu_seq_done(&rcu_state.barrier_sequence, s)) { 4501 rcu_barrier_trace(TPS("EarlyExit"), -1, rcu_state.barrier_sequence); 4502 smp_mb(); /* caller's subsequent code after above check. */ 4503 mutex_unlock(&rcu_state.barrier_mutex); 4504 return; 4505 } 4506 4507 /* Mark the start of the barrier operation. */ 4508 raw_spin_lock_irqsave(&rcu_state.barrier_lock, flags); 4509 rcu_seq_start(&rcu_state.barrier_sequence); 4510 gseq = rcu_state.barrier_sequence; 4511 rcu_barrier_trace(TPS("Inc1"), -1, rcu_state.barrier_sequence); 4512 4513 /* 4514 * Initialize the count to two rather than to zero in order 4515 * to avoid a too-soon return to zero in case of an immediate 4516 * invocation of the just-enqueued callback (or preemption of 4517 * this task). 
Exclude CPU-hotplug operations to ensure that no 4518 * offline non-offloaded CPU has callbacks queued. 4519 */ 4520 init_completion(&rcu_state.barrier_completion); 4521 atomic_set(&rcu_state.barrier_cpu_count, 2); 4522 raw_spin_unlock_irqrestore(&rcu_state.barrier_lock, flags); 4523 4524 /* 4525 * Force each CPU with callbacks to register a new callback. 4526 * When that callback is invoked, we will know that all of the 4527 * corresponding CPU's preceding callbacks have been invoked. 4528 */ 4529 for_each_possible_cpu(cpu) { 4530 rdp = per_cpu_ptr(&rcu_data, cpu); 4531 retry: 4532 if (smp_load_acquire(&rdp->barrier_seq_snap) == gseq) 4533 continue; 4534 raw_spin_lock_irqsave(&rcu_state.barrier_lock, flags); 4535 if (!rcu_segcblist_n_cbs(&rdp->cblist)) { 4536 WRITE_ONCE(rdp->barrier_seq_snap, gseq); 4537 raw_spin_unlock_irqrestore(&rcu_state.barrier_lock, flags); 4538 rcu_barrier_trace(TPS("NQ"), cpu, rcu_state.barrier_sequence); 4539 continue; 4540 } 4541 if (!rcu_rdp_cpu_online(rdp)) { 4542 rcu_barrier_entrain(rdp); 4543 WARN_ON_ONCE(READ_ONCE(rdp->barrier_seq_snap) != gseq); 4544 raw_spin_unlock_irqrestore(&rcu_state.barrier_lock, flags); 4545 rcu_barrier_trace(TPS("OfflineNoCBQ"), cpu, rcu_state.barrier_sequence); 4546 continue; 4547 } 4548 raw_spin_unlock_irqrestore(&rcu_state.barrier_lock, flags); 4549 if (smp_call_function_single(cpu, rcu_barrier_handler, (void *)cpu, 1)) { 4550 schedule_timeout_uninterruptible(1); 4551 goto retry; 4552 } 4553 WARN_ON_ONCE(READ_ONCE(rdp->barrier_seq_snap) != gseq); 4554 rcu_barrier_trace(TPS("OnlineQ"), cpu, rcu_state.barrier_sequence); 4555 } 4556 4557 /* 4558 * Now that we have an rcu_barrier_callback() callback on each 4559 * CPU, and thus each counted, remove the initial count. 4560 */ 4561 if (atomic_sub_and_test(2, &rcu_state.barrier_cpu_count)) 4562 complete(&rcu_state.barrier_completion); 4563 4564 /* Wait for all rcu_barrier_callback() callbacks to be invoked. */ 4565 wait_for_completion(&rcu_state.barrier_completion); 4566 4567 /* Mark the end of the barrier operation. */ 4568 rcu_barrier_trace(TPS("Inc2"), -1, rcu_state.barrier_sequence); 4569 rcu_seq_end(&rcu_state.barrier_sequence); 4570 gseq = rcu_state.barrier_sequence; 4571 for_each_possible_cpu(cpu) { 4572 rdp = per_cpu_ptr(&rcu_data, cpu); 4573 4574 WRITE_ONCE(rdp->barrier_seq_snap, gseq); 4575 } 4576 4577 /* Other rcu_barrier() invocations can now safely proceed. */ 4578 mutex_unlock(&rcu_state.barrier_mutex); 4579 } 4580 EXPORT_SYMBOL_GPL(rcu_barrier); 4581 4582 static unsigned long rcu_barrier_last_throttle; 4583 4584 /** 4585 * rcu_barrier_throttled - Do rcu_barrier(), but limit to one per second 4586 * 4587 * This can be thought of as guard rails around rcu_barrier() that 4588 * permits unrestricted userspace use, at least assuming the hardware's 4589 * try_cmpxchg() is robust. There will be at most one call per second to 4590 * rcu_barrier() system-wide from use of this function, which means that 4591 * callers might needlessly wait a second or three. 4592 * 4593 * This is intended for use by test suites to avoid OOM by flushing RCU 4594 * callbacks from the previous test before starting the next. See the 4595 * rcutree.do_rcu_barrier module parameter for more information. 4596 * 4597 * Why not simply make rcu_barrier() more scalable? That might be 4598 * the eventual endpoint, but let's keep it simple for the time being. 
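 *
 * As an illustrative example (assuming the usual sysfs layout for module
 * parameters under the "rcutree." prefix), a test harness might invoke it
 * by writing to the corresponding parameter file:
 *
 *	echo 1 > /sys/module/rcutree/parameters/do_rcu_barrier
 *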
4599 * Note that the module parameter infrastructure serializes calls to a 4600 * given .set() function, but should concurrent .set() invocation ever be 4601 * possible, we are ready! 4602 */ 4603 static void rcu_barrier_throttled(void) 4604 { 4605 unsigned long j = jiffies; 4606 unsigned long old = READ_ONCE(rcu_barrier_last_throttle); 4607 unsigned long s = rcu_seq_snap(&rcu_state.barrier_sequence); 4608 4609 while (time_in_range(j, old, old + HZ / 16) || 4610 !try_cmpxchg(&rcu_barrier_last_throttle, &old, j)) { 4611 schedule_timeout_idle(HZ / 16); 4612 if (rcu_seq_done(&rcu_state.barrier_sequence, s)) { 4613 smp_mb(); /* caller's subsequent code after above check. */ 4614 return; 4615 } 4616 j = jiffies; 4617 old = READ_ONCE(rcu_barrier_last_throttle); 4618 } 4619 rcu_barrier(); 4620 } 4621 4622 /* 4623 * Invoke rcu_barrier_throttled() when a rcutree.do_rcu_barrier 4624 * request arrives. We insist on a true value to allow for possible 4625 * future expansion. 4626 */ 4627 static int param_set_do_rcu_barrier(const char *val, const struct kernel_param *kp) 4628 { 4629 bool b; 4630 int ret; 4631 4632 if (rcu_scheduler_active != RCU_SCHEDULER_RUNNING) 4633 return -EAGAIN; 4634 ret = kstrtobool(val, &b); 4635 if (!ret && b) { 4636 atomic_inc((atomic_t *)kp->arg); 4637 rcu_barrier_throttled(); 4638 atomic_dec((atomic_t *)kp->arg); 4639 } 4640 return ret; 4641 } 4642 4643 /* 4644 * Output the number of outstanding rcutree.do_rcu_barrier requests. 4645 */ 4646 static int param_get_do_rcu_barrier(char *buffer, const struct kernel_param *kp) 4647 { 4648 return sprintf(buffer, "%d\n", atomic_read((atomic_t *)kp->arg)); 4649 } 4650 4651 static const struct kernel_param_ops do_rcu_barrier_ops = { 4652 .set = param_set_do_rcu_barrier, 4653 .get = param_get_do_rcu_barrier, 4654 }; 4655 static atomic_t do_rcu_barrier; 4656 module_param_cb(do_rcu_barrier, &do_rcu_barrier_ops, &do_rcu_barrier, 0644); 4657 4658 /* 4659 * Compute the mask of online CPUs for the specified rcu_node structure. 4660 * This will not be stable unless the rcu_node structure's ->lock is 4661 * held, but the bit corresponding to the current CPU will be stable 4662 * in most contexts. 4663 */ 4664 static unsigned long rcu_rnp_online_cpus(struct rcu_node *rnp) 4665 { 4666 return READ_ONCE(rnp->qsmaskinitnext); 4667 } 4668 4669 /* 4670 * Is the CPU corresponding to the specified rcu_data structure online 4671 * from RCU's perspective? This perspective is given by that structure's 4672 * ->qsmaskinitnext field rather than by the global cpu_online_mask. 4673 */ 4674 static bool rcu_rdp_cpu_online(struct rcu_data *rdp) 4675 { 4676 return !!(rdp->grpmask & rcu_rnp_online_cpus(rdp->mynode)); 4677 } 4678 4679 bool rcu_cpu_online(int cpu) 4680 { 4681 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); 4682 4683 return rcu_rdp_cpu_online(rdp); 4684 } 4685 4686 #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU) 4687 4688 /* 4689 * Is the current CPU online as far as RCU is concerned? 4690 * 4691 * Disable preemption to avoid false positives that could otherwise 4692 * happen due to the current CPU number being sampled, this task being 4693 * preempted, its old CPU being taken offline, resuming on some other CPU, 4694 * then determining that its old CPU is now offline. 4695 * 4696 * Disable checking if in an NMI handler because we cannot safely 4697 * report errors from NMI handlers anyway. In addition, it is OK to use 4698 * RCU on an offline processor during initial boot, hence the check for 4699 * rcu_scheduler_fully_active. 
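 * This is used by the lockdep-RCU error checking, hence the
 * CONFIG_PROVE_RCU guard around this code.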
4700 */ 4701 bool rcu_lockdep_current_cpu_online(void) 4702 { 4703 struct rcu_data *rdp; 4704 bool ret = false; 4705 4706 if (in_nmi() || !rcu_scheduler_fully_active) 4707 return true; 4708 preempt_disable_notrace(); 4709 rdp = this_cpu_ptr(&rcu_data); 4710 /* 4711 * Strictly, we care here about the case where the current CPU is 4712 * in rcutree_report_cpu_starting() and thus has an excuse for rdp->grpmask 4713 * not being up to date. So arch_spin_is_locked() might have a 4714 * false positive if it's held by some *other* CPU, but that's 4715 * OK because that just means a false *negative* on the warning. 4716 */ 4717 if (rcu_rdp_cpu_online(rdp) || arch_spin_is_locked(&rcu_state.ofl_lock)) 4718 ret = true; 4719 preempt_enable_notrace(); 4720 return ret; 4721 } 4722 EXPORT_SYMBOL_GPL(rcu_lockdep_current_cpu_online); 4723 4724 #endif /* #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU) */ 4725 4726 // Has rcu_init() been invoked? This is used (for example) to determine 4727 // whether spinlocks may be acquired safely. 4728 static bool rcu_init_invoked(void) 4729 { 4730 return !!READ_ONCE(rcu_state.n_online_cpus); 4731 } 4732 4733 /* 4734 * All CPUs for the specified rcu_node structure have gone offline, 4735 * and all tasks that were preempted within an RCU read-side critical 4736 * section while running on one of those CPUs have since exited their RCU 4737 * read-side critical section. Some other CPU is reporting this fact with 4738 * the specified rcu_node structure's ->lock held and interrupts disabled. 4739 * This function therefore goes up the tree of rcu_node structures, 4740 * clearing the corresponding bits in the ->qsmaskinit fields. Note that 4741 * the leaf rcu_node structure's ->qsmaskinit field has already been 4742 * updated. 4743 * 4744 * This function does check that the specified rcu_node structure has 4745 * all CPUs offline and no blocked tasks, so it is OK to invoke it 4746 * prematurely. That said, invoking it after the fact will cost you 4747 * a needless lock acquisition. So once it has done its work, don't 4748 * invoke it again. 4749 */ 4750 static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf) 4751 { 4752 long mask; 4753 struct rcu_node *rnp = rnp_leaf; 4754 4755 raw_lockdep_assert_held_rcu_node(rnp_leaf); 4756 if (!IS_ENABLED(CONFIG_HOTPLUG_CPU) || 4757 WARN_ON_ONCE(rnp_leaf->qsmaskinit) || 4758 WARN_ON_ONCE(rcu_preempt_has_tasks(rnp_leaf))) 4759 return; 4760 for (;;) { 4761 mask = rnp->grpmask; 4762 rnp = rnp->parent; 4763 if (!rnp) 4764 break; 4765 raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */ 4766 rnp->qsmaskinit &= ~mask; 4767 /* Between grace periods, so better already be zero! */ 4768 WARN_ON_ONCE(rnp->qsmask); 4769 if (rnp->qsmaskinit) { 4770 raw_spin_unlock_rcu_node(rnp); 4771 /* irqs remain disabled. */ 4772 return; 4773 } 4774 raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */ 4775 } 4776 } 4777 4778 /* 4779 * Propagate ->qsinitmask bits up the rcu_node tree to account for the 4780 * first CPU in a given leaf rcu_node structure coming online. The caller 4781 * must hold the corresponding leaf rcu_node ->lock with interrupts 4782 * disabled. 
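 * (The field updated in the loop below is ->qsmaskinit.)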
4783 */
4784 static void rcu_init_new_rnp(struct rcu_node *rnp_leaf)
4785 {
4786 	long mask;
4787 	long oldmask;
4788 	struct rcu_node *rnp = rnp_leaf;
4789 
4790 	raw_lockdep_assert_held_rcu_node(rnp_leaf);
4791 	WARN_ON_ONCE(rnp->wait_blkd_tasks);
4792 	for (;;) {
4793 		mask = rnp->grpmask;
4794 		rnp = rnp->parent;
4795 		if (rnp == NULL)
4796 			return;
4797 		raw_spin_lock_rcu_node(rnp); /* Interrupts already disabled. */
4798 		oldmask = rnp->qsmaskinit;
4799 		rnp->qsmaskinit |= mask;
4800 		raw_spin_unlock_rcu_node(rnp); /* Interrupts remain disabled. */
4801 		if (oldmask)
4802 			return;
4803 	}
4804 }
4805 
4806 /*
4807  * Do boot-time initialization of a CPU's per-CPU RCU data.
4808  */
4809 static void __init
4810 rcu_boot_init_percpu_data(int cpu)
4811 {
4812 	struct context_tracking *ct = this_cpu_ptr(&context_tracking);
4813 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
4814 
4815 	/* Set up local state, ensuring consistent view of global state. */
4816 	rdp->grpmask = leaf_node_cpu_bit(rdp->mynode, cpu);
4817 	INIT_WORK(&rdp->strict_work, strict_work_handler);
4818 	WARN_ON_ONCE(ct->nesting != 1);
4819 	WARN_ON_ONCE(rcu_watching_snap_in_eqs(ct_rcu_watching_cpu(cpu)));
4820 	rdp->barrier_seq_snap = rcu_state.barrier_sequence;
4821 	rdp->rcu_ofl_gp_seq = rcu_state.gp_seq;
4822 	rdp->rcu_ofl_gp_state = RCU_GP_CLEANED;
4823 	rdp->rcu_onl_gp_seq = rcu_state.gp_seq;
4824 	rdp->rcu_onl_gp_state = RCU_GP_CLEANED;
4825 	rdp->last_sched_clock = jiffies;
4826 	rdp->cpu = cpu;
4827 	rcu_boot_init_nocb_percpu_data(rdp);
4828 }
4829 
4830 struct kthread_worker *rcu_exp_gp_kworker;
4831 
4832 static void rcu_spawn_exp_par_gp_kworker(struct rcu_node *rnp)
4833 {
4834 	struct kthread_worker *kworker;
4835 	const char *name = "rcu_exp_par_gp_kthread_worker/%d";
4836 	struct sched_param param = { .sched_priority = kthread_prio };
4837 	int rnp_index = rnp - rcu_get_root();
4838 
4839 	if (rnp->exp_kworker)
4840 		return;
4841 
4842 	kworker = kthread_create_worker(0, name, rnp_index);
4843 	if (IS_ERR_OR_NULL(kworker)) {
4844 		pr_err("Failed to create par gp kworker on %d/%d\n",
4845 		       rnp->grplo, rnp->grphi);
4846 		return;
4847 	}
4848 	WRITE_ONCE(rnp->exp_kworker, kworker);
4849 
4850 	if (IS_ENABLED(CONFIG_RCU_EXP_KTHREAD))
4851 		sched_setscheduler_nocheck(kworker->task, SCHED_FIFO, &param);
4852 }
4853 
4854 static struct task_struct *rcu_exp_par_gp_task(struct rcu_node *rnp)
4855 {
4856 	struct kthread_worker *kworker = READ_ONCE(rnp->exp_kworker);
4857 
4858 	if (!kworker)
4859 		return NULL;
4860 
4861 	return kworker->task;
4862 }
4863 
4864 static void __init rcu_start_exp_gp_kworker(void)
4865 {
4866 	const char *name = "rcu_exp_gp_kthread_worker";
4867 	struct sched_param param = { .sched_priority = kthread_prio };
4868 
4869 	rcu_exp_gp_kworker = kthread_create_worker(0, name);
4870 	if (IS_ERR_OR_NULL(rcu_exp_gp_kworker)) {
4871 		pr_err("Failed to create %s!\n", name);
4872 		rcu_exp_gp_kworker = NULL;
4873 		return;
4874 	}
4875 
4876 	if (IS_ENABLED(CONFIG_RCU_EXP_KTHREAD))
4877 		sched_setscheduler_nocheck(rcu_exp_gp_kworker->task, SCHED_FIFO, &param);
4878 }
4879 
4880 static void rcu_spawn_rnp_kthreads(struct rcu_node *rnp)
4881 {
4882 	if (rcu_scheduler_fully_active) {
4883 		mutex_lock(&rnp->kthread_mutex);
4884 		rcu_spawn_one_boost_kthread(rnp);
4885 		rcu_spawn_exp_par_gp_kworker(rnp);
4886 		mutex_unlock(&rnp->kthread_mutex);
4887 	}
4888 }
4889 
4890 /*
4891  * Invoked early in the CPU-online process, when pretty much all services
4892  * are available. The incoming CPU is not present.
4893  *
4894  * Initializes a CPU's per-CPU RCU data.
Note that only one online or 4895 * offline event can be happening at a given time. Note also that we can 4896 * accept some slop in the rsp->gp_seq access due to the fact that this 4897 * CPU cannot possibly have any non-offloaded RCU callbacks in flight yet. 4898 * And any offloaded callbacks are being numbered elsewhere. 4899 */ 4900 int rcutree_prepare_cpu(unsigned int cpu) 4901 { 4902 unsigned long flags; 4903 struct context_tracking *ct = per_cpu_ptr(&context_tracking, cpu); 4904 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); 4905 struct rcu_node *rnp = rcu_get_root(); 4906 4907 /* Set up local state, ensuring consistent view of global state. */ 4908 raw_spin_lock_irqsave_rcu_node(rnp, flags); 4909 rdp->qlen_last_fqs_check = 0; 4910 rdp->n_force_qs_snap = READ_ONCE(rcu_state.n_force_qs); 4911 rdp->blimit = blimit; 4912 ct->nesting = 1; /* CPU not up, no tearing. */ 4913 raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */ 4914 4915 /* 4916 * Only non-NOCB CPUs that didn't have early-boot callbacks need to be 4917 * (re-)initialized. 4918 */ 4919 if (!rcu_segcblist_is_enabled(&rdp->cblist)) 4920 rcu_segcblist_init(&rdp->cblist); /* Re-enable callbacks. */ 4921 4922 /* 4923 * Add CPU to leaf rcu_node pending-online bitmask. Any needed 4924 * propagation up the rcu_node tree will happen at the beginning 4925 * of the next grace period. 4926 */ 4927 rnp = rdp->mynode; 4928 raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */ 4929 rdp->gp_seq = READ_ONCE(rnp->gp_seq); 4930 rdp->gp_seq_needed = rdp->gp_seq; 4931 rdp->cpu_no_qs.b.norm = true; 4932 rdp->core_needs_qs = false; 4933 rdp->rcu_iw_pending = false; 4934 rdp->rcu_iw = IRQ_WORK_INIT_HARD(rcu_iw_handler); 4935 rdp->rcu_iw_gp_seq = rdp->gp_seq - 1; 4936 trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("cpuonl")); 4937 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 4938 rcu_spawn_rnp_kthreads(rnp); 4939 rcu_spawn_cpu_nocb_kthread(cpu); 4940 ASSERT_EXCLUSIVE_WRITER(rcu_state.n_online_cpus); 4941 WRITE_ONCE(rcu_state.n_online_cpus, rcu_state.n_online_cpus + 1); 4942 4943 return 0; 4944 } 4945 4946 /* 4947 * Update kthreads affinity during CPU-hotplug changes. 4948 * 4949 * Set the per-rcu_node kthread's affinity to cover all CPUs that are 4950 * served by the rcu_node in question. The CPU hotplug lock is still 4951 * held, so the value of rnp->qsmaskinit will be stable. 4952 * 4953 * We don't include outgoingcpu in the affinity set, use -1 if there is 4954 * no outgoing CPU. If there are no CPUs left in the affinity set, 4955 * this function allows the kthread to execute on any CPU. 4956 * 4957 * Any future concurrent calls are serialized via ->kthread_mutex. 4958 */ 4959 static void rcutree_affinity_setting(unsigned int cpu, int outgoingcpu) 4960 { 4961 cpumask_var_t cm; 4962 unsigned long mask; 4963 struct rcu_data *rdp; 4964 struct rcu_node *rnp; 4965 struct task_struct *task_boost, *task_exp; 4966 4967 rdp = per_cpu_ptr(&rcu_data, cpu); 4968 rnp = rdp->mynode; 4969 4970 task_boost = rcu_boost_task(rnp); 4971 task_exp = rcu_exp_par_gp_task(rnp); 4972 4973 /* 4974 * If CPU is the boot one, those tasks are created later from early 4975 * initcall since kthreadd must be created first. 
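	 *
	 * For the non-boot case, an illustrative outcome of the mask handling
	 * below (the values are assumptions): on a leaf rcu_node spanning CPUs
	 * 0-15 with CPU 3 going offline, the kthreads end up bound to the
	 * node's online housekeeping CPUs minus CPU 3; if that intersection
	 * is empty, they fall back to the housekeeping mask minus CPU 3.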
4976 */ 4977 if (!task_boost && !task_exp) 4978 return; 4979 4980 if (!zalloc_cpumask_var(&cm, GFP_KERNEL)) 4981 return; 4982 4983 mutex_lock(&rnp->kthread_mutex); 4984 mask = rcu_rnp_online_cpus(rnp); 4985 for_each_leaf_node_possible_cpu(rnp, cpu) 4986 if ((mask & leaf_node_cpu_bit(rnp, cpu)) && 4987 cpu != outgoingcpu) 4988 cpumask_set_cpu(cpu, cm); 4989 cpumask_and(cm, cm, housekeeping_cpumask(HK_TYPE_RCU)); 4990 if (cpumask_empty(cm)) { 4991 cpumask_copy(cm, housekeeping_cpumask(HK_TYPE_RCU)); 4992 if (outgoingcpu >= 0) 4993 cpumask_clear_cpu(outgoingcpu, cm); 4994 } 4995 4996 if (task_exp) 4997 set_cpus_allowed_ptr(task_exp, cm); 4998 4999 if (task_boost) 5000 set_cpus_allowed_ptr(task_boost, cm); 5001 5002 mutex_unlock(&rnp->kthread_mutex); 5003 5004 free_cpumask_var(cm); 5005 } 5006 5007 /* 5008 * Has the specified (known valid) CPU ever been fully online? 5009 */ 5010 bool rcu_cpu_beenfullyonline(int cpu) 5011 { 5012 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); 5013 5014 return smp_load_acquire(&rdp->beenonline); 5015 } 5016 5017 /* 5018 * Near the end of the CPU-online process. Pretty much all services 5019 * enabled, and the CPU is now very much alive. 5020 */ 5021 int rcutree_online_cpu(unsigned int cpu) 5022 { 5023 unsigned long flags; 5024 struct rcu_data *rdp; 5025 struct rcu_node *rnp; 5026 5027 rdp = per_cpu_ptr(&rcu_data, cpu); 5028 rnp = rdp->mynode; 5029 raw_spin_lock_irqsave_rcu_node(rnp, flags); 5030 rnp->ffmask |= rdp->grpmask; 5031 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 5032 if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE) 5033 return 0; /* Too early in boot for scheduler work. */ 5034 sync_sched_exp_online_cleanup(cpu); 5035 rcutree_affinity_setting(cpu, -1); 5036 5037 // Stop-machine done, so allow nohz_full to disable tick. 5038 tick_dep_clear(TICK_DEP_BIT_RCU); 5039 return 0; 5040 } 5041 5042 /* 5043 * Mark the specified CPU as being online so that subsequent grace periods 5044 * (both expedited and normal) will wait on it. Note that this means that 5045 * incoming CPUs are not allowed to use RCU read-side critical sections 5046 * until this function is called. Failing to observe this restriction 5047 * will result in lockdep splats. 5048 * 5049 * Note that this function is special in that it is invoked directly 5050 * from the incoming CPU rather than from the cpuhp_step mechanism. 5051 * This is because this function must be invoked at a precise location. 5052 * This incoming CPU must not have enabled interrupts yet. 5053 * 5054 * This mirrors the effects of rcutree_report_cpu_dead(). 5055 */ 5056 void rcutree_report_cpu_starting(unsigned int cpu) 5057 { 5058 unsigned long mask; 5059 struct rcu_data *rdp; 5060 struct rcu_node *rnp; 5061 bool newcpu; 5062 5063 lockdep_assert_irqs_disabled(); 5064 rdp = per_cpu_ptr(&rcu_data, cpu); 5065 if (rdp->cpu_started) 5066 return; 5067 rdp->cpu_started = true; 5068 5069 rnp = rdp->mynode; 5070 mask = rdp->grpmask; 5071 arch_spin_lock(&rcu_state.ofl_lock); 5072 rcu_watching_online(); 5073 raw_spin_lock(&rcu_state.barrier_lock); 5074 raw_spin_lock_rcu_node(rnp); 5075 WRITE_ONCE(rnp->qsmaskinitnext, rnp->qsmaskinitnext | mask); 5076 raw_spin_unlock(&rcu_state.barrier_lock); 5077 newcpu = !(rnp->expmaskinitnext & mask); 5078 rnp->expmaskinitnext |= mask; 5079 /* Allow lockless access for expedited grace periods. */ 5080 smp_store_release(&rcu_state.ncpus, rcu_state.ncpus + newcpu); /* ^^^ */ 5081 ASSERT_EXCLUSIVE_WRITER(rcu_state.ncpus); 5082 rcu_gpnum_ovf(rnp, rdp); /* Offline-induced counter wrap? 
*/
5083 	rdp->rcu_onl_gp_seq = READ_ONCE(rcu_state.gp_seq);
5084 	rdp->rcu_onl_gp_state = READ_ONCE(rcu_state.gp_state);
5085 
5086 	/* An incoming CPU should never be blocking a grace period. */
5087 	if (WARN_ON_ONCE(rnp->qsmask & mask)) { /* RCU waiting on incoming CPU? */
5088 		/* rcu_report_qs_rnp() *really* wants some flags to restore */
5089 		unsigned long flags;
5090 
5091 		local_irq_save(flags);
5092 		rcu_disable_urgency_upon_qs(rdp);
5093 		/* Report QS -after- changing ->qsmaskinitnext! */
5094 		rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
5095 	} else {
5096 		raw_spin_unlock_rcu_node(rnp);
5097 	}
5098 	arch_spin_unlock(&rcu_state.ofl_lock);
5099 	smp_store_release(&rdp->beenonline, true);
5100 	smp_mb(); /* Ensure RCU read-side usage follows above initialization. */
5101 }
5102 
5103 /*
5104  * The outgoing CPU has no further need of RCU, so remove it from
5105  * the rcu_node tree's ->qsmaskinitnext bit masks.
5106  *
5107  * Note that this function is special in that it is invoked directly
5108  * from the outgoing CPU rather than from the cpuhp_step mechanism.
5109  * This is because this function must be invoked at a precise location.
5110  *
5111  * This mirrors the effect of rcutree_report_cpu_starting().
5112  */
5113 void rcutree_report_cpu_dead(void)
5114 {
5115 	unsigned long flags;
5116 	unsigned long mask;
5117 	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
5118 	struct rcu_node *rnp = rdp->mynode; /* Outgoing CPU's rdp & rnp. */
5119 
5120 	/*
5121 	 * IRQS must be disabled from now on and until the CPU dies, or an interrupt
5122 	 * may open a new RCU read-side critical section while this CPU is already absent from the QS masks.
5123 	 */
5124 	lockdep_assert_irqs_disabled();
5125 	// Do any dangling deferred wakeups.
5126 	do_nocb_deferred_wakeup(rdp);
5127 
5128 	rcu_preempt_deferred_qs(current);
5129 
5130 	/* Remove outgoing CPU from mask in the leaf rcu_node structure. */
5131 	mask = rdp->grpmask;
5132 	arch_spin_lock(&rcu_state.ofl_lock);
5133 	raw_spin_lock_irqsave_rcu_node(rnp, flags); /* Enforce GP memory-order guarantee. */
5134 	rdp->rcu_ofl_gp_seq = READ_ONCE(rcu_state.gp_seq);
5135 	rdp->rcu_ofl_gp_state = READ_ONCE(rcu_state.gp_state);
5136 	if (rnp->qsmask & mask) { /* RCU waiting on outgoing CPU? */
5137 		/* Report quiescent state -before- changing ->qsmaskinitnext! */
5138 		rcu_disable_urgency_upon_qs(rdp);
5139 		rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
5140 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
5141 	}
5142 	WRITE_ONCE(rnp->qsmaskinitnext, rnp->qsmaskinitnext & ~mask);
5143 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
5144 	arch_spin_unlock(&rcu_state.ofl_lock);
5145 	rdp->cpu_started = false;
5146 }
5147 
5148 #ifdef CONFIG_HOTPLUG_CPU
5149 /*
5150  * The outgoing CPU has just passed through the dying-idle state, and we
5151  * are being invoked from the CPU that was IPIed to continue the offline
5152  * operation. Migrate the outgoing CPU's callbacks to the current CPU.
5153  */
5154 void rcutree_migrate_callbacks(int cpu)
5155 {
5156 	unsigned long flags;
5157 	struct rcu_data *my_rdp;
5158 	struct rcu_node *my_rnp;
5159 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
5160 	bool needwake;
5161 
5162 	if (rcu_rdp_is_offloaded(rdp))
5163 		return;
5164 
5165 	raw_spin_lock_irqsave(&rcu_state.barrier_lock, flags);
5166 	if (rcu_segcblist_empty(&rdp->cblist)) {
5167 		raw_spin_unlock_irqrestore(&rcu_state.barrier_lock, flags);
5168 		return; /* No callbacks to migrate.
*/ 5169 } 5170 5171 WARN_ON_ONCE(rcu_rdp_cpu_online(rdp)); 5172 rcu_barrier_entrain(rdp); 5173 my_rdp = this_cpu_ptr(&rcu_data); 5174 my_rnp = my_rdp->mynode; 5175 rcu_nocb_lock(my_rdp); /* irqs already disabled. */ 5176 WARN_ON_ONCE(!rcu_nocb_flush_bypass(my_rdp, NULL, jiffies, false)); 5177 raw_spin_lock_rcu_node(my_rnp); /* irqs already disabled. */ 5178 /* Leverage recent GPs and set GP for new callbacks. */ 5179 needwake = rcu_advance_cbs(my_rnp, rdp) || 5180 rcu_advance_cbs(my_rnp, my_rdp); 5181 rcu_segcblist_merge(&my_rdp->cblist, &rdp->cblist); 5182 raw_spin_unlock(&rcu_state.barrier_lock); /* irqs remain disabled. */ 5183 needwake = needwake || rcu_advance_cbs(my_rnp, my_rdp); 5184 rcu_segcblist_disable(&rdp->cblist); 5185 WARN_ON_ONCE(rcu_segcblist_empty(&my_rdp->cblist) != !rcu_segcblist_n_cbs(&my_rdp->cblist)); 5186 check_cb_ovld_locked(my_rdp, my_rnp); 5187 if (rcu_rdp_is_offloaded(my_rdp)) { 5188 raw_spin_unlock_rcu_node(my_rnp); /* irqs remain disabled. */ 5189 __call_rcu_nocb_wake(my_rdp, true, flags); 5190 } else { 5191 rcu_nocb_unlock(my_rdp); /* irqs remain disabled. */ 5192 raw_spin_unlock_rcu_node(my_rnp); /* irqs remain disabled. */ 5193 } 5194 local_irq_restore(flags); 5195 if (needwake) 5196 rcu_gp_kthread_wake(); 5197 lockdep_assert_irqs_enabled(); 5198 WARN_ONCE(rcu_segcblist_n_cbs(&rdp->cblist) != 0 || 5199 !rcu_segcblist_empty(&rdp->cblist), 5200 "rcu_cleanup_dead_cpu: Callbacks on offline CPU %d: qlen=%lu, 1stCB=%p\n", 5201 cpu, rcu_segcblist_n_cbs(&rdp->cblist), 5202 rcu_segcblist_first_cb(&rdp->cblist)); 5203 } 5204 5205 /* 5206 * The CPU has been completely removed, and some other CPU is reporting 5207 * this fact from process context. Do the remainder of the cleanup. 5208 * There can only be one CPU hotplug operation at a time, so no need for 5209 * explicit locking. 5210 */ 5211 int rcutree_dead_cpu(unsigned int cpu) 5212 { 5213 ASSERT_EXCLUSIVE_WRITER(rcu_state.n_online_cpus); 5214 WRITE_ONCE(rcu_state.n_online_cpus, rcu_state.n_online_cpus - 1); 5215 // Stop-machine done, so allow nohz_full to disable tick. 5216 tick_dep_clear(TICK_DEP_BIT_RCU); 5217 return 0; 5218 } 5219 5220 /* 5221 * Near the end of the offline process. Trace the fact that this CPU 5222 * is going offline. 5223 */ 5224 int rcutree_dying_cpu(unsigned int cpu) 5225 { 5226 bool blkd; 5227 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); 5228 struct rcu_node *rnp = rdp->mynode; 5229 5230 blkd = !!(READ_ONCE(rnp->qsmask) & rdp->grpmask); 5231 trace_rcu_grace_period(rcu_state.name, READ_ONCE(rnp->gp_seq), 5232 blkd ? TPS("cpuofl-bgp") : TPS("cpuofl")); 5233 return 0; 5234 } 5235 5236 /* 5237 * Near the beginning of the process. The CPU is still very much alive 5238 * with pretty much all services enabled. 5239 */ 5240 int rcutree_offline_cpu(unsigned int cpu) 5241 { 5242 unsigned long flags; 5243 struct rcu_data *rdp; 5244 struct rcu_node *rnp; 5245 5246 rdp = per_cpu_ptr(&rcu_data, cpu); 5247 rnp = rdp->mynode; 5248 raw_spin_lock_irqsave_rcu_node(rnp, flags); 5249 rnp->ffmask &= ~rdp->grpmask; 5250 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 5251 5252 rcutree_affinity_setting(cpu, cpu); 5253 5254 // nohz_full CPUs need the tick for stop-machine to work quickly 5255 tick_dep_set(TICK_DEP_BIT_RCU); 5256 return 0; 5257 } 5258 #endif /* #ifdef CONFIG_HOTPLUG_CPU */ 5259 5260 /* 5261 * On non-huge systems, use expedited RCU grace periods to make suspend 5262 * and hibernation run faster. 
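 *
 * Illustration only (not code from this file): once PM_SUSPEND_PREPARE has
 * been handled below, a normal grace-period request behaves as if it were
 * expedited, e.g. in a hypothetical driver callback (the name is an
 * assumption; synchronize_rcu() is the real API):
 *
 *	static int example_suspend(struct device *dev)
 *	{
 *		// Between PREPARE and POST_SUSPEND this runs expedited.
 *		synchronize_rcu();
 *		return 0;
 *	}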
5263 */ 5264 static int rcu_pm_notify(struct notifier_block *self, 5265 unsigned long action, void *hcpu) 5266 { 5267 switch (action) { 5268 case PM_HIBERNATION_PREPARE: 5269 case PM_SUSPEND_PREPARE: 5270 rcu_async_hurry(); 5271 rcu_expedite_gp(); 5272 break; 5273 case PM_POST_HIBERNATION: 5274 case PM_POST_SUSPEND: 5275 rcu_unexpedite_gp(); 5276 rcu_async_relax(); 5277 break; 5278 default: 5279 break; 5280 } 5281 return NOTIFY_OK; 5282 } 5283 5284 /* 5285 * Spawn the kthreads that handle RCU's grace periods. 5286 */ 5287 static int __init rcu_spawn_gp_kthread(void) 5288 { 5289 unsigned long flags; 5290 struct rcu_node *rnp; 5291 struct sched_param sp; 5292 struct task_struct *t; 5293 struct rcu_data *rdp = this_cpu_ptr(&rcu_data); 5294 5295 rcu_scheduler_fully_active = 1; 5296 t = kthread_create(rcu_gp_kthread, NULL, "%s", rcu_state.name); 5297 if (WARN_ONCE(IS_ERR(t), "%s: Could not start grace-period kthread, OOM is now expected behavior\n", __func__)) 5298 return 0; 5299 if (kthread_prio) { 5300 sp.sched_priority = kthread_prio; 5301 sched_setscheduler_nocheck(t, SCHED_FIFO, &sp); 5302 } 5303 rnp = rcu_get_root(); 5304 raw_spin_lock_irqsave_rcu_node(rnp, flags); 5305 WRITE_ONCE(rcu_state.gp_activity, jiffies); 5306 WRITE_ONCE(rcu_state.gp_req_activity, jiffies); 5307 // Reset .gp_activity and .gp_req_activity before setting .gp_kthread. 5308 smp_store_release(&rcu_state.gp_kthread, t); /* ^^^ */ 5309 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 5310 wake_up_process(t); 5311 /* This is a pre-SMP initcall, we expect a single CPU */ 5312 WARN_ON(num_online_cpus() > 1); 5313 /* 5314 * Those kthreads couldn't be created on rcu_init() -> rcutree_prepare_cpu() 5315 * due to rcu_scheduler_fully_active. 5316 */ 5317 rcu_spawn_cpu_nocb_kthread(smp_processor_id()); 5318 rcu_spawn_rnp_kthreads(rdp->mynode); 5319 rcu_spawn_core_kthreads(); 5320 /* Create kthread worker for expedited GPs */ 5321 rcu_start_exp_gp_kworker(); 5322 return 0; 5323 } 5324 early_initcall(rcu_spawn_gp_kthread); 5325 5326 /* 5327 * This function is invoked towards the end of the scheduler's 5328 * initialization process. Before this is called, the idle task might 5329 * contain synchronous grace-period primitives (during which time, this idle 5330 * task is booting the system, and such primitives are no-ops). After this 5331 * function is called, any synchronous grace-period primitives are run as 5332 * expedited, with the requesting task driving the grace period forward. 5333 * A later core_initcall() rcu_set_runtime_mode() will switch to full 5334 * runtime RCU functionality. 5335 */ 5336 void rcu_scheduler_starting(void) 5337 { 5338 unsigned long flags; 5339 struct rcu_node *rnp; 5340 5341 WARN_ON(num_online_cpus() != 1); 5342 WARN_ON(nr_context_switches() > 0); 5343 rcu_test_sync_prims(); 5344 5345 // Fix up the ->gp_seq counters. 5346 local_irq_save(flags); 5347 rcu_for_each_node_breadth_first(rnp) 5348 rnp->gp_seq_needed = rnp->gp_seq = rcu_state.gp_seq; 5349 local_irq_restore(flags); 5350 5351 // Switch out of early boot mode. 5352 rcu_scheduler_active = RCU_SCHEDULER_INIT; 5353 rcu_test_sync_prims(); 5354 } 5355 5356 /* 5357 * Helper function for rcu_init() that initializes the rcu_state structure. 
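 *
 * Illustrative example (the numbers are assumptions, not requirements):
 * with nr_cpu_ids = 96 and a leaf fanout of 16, the loops below build a
 * two-level combining tree: one root rcu_node spanning CPUs 0-95 plus six
 * leaf rcu_node structures covering CPUs 0-15, 16-31, ..., 80-95, with each
 * leaf's ->parent pointing at the root.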
5358 */ 5359 static void __init rcu_init_one(void) 5360 { 5361 static const char * const buf[] = RCU_NODE_NAME_INIT; 5362 static const char * const fqs[] = RCU_FQS_NAME_INIT; 5363 static struct lock_class_key rcu_node_class[RCU_NUM_LVLS]; 5364 static struct lock_class_key rcu_fqs_class[RCU_NUM_LVLS]; 5365 5366 int levelspread[RCU_NUM_LVLS]; /* kids/node in each level. */ 5367 int cpustride = 1; 5368 int i; 5369 int j; 5370 struct rcu_node *rnp; 5371 5372 BUILD_BUG_ON(RCU_NUM_LVLS > ARRAY_SIZE(buf)); /* Fix buf[] init! */ 5373 5374 /* Silence gcc 4.8 false positive about array index out of range. */ 5375 if (rcu_num_lvls <= 0 || rcu_num_lvls > RCU_NUM_LVLS) 5376 panic("rcu_init_one: rcu_num_lvls out of range"); 5377 5378 /* Initialize the level-tracking arrays. */ 5379 5380 for (i = 1; i < rcu_num_lvls; i++) 5381 rcu_state.level[i] = 5382 rcu_state.level[i - 1] + num_rcu_lvl[i - 1]; 5383 rcu_init_levelspread(levelspread, num_rcu_lvl); 5384 5385 /* Initialize the elements themselves, starting from the leaves. */ 5386 5387 for (i = rcu_num_lvls - 1; i >= 0; i--) { 5388 cpustride *= levelspread[i]; 5389 rnp = rcu_state.level[i]; 5390 for (j = 0; j < num_rcu_lvl[i]; j++, rnp++) { 5391 raw_spin_lock_init(&ACCESS_PRIVATE(rnp, lock)); 5392 lockdep_set_class_and_name(&ACCESS_PRIVATE(rnp, lock), 5393 &rcu_node_class[i], buf[i]); 5394 raw_spin_lock_init(&rnp->fqslock); 5395 lockdep_set_class_and_name(&rnp->fqslock, 5396 &rcu_fqs_class[i], fqs[i]); 5397 rnp->gp_seq = rcu_state.gp_seq; 5398 rnp->gp_seq_needed = rcu_state.gp_seq; 5399 rnp->completedqs = rcu_state.gp_seq; 5400 rnp->qsmask = 0; 5401 rnp->qsmaskinit = 0; 5402 rnp->grplo = j * cpustride; 5403 rnp->grphi = (j + 1) * cpustride - 1; 5404 if (rnp->grphi >= nr_cpu_ids) 5405 rnp->grphi = nr_cpu_ids - 1; 5406 if (i == 0) { 5407 rnp->grpnum = 0; 5408 rnp->grpmask = 0; 5409 rnp->parent = NULL; 5410 } else { 5411 rnp->grpnum = j % levelspread[i - 1]; 5412 rnp->grpmask = BIT(rnp->grpnum); 5413 rnp->parent = rcu_state.level[i - 1] + 5414 j / levelspread[i - 1]; 5415 } 5416 rnp->level = i; 5417 INIT_LIST_HEAD(&rnp->blkd_tasks); 5418 rcu_init_one_nocb(rnp); 5419 init_waitqueue_head(&rnp->exp_wq[0]); 5420 init_waitqueue_head(&rnp->exp_wq[1]); 5421 init_waitqueue_head(&rnp->exp_wq[2]); 5422 init_waitqueue_head(&rnp->exp_wq[3]); 5423 spin_lock_init(&rnp->exp_lock); 5424 mutex_init(&rnp->kthread_mutex); 5425 raw_spin_lock_init(&rnp->exp_poll_lock); 5426 rnp->exp_seq_poll_rq = RCU_GET_STATE_COMPLETED; 5427 INIT_WORK(&rnp->exp_poll_wq, sync_rcu_do_polled_gp); 5428 } 5429 } 5430 5431 init_swait_queue_head(&rcu_state.gp_wq); 5432 init_swait_queue_head(&rcu_state.expedited_wq); 5433 rnp = rcu_first_leaf_node(); 5434 for_each_possible_cpu(i) { 5435 while (i > rnp->grphi) 5436 rnp++; 5437 per_cpu_ptr(&rcu_data, i)->mynode = rnp; 5438 rcu_boot_init_percpu_data(i); 5439 } 5440 } 5441 5442 /* 5443 * Force priority from the kernel command-line into range. 
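 *
 * For example (illustrative): booting with rcutree.kthread_prio=150 is
 * clamped to 99, and booting a CONFIG_RCU_BOOST=y kernel with
 * rcutree.kthread_prio=0 is raised to 1 (or to 2 when the RCU torture
 * tests are built in); any such adjustment is reported via pr_alert().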
5444 */
5445 static void __init sanitize_kthread_prio(void)
5446 {
5447 	int kthread_prio_in = kthread_prio;
5448 
5449 	if (IS_ENABLED(CONFIG_RCU_BOOST) && kthread_prio < 2
5450 	    && IS_BUILTIN(CONFIG_RCU_TORTURE_TEST))
5451 		kthread_prio = 2;
5452 	else if (IS_ENABLED(CONFIG_RCU_BOOST) && kthread_prio < 1)
5453 		kthread_prio = 1;
5454 	else if (kthread_prio < 0)
5455 		kthread_prio = 0;
5456 	else if (kthread_prio > 99)
5457 		kthread_prio = 99;
5458 
5459 	if (kthread_prio != kthread_prio_in)
5460 		pr_alert("%s: Limited prio to %d from %d\n",
5461 			 __func__, kthread_prio, kthread_prio_in);
5462 }
5463 
5464 /*
5465  * Compute the rcu_node tree geometry from kernel parameters. This cannot
5466  * replace the definitions in tree.h because those are needed to size
5467  * the ->node array in the rcu_state structure.
5468  */
5469 void rcu_init_geometry(void)
5470 {
5471 	ulong d;
5472 	int i;
5473 	static unsigned long old_nr_cpu_ids;
5474 	int rcu_capacity[RCU_NUM_LVLS];
5475 	static bool initialized;
5476 
5477 	if (initialized) {
5478 		/*
5479 		 * Warn if setup_nr_cpu_ids() had not yet been invoked,
5480 		 * unless nr_cpu_ids == NR_CPUS, in which case who cares?
5481 		 */
5482 		WARN_ON_ONCE(old_nr_cpu_ids != nr_cpu_ids);
5483 		return;
5484 	}
5485 
5486 	old_nr_cpu_ids = nr_cpu_ids;
5487 	initialized = true;
5488 
5489 	/*
5490 	 * Initialize any unspecified boot parameters.
5491 	 * The default values of jiffies_till_first_fqs and
5492 	 * jiffies_till_next_fqs are set to the RCU_JIFFIES_TILL_FORCE_QS
5493 	 * value, which is a function of HZ, then adding one for each
5494 	 * RCU_JIFFIES_FQS_DIV CPUs that might be on the system.
5495 	 */
5496 	d = RCU_JIFFIES_TILL_FORCE_QS + nr_cpu_ids / RCU_JIFFIES_FQS_DIV;
5497 	if (jiffies_till_first_fqs == ULONG_MAX)
5498 		jiffies_till_first_fqs = d;
5499 	if (jiffies_till_next_fqs == ULONG_MAX)
5500 		jiffies_till_next_fqs = d;
5501 	adjust_jiffies_till_sched_qs();
5502 
5503 	/* If the compile-time values are accurate, just leave. */
5504 	if (rcu_fanout_leaf == RCU_FANOUT_LEAF &&
5505 	    nr_cpu_ids == NR_CPUS)
5506 		return;
5507 	pr_info("Adjusting geometry for rcu_fanout_leaf=%d, nr_cpu_ids=%u\n",
5508 		rcu_fanout_leaf, nr_cpu_ids);
5509 
5510 	/*
5511 	 * The boot-time rcu_fanout_leaf parameter must be at least two
5512 	 * and cannot exceed the number of bits in the rcu_node masks.
5513 	 * Complain and fall back to the compile-time values if this
5514 	 * limit is exceeded.
5515 	 */
5516 	if (rcu_fanout_leaf < 2 ||
5517 	    rcu_fanout_leaf > sizeof(unsigned long) * 8) {
5518 		rcu_fanout_leaf = RCU_FANOUT_LEAF;
5519 		WARN_ON(1);
5520 		return;
5521 	}
5522 
5523 	/*
5524 	 * Compute number of nodes that can be handled by an rcu_node tree
5525 	 * with the given number of levels.
5526 	 */
5527 	rcu_capacity[0] = rcu_fanout_leaf;
5528 	for (i = 1; i < RCU_NUM_LVLS; i++)
5529 		rcu_capacity[i] = rcu_capacity[i - 1] * RCU_FANOUT;
5530 
5531 	/*
5532 	 * The tree must be able to accommodate the configured number of CPUs.
5533 	 * If this limit is exceeded, fall back to the compile-time values.
5534 	 */
5535 	if (nr_cpu_ids > rcu_capacity[RCU_NUM_LVLS - 1]) {
5536 		rcu_fanout_leaf = RCU_FANOUT_LEAF;
5537 		WARN_ON(1);
5538 		return;
5539 	}
5540 
5541 	/* Calculate the number of levels in the tree. */
5542 	for (i = 0; nr_cpu_ids > rcu_capacity[i]; i++) {
5543 	}
5544 	rcu_num_lvls = i + 1;
5545 
5546 	/* Calculate the number of rcu_nodes at each level of the tree.
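	 * For example (illustrative): with nr_cpu_ids = 96, rcu_fanout_leaf = 16,
	 * and RCU_FANOUT = 64, rcu_capacity[] is { 16, 1024 } and rcu_num_lvls
	 * is 2, so this loop yields num_rcu_lvl[0] = DIV_ROUND_UP(96, 1024) = 1
	 * root and num_rcu_lvl[1] = DIV_ROUND_UP(96, 16) = 6 leaves, giving
	 * rcu_num_nodes = 7 below.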
*/ 5547 for (i = 0; i < rcu_num_lvls; i++) { 5548 int cap = rcu_capacity[(rcu_num_lvls - 1) - i]; 5549 num_rcu_lvl[i] = DIV_ROUND_UP(nr_cpu_ids, cap); 5550 } 5551 5552 /* Calculate the total number of rcu_node structures. */ 5553 rcu_num_nodes = 0; 5554 for (i = 0; i < rcu_num_lvls; i++) 5555 rcu_num_nodes += num_rcu_lvl[i]; 5556 } 5557 5558 /* 5559 * Dump out the structure of the rcu_node combining tree associated 5560 * with the rcu_state structure. 5561 */ 5562 static void __init rcu_dump_rcu_node_tree(void) 5563 { 5564 int level = 0; 5565 struct rcu_node *rnp; 5566 5567 pr_info("rcu_node tree layout dump\n"); 5568 pr_info(" "); 5569 rcu_for_each_node_breadth_first(rnp) { 5570 if (rnp->level != level) { 5571 pr_cont("\n"); 5572 pr_info(" "); 5573 level = rnp->level; 5574 } 5575 pr_cont("%d:%d ^%d ", rnp->grplo, rnp->grphi, rnp->grpnum); 5576 } 5577 pr_cont("\n"); 5578 } 5579 5580 struct workqueue_struct *rcu_gp_wq; 5581 5582 static void __init kfree_rcu_batch_init(void) 5583 { 5584 int cpu; 5585 int i, j; 5586 struct shrinker *kfree_rcu_shrinker; 5587 5588 /* Clamp it to [0:100] seconds interval. */ 5589 if (rcu_delay_page_cache_fill_msec < 0 || 5590 rcu_delay_page_cache_fill_msec > 100 * MSEC_PER_SEC) { 5591 5592 rcu_delay_page_cache_fill_msec = 5593 clamp(rcu_delay_page_cache_fill_msec, 0, 5594 (int) (100 * MSEC_PER_SEC)); 5595 5596 pr_info("Adjusting rcutree.rcu_delay_page_cache_fill_msec to %d ms.\n", 5597 rcu_delay_page_cache_fill_msec); 5598 } 5599 5600 for_each_possible_cpu(cpu) { 5601 struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu); 5602 5603 for (i = 0; i < KFREE_N_BATCHES; i++) { 5604 INIT_RCU_WORK(&krcp->krw_arr[i].rcu_work, kfree_rcu_work); 5605 krcp->krw_arr[i].krcp = krcp; 5606 5607 for (j = 0; j < FREE_N_CHANNELS; j++) 5608 INIT_LIST_HEAD(&krcp->krw_arr[i].bulk_head_free[j]); 5609 } 5610 5611 for (i = 0; i < FREE_N_CHANNELS; i++) 5612 INIT_LIST_HEAD(&krcp->bulk_head[i]); 5613 5614 INIT_DELAYED_WORK(&krcp->monitor_work, kfree_rcu_monitor); 5615 INIT_DELAYED_WORK(&krcp->page_cache_work, fill_page_cache_func); 5616 krcp->initialized = true; 5617 } 5618 5619 kfree_rcu_shrinker = shrinker_alloc(0, "rcu-kfree"); 5620 if (!kfree_rcu_shrinker) { 5621 pr_err("Failed to allocate kfree_rcu() shrinker!\n"); 5622 return; 5623 } 5624 5625 kfree_rcu_shrinker->count_objects = kfree_rcu_shrink_count; 5626 kfree_rcu_shrinker->scan_objects = kfree_rcu_shrink_scan; 5627 5628 shrinker_register(kfree_rcu_shrinker); 5629 } 5630 5631 void __init rcu_init(void) 5632 { 5633 int cpu = smp_processor_id(); 5634 5635 rcu_early_boot_tests(); 5636 5637 kfree_rcu_batch_init(); 5638 rcu_bootup_announce(); 5639 sanitize_kthread_prio(); 5640 rcu_init_geometry(); 5641 rcu_init_one(); 5642 if (dump_tree) 5643 rcu_dump_rcu_node_tree(); 5644 if (use_softirq) 5645 open_softirq(RCU_SOFTIRQ, rcu_core_si); 5646 5647 /* 5648 * We don't need protection against CPU-hotplug here because 5649 * this is called early in boot, before either interrupts 5650 * or the scheduler are operational. 5651 */ 5652 pm_notifier(rcu_pm_notify, 0); 5653 WARN_ON(num_online_cpus() > 1); // Only one CPU this early in boot. 5654 rcutree_prepare_cpu(cpu); 5655 rcutree_report_cpu_starting(cpu); 5656 rcutree_online_cpu(cpu); 5657 5658 /* Create workqueue for Tree SRCU and for expedited GPs. */ 5659 rcu_gp_wq = alloc_workqueue("rcu_gp", WQ_MEM_RECLAIM, 0); 5660 WARN_ON(!rcu_gp_wq); 5661 5662 sync_wq = alloc_workqueue("sync_wq", WQ_MEM_RECLAIM, 0); 5663 WARN_ON(!sync_wq); 5664 5665 /* Fill in default value for rcutree.qovld boot parameter. 
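	 * For example (illustrative, assuming the usual defaults of qhimark = 10000
	 * and DEFAULT_RCU_QOVLD_MULT = 2): leaving rcutree.qovld at its default
	 * of -1 yields qovld_calc = 20000 callbacks, while booting with
	 * rcutree.qovld=5000 uses 5000 directly.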
*/ 5666 /* -After- the rcu_node ->lock fields are initialized! */ 5667 if (qovld < 0) 5668 qovld_calc = DEFAULT_RCU_QOVLD_MULT * qhimark; 5669 else 5670 qovld_calc = qovld; 5671 5672 // Kick-start in case any polled grace periods started early. 5673 (void)start_poll_synchronize_rcu_expedited(); 5674 5675 rcu_test_sync_prims(); 5676 5677 tasks_cblist_init_generic(); 5678 } 5679 5680 #include "tree_stall.h" 5681 #include "tree_exp.h" 5682 #include "tree_nocb.h" 5683 #include "tree_plugin.h" 5684