1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * kernel/workqueue.c - generic async execution with shared worker pool 4 * 5 * Copyright (C) 2002 Ingo Molnar 6 * 7 * Derived from the taskqueue/keventd code by: 8 * David Woodhouse <[email protected]> 9 * Andrew Morton 10 * Kai Petzke <[email protected]> 11 * Theodore Ts'o <[email protected]> 12 * 13 * Made to use alloc_percpu by Christoph Lameter. 14 * 15 * Copyright (C) 2010 SUSE Linux Products GmbH 16 * Copyright (C) 2010 Tejun Heo <[email protected]> 17 * 18 * This is the generic async execution mechanism. Work items as are 19 * executed in process context. The worker pool is shared and 20 * automatically managed. There are two worker pools for each CPU (one for 21 * normal work items and the other for high priority ones) and some extra 22 * pools for workqueues which are not bound to any specific CPU - the 23 * number of these backing pools is dynamic. 24 * 25 * Please read Documentation/core-api/workqueue.rst for details. 26 */ 27 28 #include <linux/export.h> 29 #include <linux/kernel.h> 30 #include <linux/sched.h> 31 #include <linux/init.h> 32 #include <linux/signal.h> 33 #include <linux/completion.h> 34 #include <linux/workqueue.h> 35 #include <linux/slab.h> 36 #include <linux/cpu.h> 37 #include <linux/notifier.h> 38 #include <linux/kthread.h> 39 #include <linux/hardirq.h> 40 #include <linux/mempolicy.h> 41 #include <linux/freezer.h> 42 #include <linux/debug_locks.h> 43 #include <linux/lockdep.h> 44 #include <linux/idr.h> 45 #include <linux/jhash.h> 46 #include <linux/hashtable.h> 47 #include <linux/rculist.h> 48 #include <linux/nodemask.h> 49 #include <linux/moduleparam.h> 50 #include <linux/uaccess.h> 51 #include <linux/sched/isolation.h> 52 #include <linux/sched/debug.h> 53 #include <linux/nmi.h> 54 #include <linux/kvm_para.h> 55 #include <linux/delay.h> 56 57 #include "workqueue_internal.h" 58 59 enum { 60 /* 61 * worker_pool flags 62 * 63 * A bound pool is either associated or disassociated with its CPU. 64 * While associated (!DISASSOCIATED), all workers are bound to the 65 * CPU and none has %WORKER_UNBOUND set and concurrency management 66 * is in effect. 67 * 68 * While DISASSOCIATED, the cpu may be offline and all workers have 69 * %WORKER_UNBOUND set and concurrency management disabled, and may 70 * be executing on any CPU. The pool behaves as an unbound one. 71 * 72 * Note that DISASSOCIATED should be flipped only while holding 73 * wq_pool_attach_mutex to avoid changing binding state while 74 * worker_attach_to_pool() is in progress. 75 */ 76 POOL_MANAGER_ACTIVE = 1 << 0, /* being managed */ 77 POOL_DISASSOCIATED = 1 << 2, /* cpu can't serve workers */ 78 79 /* worker flags */ 80 WORKER_DIE = 1 << 1, /* die die die */ 81 WORKER_IDLE = 1 << 2, /* is idle */ 82 WORKER_PREP = 1 << 3, /* preparing to run works */ 83 WORKER_CPU_INTENSIVE = 1 << 6, /* cpu intensive */ 84 WORKER_UNBOUND = 1 << 7, /* worker is unbound */ 85 WORKER_REBOUND = 1 << 8, /* worker was rebound */ 86 87 WORKER_NOT_RUNNING = WORKER_PREP | WORKER_CPU_INTENSIVE | 88 WORKER_UNBOUND | WORKER_REBOUND, 89 90 NR_STD_WORKER_POOLS = 2, /* # standard pools per cpu */ 91 92 UNBOUND_POOL_HASH_ORDER = 6, /* hashed by pool->attrs */ 93 BUSY_WORKER_HASH_ORDER = 6, /* 64 pointers */ 94 95 MAX_IDLE_WORKERS_RATIO = 4, /* 1/4 of busy can be idle */ 96 IDLE_WORKER_TIMEOUT = 300 * HZ, /* keep idle ones for 5 mins */ 97 98 MAYDAY_INITIAL_TIMEOUT = HZ / 100 >= 2 ? 
HZ / 100 : 2, 99 /* call for help after 10ms 100 (min two ticks) */ 101 MAYDAY_INTERVAL = HZ / 10, /* and then every 100ms */ 102 CREATE_COOLDOWN = HZ, /* time to breathe after fail */ 103 104 /* 105 * Rescue workers are used only on emergencies and shared by 106 * all cpus. Give MIN_NICE. 107 */ 108 RESCUER_NICE_LEVEL = MIN_NICE, 109 HIGHPRI_NICE_LEVEL = MIN_NICE, 110 111 WQ_NAME_LEN = 24, 112 }; 113 114 /* 115 * Structure fields follow one of the following exclusion rules. 116 * 117 * I: Modifiable by initialization/destruction paths and read-only for 118 * everyone else. 119 * 120 * P: Preemption protected. Disabling preemption is enough and should 121 * only be modified and accessed from the local cpu. 122 * 123 * L: pool->lock protected. Access with pool->lock held. 124 * 125 * K: Only modified by worker while holding pool->lock. Can be safely read by 126 * self, while holding pool->lock or from IRQ context if %current is the 127 * kworker. 128 * 129 * S: Only modified by worker self. 130 * 131 * A: wq_pool_attach_mutex protected. 132 * 133 * PL: wq_pool_mutex protected. 134 * 135 * PR: wq_pool_mutex protected for writes. RCU protected for reads. 136 * 137 * PW: wq_pool_mutex and wq->mutex protected for writes. Either for reads. 138 * 139 * PWR: wq_pool_mutex and wq->mutex protected for writes. Either or 140 * RCU for reads. 141 * 142 * WQ: wq->mutex protected. 143 * 144 * WR: wq->mutex protected for writes. RCU protected for reads. 145 * 146 * MD: wq_mayday_lock protected. 147 * 148 * WD: Used internally by the watchdog. 149 */ 150 151 /* struct worker is defined in workqueue_internal.h */ 152 153 struct worker_pool { 154 raw_spinlock_t lock; /* the pool lock */ 155 int cpu; /* I: the associated cpu */ 156 int node; /* I: the associated node ID */ 157 int id; /* I: pool ID */ 158 unsigned int flags; /* L: flags */ 159 160 unsigned long watchdog_ts; /* L: watchdog timestamp */ 161 bool cpu_stall; /* WD: stalled cpu bound pool */ 162 163 /* 164 * The counter is incremented in a process context on the associated CPU 165 * w/ preemption disabled, and decremented or reset in the same context 166 * but w/ pool->lock held. The readers grab pool->lock and are 167 * guaranteed to see if the counter reached zero. 168 */ 169 int nr_running; 170 171 struct list_head worklist; /* L: list of pending works */ 172 173 int nr_workers; /* L: total number of workers */ 174 int nr_idle; /* L: currently idle workers */ 175 176 struct list_head idle_list; /* L: list of idle workers */ 177 struct timer_list idle_timer; /* L: worker idle timeout */ 178 struct work_struct idle_cull_work; /* L: worker idle cleanup */ 179 180 struct timer_list mayday_timer; /* L: SOS timer for workers */ 181 182 /* a worker is either on busy_hash or idle_list, or the manager */ 183 DECLARE_HASHTABLE(busy_hash, BUSY_WORKER_HASH_ORDER); 184 /* L: hash of busy workers */ 185 186 struct worker *manager; /* L: purely informational */ 187 struct list_head workers; /* A: attached workers */ 188 struct list_head dying_workers; /* A: workers about to die */ 189 struct completion *detach_completion; /* all workers detached */ 190 191 struct ida worker_ida; /* worker IDs for task name */ 192 193 struct workqueue_attrs *attrs; /* I: worker attributes */ 194 struct hlist_node hash_node; /* PL: unbound_pool_hash node */ 195 int refcnt; /* PL: refcnt for unbound pools */ 196 197 /* 198 * Destruction of pool is RCU protected to allow dereferences 199 * from get_work_pool().
200 */ 201 struct rcu_head rcu; 202 }; 203 204 /* 205 * Per-pool_workqueue statistics. These can be monitored using 206 * tools/workqueue/wq_monitor.py. 207 */ 208 enum pool_workqueue_stats { 209 PWQ_STAT_STARTED, /* work items started execution */ 210 PWQ_STAT_COMPLETED, /* work items completed execution */ 211 PWQ_STAT_CPU_TIME, /* total CPU time consumed */ 212 PWQ_STAT_CPU_INTENSIVE, /* wq_cpu_intensive_thresh_us violations */ 213 PWQ_STAT_CM_WAKEUP, /* concurrency-management worker wakeups */ 214 PWQ_STAT_REPATRIATED, /* unbound workers brought back into scope */ 215 PWQ_STAT_MAYDAY, /* maydays to rescuer */ 216 PWQ_STAT_RESCUED, /* linked work items executed by rescuer */ 217 218 PWQ_NR_STATS, 219 }; 220 221 /* 222 * The per-pool workqueue. While queued, the lower WORK_STRUCT_FLAG_BITS 223 * of work_struct->data are used for flags and the remaining high bits 224 * point to the pwq; thus, pwqs need to be aligned at two's power of the 225 * number of flag bits. 226 */ 227 struct pool_workqueue { 228 struct worker_pool *pool; /* I: the associated pool */ 229 struct workqueue_struct *wq; /* I: the owning workqueue */ 230 int work_color; /* L: current color */ 231 int flush_color; /* L: flushing color */ 232 int refcnt; /* L: reference count */ 233 int nr_in_flight[WORK_NR_COLORS]; 234 /* L: nr of in_flight works */ 235 236 /* 237 * nr_active management and WORK_STRUCT_INACTIVE: 238 * 239 * When pwq->nr_active >= max_active, new work item is queued to 240 * pwq->inactive_works instead of pool->worklist and marked with 241 * WORK_STRUCT_INACTIVE. 242 * 243 * All work items marked with WORK_STRUCT_INACTIVE do not participate 244 * in pwq->nr_active and all work items in pwq->inactive_works are 245 * marked with WORK_STRUCT_INACTIVE. But not all WORK_STRUCT_INACTIVE 246 * work items are in pwq->inactive_works. Some of them are ready to 247 * run in pool->worklist or worker->scheduled. Those work items are 248 * only struct wq_barrier which is used for flush_work() and should 249 * not participate in pwq->nr_active. For non-barrier work item, it 250 * is marked with WORK_STRUCT_INACTIVE iff it is in pwq->inactive_works. 251 */ 252 int nr_active; /* L: nr of active works */ 253 int max_active; /* L: max active works */ 254 struct list_head inactive_works; /* L: inactive works */ 255 struct list_head pwqs_node; /* WR: node on wq->pwqs */ 256 struct list_head mayday_node; /* MD: node on wq->maydays */ 257 258 u64 stats[PWQ_NR_STATS]; 259 260 /* 261 * Release of unbound pwq is punted to a kthread_worker. See put_pwq() 262 * and pwq_release_workfn() for details. pool_workqueue itself is also 263 * RCU protected so that the first pwq can be determined without 264 * grabbing wq->mutex. 265 */ 266 struct kthread_work release_work; 267 struct rcu_head rcu; 268 } __aligned(1 << WORK_STRUCT_FLAG_BITS); 269 270 /* 271 * Structure used to wait for workqueue flush. 272 */ 273 struct wq_flusher { 274 struct list_head list; /* WQ: list of flushers */ 275 int flush_color; /* WQ: flush color waiting for */ 276 struct completion done; /* flush completion */ 277 }; 278 279 struct wq_device; 280 281 /* 282 * The externally visible workqueue. It relays the issued work items to 283 * the appropriate worker_pool through its pool_workqueues.
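 *
 * Illustrative sketch (not part of this file): an external user only deals
 * with this type through the workqueue API; "my_wq" and "my_work" below are
 * hypothetical caller-side names.
 *
 *	struct workqueue_struct *my_wq;
 *
 *	my_wq = alloc_workqueue("my_wq", WQ_UNBOUND, 0);
 *	if (!my_wq)
 *		return -ENOMEM;
 *	queue_work(my_wq, &my_work);
 *	...
 *	flush_workqueue(my_wq);
 *	destroy_workqueue(my_wq);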
284 */ 285 struct workqueue_struct { 286 struct list_head pwqs; /* WR: all pwqs of this wq */ 287 struct list_head list; /* PR: list of all workqueues */ 288 289 struct mutex mutex; /* protects this wq */ 290 int work_color; /* WQ: current work color */ 291 int flush_color; /* WQ: current flush color */ 292 atomic_t nr_pwqs_to_flush; /* flush in progress */ 293 struct wq_flusher *first_flusher; /* WQ: first flusher */ 294 struct list_head flusher_queue; /* WQ: flush waiters */ 295 struct list_head flusher_overflow; /* WQ: flush overflow list */ 296 297 struct list_head maydays; /* MD: pwqs requesting rescue */ 298 struct worker *rescuer; /* MD: rescue worker */ 299 300 int nr_drainers; /* WQ: drain in progress */ 301 int saved_max_active; /* WQ: saved pwq max_active */ 302 303 struct workqueue_attrs *unbound_attrs; /* PW: only for unbound wqs */ 304 struct pool_workqueue *dfl_pwq; /* PW: only for unbound wqs */ 305 306 #ifdef CONFIG_SYSFS 307 struct wq_device *wq_dev; /* I: for sysfs interface */ 308 #endif 309 #ifdef CONFIG_LOCKDEP 310 char *lock_name; 311 struct lock_class_key key; 312 struct lockdep_map lockdep_map; 313 #endif 314 char name[WQ_NAME_LEN]; /* I: workqueue name */ 315 316 /* 317 * Destruction of workqueue_struct is RCU protected to allow walking 318 * the workqueues list without grabbing wq_pool_mutex. 319 * This is used to dump all workqueues from sysrq. 320 */ 321 struct rcu_head rcu; 322 323 /* hot fields used during command issue, aligned to cacheline */ 324 unsigned int flags ____cacheline_aligned; /* WQ: WQ_* flags */ 325 struct pool_workqueue __percpu __rcu **cpu_pwq; /* I: per-cpu pwqs */ 326 }; 327 328 static struct kmem_cache *pwq_cache; 329 330 /* 331 * Each pod type describes how CPUs should be grouped for unbound workqueues. 332 * See the comment above workqueue_attrs->affn_scope. 333 */ 334 struct wq_pod_type { 335 int nr_pods; /* number of pods */ 336 cpumask_var_t *pod_cpus; /* pod -> cpus */ 337 int *pod_node; /* pod -> node */ 338 int *cpu_pod; /* cpu -> pod */ 339 }; 340 341 static struct wq_pod_type wq_pod_types[WQ_AFFN_NR_TYPES]; 342 static enum wq_affn_scope wq_affn_dfl = WQ_AFFN_DFL; 343 344 static const char *wq_affn_names[WQ_AFFN_NR_TYPES] = { 345 [WQ_AFFN_CPU] = "cpu", 346 [WQ_AFFN_SMT] = "smt", 347 [WQ_AFFN_CACHE] = "cache", 348 [WQ_AFFN_NUMA] = "numa", 349 [WQ_AFFN_SYSTEM] = "system", 350 }; 351 352 /* 353 * Per-cpu work items which run for longer than the following threshold are 354 * automatically considered CPU intensive and excluded from concurrency 355 * management to prevent them from noticeably delaying other per-cpu work items. 356 * ULONG_MAX indicates that the user hasn't overridden it with a boot parameter. 357 * The actual value is initialized in wq_cpu_intensive_thresh_init(). 358 */ 359 static unsigned long wq_cpu_intensive_thresh_us = ULONG_MAX; 360 module_param_named(cpu_intensive_thresh_us, wq_cpu_intensive_thresh_us, ulong, 0644); 361 362 /* see the comment above the definition of WQ_POWER_EFFICIENT */ 363 static bool wq_power_efficient = IS_ENABLED(CONFIG_WQ_POWER_EFFICIENT_DEFAULT); 364 module_param_named(power_efficient, wq_power_efficient, bool, 0444); 365 366 static bool wq_online; /* can kworkers be created yet? 
*/ 367 368 /* buf for wq_update_unbound_pod_attrs(), protected by CPU hotplug exclusion */ 369 static struct workqueue_attrs *wq_update_pod_attrs_buf; 370 371 static DEFINE_MUTEX(wq_pool_mutex); /* protects pools and workqueues list */ 372 static DEFINE_MUTEX(wq_pool_attach_mutex); /* protects worker attach/detach */ 373 static DEFINE_RAW_SPINLOCK(wq_mayday_lock); /* protects wq->maydays list */ 374 /* wait for manager to go away */ 375 static struct rcuwait manager_wait = __RCUWAIT_INITIALIZER(manager_wait); 376 377 static LIST_HEAD(workqueues); /* PR: list of all workqueues */ 378 static bool workqueue_freezing; /* PL: have wqs started freezing? */ 379 380 /* PL&A: allowable cpus for unbound wqs and work items */ 381 static cpumask_var_t wq_unbound_cpumask; 382 383 /* to further constrain wq_unbound_cpumask by cmdline parameter */ 384 static struct cpumask wq_cmdline_cpumask __initdata; 385 386 /* CPU where unbound work was last round robin scheduled from this CPU */ 387 static DEFINE_PER_CPU(int, wq_rr_cpu_last); 388 389 /* 390 * Local execution of unbound work items is no longer guaranteed. The 391 * following always forces round-robin CPU selection on unbound work items 392 * to uncover usages which depend on it. 393 */ 394 #ifdef CONFIG_DEBUG_WQ_FORCE_RR_CPU 395 static bool wq_debug_force_rr_cpu = true; 396 #else 397 static bool wq_debug_force_rr_cpu = false; 398 #endif 399 module_param_named(debug_force_rr_cpu, wq_debug_force_rr_cpu, bool, 0644); 400 401 /* the per-cpu worker pools */ 402 static DEFINE_PER_CPU_SHARED_ALIGNED(struct worker_pool [NR_STD_WORKER_POOLS], cpu_worker_pools); 403 404 static DEFINE_IDR(worker_pool_idr); /* PR: idr of all pools */ 405 406 /* PL: hash of all unbound pools keyed by pool->attrs */ 407 static DEFINE_HASHTABLE(unbound_pool_hash, UNBOUND_POOL_HASH_ORDER); 408 409 /* I: attributes used when instantiating standard unbound pools on demand */ 410 static struct workqueue_attrs *unbound_std_wq_attrs[NR_STD_WORKER_POOLS]; 411 412 /* I: attributes used when instantiating ordered pools on demand */ 413 static struct workqueue_attrs *ordered_wq_attrs[NR_STD_WORKER_POOLS]; 414 415 /* 416 * I: kthread_worker to release pwq's. pwq release needs to be bounced to a 417 * process context while holding a pool lock. Bounce to a dedicated kthread 418 * worker to avoid A-A deadlocks.
419 */ 420 static struct kthread_worker *pwq_release_worker; 421 422 struct workqueue_struct *system_wq __read_mostly; 423 EXPORT_SYMBOL(system_wq); 424 struct workqueue_struct *system_highpri_wq __read_mostly; 425 EXPORT_SYMBOL_GPL(system_highpri_wq); 426 struct workqueue_struct *system_long_wq __read_mostly; 427 EXPORT_SYMBOL_GPL(system_long_wq); 428 struct workqueue_struct *system_unbound_wq __read_mostly; 429 EXPORT_SYMBOL_GPL(system_unbound_wq); 430 struct workqueue_struct *system_freezable_wq __read_mostly; 431 EXPORT_SYMBOL_GPL(system_freezable_wq); 432 struct workqueue_struct *system_power_efficient_wq __read_mostly; 433 EXPORT_SYMBOL_GPL(system_power_efficient_wq); 434 struct workqueue_struct *system_freezable_power_efficient_wq __read_mostly; 435 EXPORT_SYMBOL_GPL(system_freezable_power_efficient_wq); 436 437 static int worker_thread(void *__worker); 438 static void workqueue_sysfs_unregister(struct workqueue_struct *wq); 439 static void show_pwq(struct pool_workqueue *pwq); 440 static void show_one_worker_pool(struct worker_pool *pool); 441 442 #define CREATE_TRACE_POINTS 443 #include <trace/events/workqueue.h> 444 445 #define assert_rcu_or_pool_mutex() \ 446 RCU_LOCKDEP_WARN(!rcu_read_lock_held() && \ 447 !lockdep_is_held(&wq_pool_mutex), \ 448 "RCU or wq_pool_mutex should be held") 449 450 #define assert_rcu_or_wq_mutex_or_pool_mutex(wq) \ 451 RCU_LOCKDEP_WARN(!rcu_read_lock_held() && \ 452 !lockdep_is_held(&wq->mutex) && \ 453 !lockdep_is_held(&wq_pool_mutex), \ 454 "RCU, wq->mutex or wq_pool_mutex should be held") 455 456 #define for_each_cpu_worker_pool(pool, cpu) \ 457 for ((pool) = &per_cpu(cpu_worker_pools, cpu)[0]; \ 458 (pool) < &per_cpu(cpu_worker_pools, cpu)[NR_STD_WORKER_POOLS]; \ 459 (pool)++) 460 461 /** 462 * for_each_pool - iterate through all worker_pools in the system 463 * @pool: iteration cursor 464 * @pi: integer used for iteration 465 * 466 * This must be called either with wq_pool_mutex held or RCU read 467 * locked. If the pool needs to be used beyond the locking in effect, the 468 * caller is responsible for guaranteeing that the pool stays online. 469 * 470 * The if/else clause exists only for the lockdep assertion and can be 471 * ignored. 472 */ 473 #define for_each_pool(pool, pi) \ 474 idr_for_each_entry(&worker_pool_idr, pool, pi) \ 475 if (({ assert_rcu_or_pool_mutex(); false; })) { } \ 476 else 477 478 /** 479 * for_each_pool_worker - iterate through all workers of a worker_pool 480 * @worker: iteration cursor 481 * @pool: worker_pool to iterate workers of 482 * 483 * This must be called with wq_pool_attach_mutex. 484 * 485 * The if/else clause exists only for the lockdep assertion and can be 486 * ignored. 487 */ 488 #define for_each_pool_worker(worker, pool) \ 489 list_for_each_entry((worker), &(pool)->workers, node) \ 490 if (({ lockdep_assert_held(&wq_pool_attach_mutex); false; })) { } \ 491 else 492 493 /** 494 * for_each_pwq - iterate through all pool_workqueues of the specified workqueue 495 * @pwq: iteration cursor 496 * @wq: the target workqueue 497 * 498 * This must be called either with wq->mutex held or RCU read locked. 499 * If the pwq needs to be used beyond the locking in effect, the caller is 500 * responsible for guaranteeing that the pwq stays online. 501 * 502 * The if/else clause exists only for the lockdep assertion and can be 503 * ignored. 
504 */ 505 #define for_each_pwq(pwq, wq) \ 506 list_for_each_entry_rcu((pwq), &(wq)->pwqs, pwqs_node, \ 507 lockdep_is_held(&(wq->mutex))) 508 509 #ifdef CONFIG_DEBUG_OBJECTS_WORK 510 511 static const struct debug_obj_descr work_debug_descr; 512 513 static void *work_debug_hint(void *addr) 514 { 515 return ((struct work_struct *) addr)->func; 516 } 517 518 static bool work_is_static_object(void *addr) 519 { 520 struct work_struct *work = addr; 521 522 return test_bit(WORK_STRUCT_STATIC_BIT, work_data_bits(work)); 523 } 524 525 /* 526 * fixup_init is called when: 527 * - an active object is initialized 528 */ 529 static bool work_fixup_init(void *addr, enum debug_obj_state state) 530 { 531 struct work_struct *work = addr; 532 533 switch (state) { 534 case ODEBUG_STATE_ACTIVE: 535 cancel_work_sync(work); 536 debug_object_init(work, &work_debug_descr); 537 return true; 538 default: 539 return false; 540 } 541 } 542 543 /* 544 * fixup_free is called when: 545 * - an active object is freed 546 */ 547 static bool work_fixup_free(void *addr, enum debug_obj_state state) 548 { 549 struct work_struct *work = addr; 550 551 switch (state) { 552 case ODEBUG_STATE_ACTIVE: 553 cancel_work_sync(work); 554 debug_object_free(work, &work_debug_descr); 555 return true; 556 default: 557 return false; 558 } 559 } 560 561 static const struct debug_obj_descr work_debug_descr = { 562 .name = "work_struct", 563 .debug_hint = work_debug_hint, 564 .is_static_object = work_is_static_object, 565 .fixup_init = work_fixup_init, 566 .fixup_free = work_fixup_free, 567 }; 568 569 static inline void debug_work_activate(struct work_struct *work) 570 { 571 debug_object_activate(work, &work_debug_descr); 572 } 573 574 static inline void debug_work_deactivate(struct work_struct *work) 575 { 576 debug_object_deactivate(work, &work_debug_descr); 577 } 578 579 void __init_work(struct work_struct *work, int onstack) 580 { 581 if (onstack) 582 debug_object_init_on_stack(work, &work_debug_descr); 583 else 584 debug_object_init(work, &work_debug_descr); 585 } 586 EXPORT_SYMBOL_GPL(__init_work); 587 588 void destroy_work_on_stack(struct work_struct *work) 589 { 590 debug_object_free(work, &work_debug_descr); 591 } 592 EXPORT_SYMBOL_GPL(destroy_work_on_stack); 593 594 void destroy_delayed_work_on_stack(struct delayed_work *work) 595 { 596 destroy_timer_on_stack(&work->timer); 597 debug_object_free(&work->work, &work_debug_descr); 598 } 599 EXPORT_SYMBOL_GPL(destroy_delayed_work_on_stack); 600 601 #else 602 static inline void debug_work_activate(struct work_struct *work) { } 603 static inline void debug_work_deactivate(struct work_struct *work) { } 604 #endif 605 606 /** 607 * worker_pool_assign_id - allocate ID and assign it to @pool 608 * @pool: the pool pointer of interest 609 * 610 * Returns 0 if ID in [0, WORK_OFFQ_POOL_NONE) is allocated and assigned 611 * successfully, -errno on failure. 
612 */ 613 static int worker_pool_assign_id(struct worker_pool *pool) 614 { 615 int ret; 616 617 lockdep_assert_held(&wq_pool_mutex); 618 619 ret = idr_alloc(&worker_pool_idr, pool, 0, WORK_OFFQ_POOL_NONE, 620 GFP_KERNEL); 621 if (ret >= 0) { 622 pool->id = ret; 623 return 0; 624 } 625 return ret; 626 } 627 628 static unsigned int work_color_to_flags(int color) 629 { 630 return color << WORK_STRUCT_COLOR_SHIFT; 631 } 632 633 static int get_work_color(unsigned long work_data) 634 { 635 return (work_data >> WORK_STRUCT_COLOR_SHIFT) & 636 ((1 << WORK_STRUCT_COLOR_BITS) - 1); 637 } 638 639 static int work_next_color(int color) 640 { 641 return (color + 1) % WORK_NR_COLORS; 642 } 643 644 /* 645 * While queued, %WORK_STRUCT_PWQ is set and non flag bits of a work's data 646 * contain the pointer to the queued pwq. Once execution starts, the flag 647 * is cleared and the high bits contain OFFQ flags and pool ID. 648 * 649 * set_work_pwq(), set_work_pool_and_clear_pending(), mark_work_canceling() 650 * and clear_work_data() can be used to set the pwq, pool or clear 651 * work->data. These functions should only be called while the work is 652 * owned - ie. while the PENDING bit is set. 653 * 654 * get_work_pool() and get_work_pwq() can be used to obtain the pool or pwq 655 * corresponding to a work. Pool is available once the work has been 656 * queued anywhere after initialization until it is sync canceled. pwq is 657 * available only while the work item is queued. 658 * 659 * %WORK_OFFQ_CANCELING is used to mark a work item which is being 660 * canceled. While being canceled, a work item may have its PENDING set 661 * but stay off timer and worklist for arbitrarily long and nobody should 662 * try to steal the PENDING bit. 663 */ 664 static inline void set_work_data(struct work_struct *work, unsigned long data, 665 unsigned long flags) 666 { 667 WARN_ON_ONCE(!work_pending(work)); 668 atomic_long_set(&work->data, data | flags | work_static(work)); 669 } 670 671 static void set_work_pwq(struct work_struct *work, struct pool_workqueue *pwq, 672 unsigned long extra_flags) 673 { 674 set_work_data(work, (unsigned long)pwq, 675 WORK_STRUCT_PENDING | WORK_STRUCT_PWQ | extra_flags); 676 } 677 678 static void set_work_pool_and_keep_pending(struct work_struct *work, 679 int pool_id) 680 { 681 set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT, 682 WORK_STRUCT_PENDING); 683 } 684 685 static void set_work_pool_and_clear_pending(struct work_struct *work, 686 int pool_id) 687 { 688 /* 689 * The following wmb is paired with the implied mb in 690 * test_and_set_bit(PENDING) and ensures all updates to @work made 691 * here are visible to and precede any updates by the next PENDING 692 * owner. 693 */ 694 smp_wmb(); 695 set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT, 0); 696 /* 697 * The following mb guarantees that previous clear of a PENDING bit 698 * will not be reordered with any speculative LOADS or STORES from 699 * work->current_func, which is executed afterwards. This possible 700 * reordering can lead to a missed execution on attempt to queue 701 * the same @work. E.g. 
consider this case: 702 * 703 * CPU#0 CPU#1 704 * ---------------------------- -------------------------------- 705 * 706 * 1 STORE event_indicated 707 * 2 queue_work_on() { 708 * 3 test_and_set_bit(PENDING) 709 * 4 } set_..._and_clear_pending() { 710 * 5 set_work_data() # clear bit 711 * 6 smp_mb() 712 * 7 work->current_func() { 713 * 8 LOAD event_indicated 714 * } 715 * 716 * Without an explicit full barrier speculative LOAD on line 8 can 717 * be executed before CPU#0 does STORE on line 1. If that happens, 718 * CPU#0 observes the PENDING bit is still set and new execution of 719 * a @work is not queued in a hope, that CPU#1 will eventually 720 * finish the queued @work. Meanwhile CPU#1 does not see 721 * event_indicated is set, because speculative LOAD was executed 722 * before actual STORE. 723 */ 724 smp_mb(); 725 } 726 727 static void clear_work_data(struct work_struct *work) 728 { 729 smp_wmb(); /* see set_work_pool_and_clear_pending() */ 730 set_work_data(work, WORK_STRUCT_NO_POOL, 0); 731 } 732 733 static inline struct pool_workqueue *work_struct_pwq(unsigned long data) 734 { 735 return (struct pool_workqueue *)(data & WORK_STRUCT_WQ_DATA_MASK); 736 } 737 738 static struct pool_workqueue *get_work_pwq(struct work_struct *work) 739 { 740 unsigned long data = atomic_long_read(&work->data); 741 742 if (data & WORK_STRUCT_PWQ) 743 return work_struct_pwq(data); 744 else 745 return NULL; 746 } 747 748 /** 749 * get_work_pool - return the worker_pool a given work was associated with 750 * @work: the work item of interest 751 * 752 * Pools are created and destroyed under wq_pool_mutex, and allows read 753 * access under RCU read lock. As such, this function should be 754 * called under wq_pool_mutex or inside of a rcu_read_lock() region. 755 * 756 * All fields of the returned pool are accessible as long as the above 757 * mentioned locking is in effect. If the returned pool needs to be used 758 * beyond the critical section, the caller is responsible for ensuring the 759 * returned pool is and stays online. 760 * 761 * Return: The worker_pool @work was last associated with. %NULL if none. 762 */ 763 static struct worker_pool *get_work_pool(struct work_struct *work) 764 { 765 unsigned long data = atomic_long_read(&work->data); 766 int pool_id; 767 768 assert_rcu_or_pool_mutex(); 769 770 if (data & WORK_STRUCT_PWQ) 771 return work_struct_pwq(data)->pool; 772 773 pool_id = data >> WORK_OFFQ_POOL_SHIFT; 774 if (pool_id == WORK_OFFQ_POOL_NONE) 775 return NULL; 776 777 return idr_find(&worker_pool_idr, pool_id); 778 } 779 780 /** 781 * get_work_pool_id - return the worker pool ID a given work is associated with 782 * @work: the work item of interest 783 * 784 * Return: The worker_pool ID @work was last associated with. 785 * %WORK_OFFQ_POOL_NONE if none. 786 */ 787 static int get_work_pool_id(struct work_struct *work) 788 { 789 unsigned long data = atomic_long_read(&work->data); 790 791 if (data & WORK_STRUCT_PWQ) 792 return work_struct_pwq(data)->pool->id; 793 794 return data >> WORK_OFFQ_POOL_SHIFT; 795 } 796 797 static void mark_work_canceling(struct work_struct *work) 798 { 799 unsigned long pool_id = get_work_pool_id(work); 800 801 pool_id <<= WORK_OFFQ_POOL_SHIFT; 802 set_work_data(work, pool_id | WORK_OFFQ_CANCELING, WORK_STRUCT_PENDING); 803 } 804 805 static bool work_is_canceling(struct work_struct *work) 806 { 807 unsigned long data = atomic_long_read(&work->data); 808 809 return !(data & WORK_STRUCT_PWQ) && (data & WORK_OFFQ_CANCELING); 810 } 811 812 /* 813 * Policy functions. 
These define the policies on how the global worker 814 * pools are managed. Unless noted otherwise, these functions assume that 815 * they're being called with pool->lock held. 816 */ 817 818 /* 819 * Need to wake up a worker? Called from anything but currently 820 * running workers. 821 * 822 * Note that, because unbound workers never contribute to nr_running, this 823 * function will always return %true for unbound pools as long as the 824 * worklist isn't empty. 825 */ 826 static bool need_more_worker(struct worker_pool *pool) 827 { 828 return !list_empty(&pool->worklist) && !pool->nr_running; 829 } 830 831 /* Can I start working? Called from busy but !running workers. */ 832 static bool may_start_working(struct worker_pool *pool) 833 { 834 return pool->nr_idle; 835 } 836 837 /* Do I need to keep working? Called from currently running workers. */ 838 static bool keep_working(struct worker_pool *pool) 839 { 840 return !list_empty(&pool->worklist) && (pool->nr_running <= 1); 841 } 842 843 /* Do we need a new worker? Called from manager. */ 844 static bool need_to_create_worker(struct worker_pool *pool) 845 { 846 return need_more_worker(pool) && !may_start_working(pool); 847 } 848 849 /* Do we have too many workers and should some go away? */ 850 static bool too_many_workers(struct worker_pool *pool) 851 { 852 bool managing = pool->flags & POOL_MANAGER_ACTIVE; 853 int nr_idle = pool->nr_idle + managing; /* manager is considered idle */ 854 int nr_busy = pool->nr_workers - nr_idle; 855 856 return nr_idle > 2 && (nr_idle - 2) * MAX_IDLE_WORKERS_RATIO >= nr_busy; 857 } 858 859 /** 860 * worker_set_flags - set worker flags and adjust nr_running accordingly 861 * @worker: self 862 * @flags: flags to set 863 * 864 * Set @flags in @worker->flags and adjust nr_running accordingly. 865 */ 866 static inline void worker_set_flags(struct worker *worker, unsigned int flags) 867 { 868 struct worker_pool *pool = worker->pool; 869 870 lockdep_assert_held(&pool->lock); 871 872 /* If transitioning into NOT_RUNNING, adjust nr_running. */ 873 if ((flags & WORKER_NOT_RUNNING) && 874 !(worker->flags & WORKER_NOT_RUNNING)) { 875 pool->nr_running--; 876 } 877 878 worker->flags |= flags; 879 } 880 881 /** 882 * worker_clr_flags - clear worker flags and adjust nr_running accordingly 883 * @worker: self 884 * @flags: flags to clear 885 * 886 * Clear @flags in @worker->flags and adjust nr_running accordingly. 887 */ 888 static inline void worker_clr_flags(struct worker *worker, unsigned int flags) 889 { 890 struct worker_pool *pool = worker->pool; 891 unsigned int oflags = worker->flags; 892 893 lockdep_assert_held(&pool->lock); 894 895 worker->flags &= ~flags; 896 897 /* 898 * If transitioning out of NOT_RUNNING, increment nr_running. Note 899 * that the nested NOT_RUNNING is not a noop. NOT_RUNNING is mask 900 * of multiple flags, not a single flag. 901 */ 902 if ((flags & WORKER_NOT_RUNNING) && (oflags & WORKER_NOT_RUNNING)) 903 if (!(worker->flags & WORKER_NOT_RUNNING)) 904 pool->nr_running++; 905 } 906 907 /* Return the first idle worker. Called with pool->lock held. */ 908 static struct worker *first_idle_worker(struct worker_pool *pool) 909 { 910 if (unlikely(list_empty(&pool->idle_list))) 911 return NULL; 912 913 return list_first_entry(&pool->idle_list, struct worker, entry); 914 } 915 916 /** 917 * worker_enter_idle - enter idle state 918 * @worker: worker which is entering idle state 919 * 920 * @worker is entering idle state. Update stats and idle timer if 921 * necessary. 
922 * 923 * LOCKING: 924 * raw_spin_lock_irq(pool->lock). 925 */ 926 static void worker_enter_idle(struct worker *worker) 927 { 928 struct worker_pool *pool = worker->pool; 929 930 if (WARN_ON_ONCE(worker->flags & WORKER_IDLE) || 931 WARN_ON_ONCE(!list_empty(&worker->entry) && 932 (worker->hentry.next || worker->hentry.pprev))) 933 return; 934 935 /* can't use worker_set_flags(), also called from create_worker() */ 936 worker->flags |= WORKER_IDLE; 937 pool->nr_idle++; 938 worker->last_active = jiffies; 939 940 /* idle_list is LIFO */ 941 list_add(&worker->entry, &pool->idle_list); 942 943 if (too_many_workers(pool) && !timer_pending(&pool->idle_timer)) 944 mod_timer(&pool->idle_timer, jiffies + IDLE_WORKER_TIMEOUT); 945 946 /* Sanity check nr_running. */ 947 WARN_ON_ONCE(pool->nr_workers == pool->nr_idle && pool->nr_running); 948 } 949 950 /** 951 * worker_leave_idle - leave idle state 952 * @worker: worker which is leaving idle state 953 * 954 * @worker is leaving idle state. Update stats. 955 * 956 * LOCKING: 957 * raw_spin_lock_irq(pool->lock). 958 */ 959 static void worker_leave_idle(struct worker *worker) 960 { 961 struct worker_pool *pool = worker->pool; 962 963 if (WARN_ON_ONCE(!(worker->flags & WORKER_IDLE))) 964 return; 965 worker_clr_flags(worker, WORKER_IDLE); 966 pool->nr_idle--; 967 list_del_init(&worker->entry); 968 } 969 970 /** 971 * find_worker_executing_work - find worker which is executing a work 972 * @pool: pool of interest 973 * @work: work to find worker for 974 * 975 * Find a worker which is executing @work on @pool by searching 976 * @pool->busy_hash which is keyed by the address of @work. For a worker 977 * to match, its current execution should match the address of @work and 978 * its work function. This is to avoid unwanted dependency between 979 * unrelated work executions through a work item being recycled while still 980 * being executed. 981 * 982 * This is a bit tricky. A work item may be freed once its execution 983 * starts and nothing prevents the freed area from being recycled for 984 * another work item. If the same work item address ends up being reused 985 * before the original execution finishes, workqueue will identify the 986 * recycled work item as currently executing and make it wait until the 987 * current execution finishes, introducing an unwanted dependency. 988 * 989 * This function checks the work item address and work function to avoid 990 * false positives. Note that this isn't complete as one may construct a 991 * work function which can introduce dependency onto itself through a 992 * recycled work item. Well, if somebody wants to shoot oneself in the 993 * foot that badly, there's only so much we can do, and if such deadlock 994 * actually occurs, it should be easy to locate the culprit work function. 995 * 996 * CONTEXT: 997 * raw_spin_lock_irq(pool->lock). 998 * 999 * Return: 1000 * Pointer to worker which is executing @work if found, %NULL 1001 * otherwise. 
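 *
 * For illustration, the caller-side collision handling (mirroring what
 * assign_work() below does) looks like:
 *
 *	collision = find_worker_executing_work(pool, work);
 *	if (unlikely(collision)) {
 *		move_linked_works(work, &collision->scheduled, nextp);
 *		return false;
 *	}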
1002 */ 1003 static struct worker *find_worker_executing_work(struct worker_pool *pool, 1004 struct work_struct *work) 1005 { 1006 struct worker *worker; 1007 1008 hash_for_each_possible(pool->busy_hash, worker, hentry, 1009 (unsigned long)work) 1010 if (worker->current_work == work && 1011 worker->current_func == work->func) 1012 return worker; 1013 1014 return NULL; 1015 } 1016 1017 /** 1018 * move_linked_works - move linked works to a list 1019 * @work: start of series of works to be scheduled 1020 * @head: target list to append @work to 1021 * @nextp: out parameter for nested worklist walking 1022 * 1023 * Schedule linked works starting from @work to @head. Work series to be 1024 * scheduled starts at @work and includes any consecutive work with 1025 * WORK_STRUCT_LINKED set in its predecessor. See assign_work() for details on 1026 * @nextp. 1027 * 1028 * CONTEXT: 1029 * raw_spin_lock_irq(pool->lock). 1030 */ 1031 static void move_linked_works(struct work_struct *work, struct list_head *head, 1032 struct work_struct **nextp) 1033 { 1034 struct work_struct *n; 1035 1036 /* 1037 * Linked worklist will always end before the end of the list, 1038 * use NULL for list head. 1039 */ 1040 list_for_each_entry_safe_from(work, n, NULL, entry) { 1041 list_move_tail(&work->entry, head); 1042 if (!(*work_data_bits(work) & WORK_STRUCT_LINKED)) 1043 break; 1044 } 1045 1046 /* 1047 * If we're already inside safe list traversal and have moved 1048 * multiple works to the scheduled queue, the next position 1049 * needs to be updated. 1050 */ 1051 if (nextp) 1052 *nextp = n; 1053 } 1054 1055 /** 1056 * assign_work - assign a work item and its linked work items to a worker 1057 * @work: work to assign 1058 * @worker: worker to assign to 1059 * @nextp: out parameter for nested worklist walking 1060 * 1061 * Assign @work and its linked work items to @worker. If @work is already being 1062 * executed by another worker in the same pool, it'll be punted there. 1063 * 1064 * If @nextp is not NULL, it's updated to point to the next work of the last 1065 * scheduled work. This allows assign_work() to be nested inside 1066 * list_for_each_entry_safe(). 1067 * 1068 * Returns %true if @work was successfully assigned to @worker. %false if @work 1069 * was punted to another worker already executing it. 1070 */ 1071 static bool assign_work(struct work_struct *work, struct worker *worker, 1072 struct work_struct **nextp) 1073 { 1074 struct worker_pool *pool = worker->pool; 1075 struct worker *collision; 1076 1077 lockdep_assert_held(&pool->lock); 1078 1079 /* 1080 * A single work shouldn't be executed concurrently by multiple workers. 1081 * __queue_work() ensures that @work doesn't jump to a different pool 1082 * while still running in the previous pool. Here, we should ensure that 1083 * @work is not executed concurrently by multiple workers from the same 1084 * pool. Check whether anyone is already processing the work. If so, 1085 * defer the work to the currently executing one. 1086 */ 1087 collision = find_worker_executing_work(pool, work); 1088 if (unlikely(collision)) { 1089 move_linked_works(work, &collision->scheduled, nextp); 1090 return false; 1091 } 1092 1093 move_linked_works(work, &worker->scheduled, nextp); 1094 return true; 1095 } 1096 1097 /** 1098 * kick_pool - wake up an idle worker if necessary 1099 * @pool: pool to kick 1100 * 1101 * @pool may have pending work items. Wake up worker if necessary. Returns 1102 * whether a worker was woken up. 
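 *
 * Illustrative caller pattern (as __queue_work() below does, with
 * pool->lock held):
 *
 *	raw_spin_lock(&pool->lock);
 *	insert_work(pwq, work, &pool->worklist, work_flags);
 *	kick_pool(pool);
 *	raw_spin_unlock(&pool->lock);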
1103 */ 1104 static bool kick_pool(struct worker_pool *pool) 1105 { 1106 struct worker *worker = first_idle_worker(pool); 1107 struct task_struct *p; 1108 1109 lockdep_assert_held(&pool->lock); 1110 1111 if (!need_more_worker(pool) || !worker) 1112 return false; 1113 1114 p = worker->task; 1115 1116 #ifdef CONFIG_SMP 1117 /* 1118 * Idle @worker is about to execute @work and waking up provides an 1119 * opportunity to migrate @worker at a lower cost by setting the task's 1120 * wake_cpu field. Let's see if we want to move @worker to improve 1121 * execution locality. 1122 * 1123 * We're waking the worker that went idle the latest and there's some 1124 * chance that @worker is marked idle but hasn't gone off CPU yet. If 1125 * so, setting the wake_cpu won't do anything. As this is a best-effort 1126 * optimization and the race window is narrow, let's leave as-is for 1127 * now. If this becomes pronounced, we can skip over workers which are 1128 * still on cpu when picking an idle worker. 1129 * 1130 * If @pool has non-strict affinity, @worker might have ended up outside 1131 * its affinity scope. Repatriate. 1132 */ 1133 if (!pool->attrs->affn_strict && 1134 !cpumask_test_cpu(p->wake_cpu, pool->attrs->__pod_cpumask)) { 1135 struct work_struct *work = list_first_entry(&pool->worklist, 1136 struct work_struct, entry); 1137 p->wake_cpu = cpumask_any_distribute(pool->attrs->__pod_cpumask); 1138 get_work_pwq(work)->stats[PWQ_STAT_REPATRIATED]++; 1139 } 1140 #endif 1141 wake_up_process(p); 1142 return true; 1143 } 1144 1145 #ifdef CONFIG_WQ_CPU_INTENSIVE_REPORT 1146 1147 /* 1148 * Concurrency-managed per-cpu work items that hog CPU for longer than 1149 * wq_cpu_intensive_thresh_us trigger the automatic CPU_INTENSIVE mechanism, 1150 * which prevents them from stalling other concurrency-managed work items. If a 1151 * work function keeps triggering this mechanism, it's likely that the work item 1152 * should be using an unbound workqueue instead. 1153 * 1154 * wq_cpu_intensive_report() tracks work functions which trigger such conditions 1155 * and report them so that they can be examined and converted to use unbound 1156 * workqueues as appropriate. To avoid flooding the console, each violating work 1157 * function is tracked and reported with exponential backoff. 1158 */ 1159 #define WCI_MAX_ENTS 128 1160 1161 struct wci_ent { 1162 work_func_t func; 1163 atomic64_t cnt; 1164 struct hlist_node hash_node; 1165 }; 1166 1167 static struct wci_ent wci_ents[WCI_MAX_ENTS]; 1168 static int wci_nr_ents; 1169 static DEFINE_RAW_SPINLOCK(wci_lock); 1170 static DEFINE_HASHTABLE(wci_hash, ilog2(WCI_MAX_ENTS)); 1171 1172 static struct wci_ent *wci_find_ent(work_func_t func) 1173 { 1174 struct wci_ent *ent; 1175 1176 hash_for_each_possible_rcu(wci_hash, ent, hash_node, 1177 (unsigned long)func) { 1178 if (ent->func == func) 1179 return ent; 1180 } 1181 return NULL; 1182 } 1183 1184 static void wq_cpu_intensive_report(work_func_t func) 1185 { 1186 struct wci_ent *ent; 1187 1188 restart: 1189 ent = wci_find_ent(func); 1190 if (ent) { 1191 u64 cnt; 1192 1193 /* 1194 * Start reporting from the fourth time and back off 1195 * exponentially. 1196 */ 1197 cnt = atomic64_inc_return_relaxed(&ent->cnt); 1198 if (cnt >= 4 && is_power_of_2(cnt)) 1199 printk_deferred(KERN_WARNING "workqueue: %ps hogged CPU for >%luus %llu times, consider switching to WQ_UNBOUND\n", 1200 ent->func, wq_cpu_intensive_thresh_us, 1201 atomic64_read(&ent->cnt)); 1202 return; 1203 } 1204 1205 /* 1206 * @func is a new violation. 
Allocate a new entry for it. If wci_ents[] 1207 * is exhausted, something went really wrong and we probably made enough 1208 * noise already. 1209 */ 1210 if (wci_nr_ents >= WCI_MAX_ENTS) 1211 return; 1212 1213 raw_spin_lock(&wci_lock); 1214 1215 if (wci_nr_ents >= WCI_MAX_ENTS) { 1216 raw_spin_unlock(&wci_lock); 1217 return; 1218 } 1219 1220 if (wci_find_ent(func)) { 1221 raw_spin_unlock(&wci_lock); 1222 goto restart; 1223 } 1224 1225 ent = &wci_ents[wci_nr_ents++]; 1226 ent->func = func; 1227 atomic64_set(&ent->cnt, 1); 1228 hash_add_rcu(wci_hash, &ent->hash_node, (unsigned long)func); 1229 1230 raw_spin_unlock(&wci_lock); 1231 } 1232 1233 #else /* CONFIG_WQ_CPU_INTENSIVE_REPORT */ 1234 static void wq_cpu_intensive_report(work_func_t func) {} 1235 #endif /* CONFIG_WQ_CPU_INTENSIVE_REPORT */ 1236 1237 /** 1238 * wq_worker_running - a worker is running again 1239 * @task: task waking up 1240 * 1241 * This function is called when a worker returns from schedule() 1242 */ 1243 void wq_worker_running(struct task_struct *task) 1244 { 1245 struct worker *worker = kthread_data(task); 1246 1247 if (!READ_ONCE(worker->sleeping)) 1248 return; 1249 1250 /* 1251 * If preempted by unbind_workers() between the WORKER_NOT_RUNNING check 1252 * and the nr_running increment below, we may ruin the nr_running reset 1253 * and leave with an unexpected pool->nr_running == 1 on the newly unbound 1254 * pool. Protect against such race. 1255 */ 1256 preempt_disable(); 1257 if (!(worker->flags & WORKER_NOT_RUNNING)) 1258 worker->pool->nr_running++; 1259 preempt_enable(); 1260 1261 /* 1262 * CPU intensive auto-detection cares about how long a work item hogged 1263 * CPU without sleeping. Reset the starting timestamp on wakeup. 1264 */ 1265 worker->current_at = worker->task->se.sum_exec_runtime; 1266 1267 WRITE_ONCE(worker->sleeping, 0); 1268 } 1269 1270 /** 1271 * wq_worker_sleeping - a worker is going to sleep 1272 * @task: task going to sleep 1273 * 1274 * This function is called from schedule() when a busy worker is 1275 * going to sleep. 1276 */ 1277 void wq_worker_sleeping(struct task_struct *task) 1278 { 1279 struct worker *worker = kthread_data(task); 1280 struct worker_pool *pool; 1281 1282 /* 1283 * Rescuers, which may not have all the fields set up like normal 1284 * workers, also reach here, let's not access anything before 1285 * checking NOT_RUNNING. 1286 */ 1287 if (worker->flags & WORKER_NOT_RUNNING) 1288 return; 1289 1290 pool = worker->pool; 1291 1292 /* Return if preempted before wq_worker_running() was reached */ 1293 if (READ_ONCE(worker->sleeping)) 1294 return; 1295 1296 WRITE_ONCE(worker->sleeping, 1); 1297 raw_spin_lock_irq(&pool->lock); 1298 1299 /* 1300 * Recheck in case unbind_workers() preempted us. We don't 1301 * want to decrement nr_running after the worker is unbound 1302 * and nr_running has been reset. 1303 */ 1304 if (worker->flags & WORKER_NOT_RUNNING) { 1305 raw_spin_unlock_irq(&pool->lock); 1306 return; 1307 } 1308 1309 pool->nr_running--; 1310 if (kick_pool(pool)) 1311 worker->current_pwq->stats[PWQ_STAT_CM_WAKEUP]++; 1312 1313 raw_spin_unlock_irq(&pool->lock); 1314 } 1315 1316 /** 1317 * wq_worker_tick - a scheduler tick occurred while a kworker is running 1318 * @task: task currently running 1319 * 1320 * Called from scheduler_tick(). We're in the IRQ context and the current 1321 * worker's fields which follow the 'K' locking rule can be accessed safely.
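 *
 * For illustration only: the threshold that drives the CPU_INTENSIVE marking
 * below can be overridden via the module parameter declared above, e.g. on
 * the kernel command line (the value is a hypothetical example):
 *
 *	workqueue.cpu_intensive_thresh_us=5000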
1322 */ 1323 void wq_worker_tick(struct task_struct *task) 1324 { 1325 struct worker *worker = kthread_data(task); 1326 struct pool_workqueue *pwq = worker->current_pwq; 1327 struct worker_pool *pool = worker->pool; 1328 1329 if (!pwq) 1330 return; 1331 1332 pwq->stats[PWQ_STAT_CPU_TIME] += TICK_USEC; 1333 1334 if (!wq_cpu_intensive_thresh_us) 1335 return; 1336 1337 /* 1338 * If the current worker is concurrency managed and hogged the CPU for 1339 * longer than wq_cpu_intensive_thresh_us, it's automatically marked 1340 * CPU_INTENSIVE to avoid stalling other concurrency-managed work items. 1341 * 1342 * Set @worker->sleeping means that @worker is in the process of 1343 * switching out voluntarily and won't be contributing to 1344 * @pool->nr_running until it wakes up. As wq_worker_sleeping() also 1345 * decrements ->nr_running, setting CPU_INTENSIVE here can lead to 1346 * double decrements. The task is releasing the CPU anyway. Let's skip. 1347 * We probably want to make this prettier in the future. 1348 */ 1349 if ((worker->flags & WORKER_NOT_RUNNING) || READ_ONCE(worker->sleeping) || 1350 worker->task->se.sum_exec_runtime - worker->current_at < 1351 wq_cpu_intensive_thresh_us * NSEC_PER_USEC) 1352 return; 1353 1354 raw_spin_lock(&pool->lock); 1355 1356 worker_set_flags(worker, WORKER_CPU_INTENSIVE); 1357 wq_cpu_intensive_report(worker->current_func); 1358 pwq->stats[PWQ_STAT_CPU_INTENSIVE]++; 1359 1360 if (kick_pool(pool)) 1361 pwq->stats[PWQ_STAT_CM_WAKEUP]++; 1362 1363 raw_spin_unlock(&pool->lock); 1364 } 1365 1366 /** 1367 * wq_worker_last_func - retrieve worker's last work function 1368 * @task: Task to retrieve last work function of. 1369 * 1370 * Determine the last function a worker executed. This is called from 1371 * the scheduler to get a worker's last known identity. 1372 * 1373 * CONTEXT: 1374 * raw_spin_lock_irq(rq->lock) 1375 * 1376 * This function is called during schedule() when a kworker is going 1377 * to sleep. It's used by psi to identify aggregation workers during 1378 * dequeuing, to allow periodic aggregation to shut-off when that 1379 * worker is the last task in the system or cgroup to go to sleep. 1380 * 1381 * As this function doesn't involve any workqueue-related locking, it 1382 * only returns stable values when called from inside the scheduler's 1383 * queuing and dequeuing paths, when @task, which must be a kworker, 1384 * is guaranteed to not be processing any works. 1385 * 1386 * Return: 1387 * The last work function %current executed as a worker, NULL if it 1388 * hasn't executed any work yet. 1389 */ 1390 work_func_t wq_worker_last_func(struct task_struct *task) 1391 { 1392 struct worker *worker = kthread_data(task); 1393 1394 return worker->last_func; 1395 } 1396 1397 /** 1398 * get_pwq - get an extra reference on the specified pool_workqueue 1399 * @pwq: pool_workqueue to get 1400 * 1401 * Obtain an extra reference on @pwq. The caller should guarantee that 1402 * @pwq has positive refcnt and be holding the matching pool->lock. 1403 */ 1404 static void get_pwq(struct pool_workqueue *pwq) 1405 { 1406 lockdep_assert_held(&pwq->pool->lock); 1407 WARN_ON_ONCE(pwq->refcnt <= 0); 1408 pwq->refcnt++; 1409 } 1410 1411 /** 1412 * put_pwq - put a pool_workqueue reference 1413 * @pwq: pool_workqueue to put 1414 * 1415 * Drop a reference of @pwq. If its refcnt reaches zero, schedule its 1416 * destruction. The caller should be holding the matching pool->lock. 
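 *
 * For illustration (mirroring insert_work() and pwq_dec_nr_in_flight()):
 * each work item queued on a pwq takes a reference on queueing and drops
 * it once the item leaves the pwq, both under pool->lock:
 *
 *	get_pwq(pwq);
 *	...
 *	put_pwq(pwq);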
1417 */ 1418 static void put_pwq(struct pool_workqueue *pwq) 1419 { 1420 lockdep_assert_held(&pwq->pool->lock); 1421 if (likely(--pwq->refcnt)) 1422 return; 1423 /* 1424 * @pwq can't be released under pool->lock, bounce to a dedicated 1425 * kthread_worker to avoid A-A deadlocks. 1426 */ 1427 kthread_queue_work(pwq_release_worker, &pwq->release_work); 1428 } 1429 1430 /** 1431 * put_pwq_unlocked - put_pwq() with surrounding pool lock/unlock 1432 * @pwq: pool_workqueue to put (can be %NULL) 1433 * 1434 * put_pwq() with locking. This function also allows %NULL @pwq. 1435 */ 1436 static void put_pwq_unlocked(struct pool_workqueue *pwq) 1437 { 1438 if (pwq) { 1439 /* 1440 * As both pwqs and pools are RCU protected, the 1441 * following lock operations are safe. 1442 */ 1443 raw_spin_lock_irq(&pwq->pool->lock); 1444 put_pwq(pwq); 1445 raw_spin_unlock_irq(&pwq->pool->lock); 1446 } 1447 } 1448 1449 static void pwq_activate_inactive_work(struct work_struct *work) 1450 { 1451 struct pool_workqueue *pwq = get_work_pwq(work); 1452 1453 trace_workqueue_activate_work(work); 1454 if (list_empty(&pwq->pool->worklist)) 1455 pwq->pool->watchdog_ts = jiffies; 1456 move_linked_works(work, &pwq->pool->worklist, NULL); 1457 __clear_bit(WORK_STRUCT_INACTIVE_BIT, work_data_bits(work)); 1458 pwq->nr_active++; 1459 } 1460 1461 static void pwq_activate_first_inactive(struct pool_workqueue *pwq) 1462 { 1463 struct work_struct *work = list_first_entry(&pwq->inactive_works, 1464 struct work_struct, entry); 1465 1466 pwq_activate_inactive_work(work); 1467 } 1468 1469 /** 1470 * pwq_dec_nr_in_flight - decrement pwq's nr_in_flight 1471 * @pwq: pwq of interest 1472 * @work_data: work_data of work which left the queue 1473 * 1474 * A work either has completed or is removed from pending queue, 1475 * decrement nr_in_flight of its pwq and handle workqueue flushing. 1476 * 1477 * CONTEXT: 1478 * raw_spin_lock_irq(pool->lock). 1479 */ 1480 static void pwq_dec_nr_in_flight(struct pool_workqueue *pwq, unsigned long work_data) 1481 { 1482 int color = get_work_color(work_data); 1483 1484 if (!(work_data & WORK_STRUCT_INACTIVE)) { 1485 pwq->nr_active--; 1486 if (!list_empty(&pwq->inactive_works)) { 1487 /* one down, submit an inactive one */ 1488 if (pwq->nr_active < pwq->max_active) 1489 pwq_activate_first_inactive(pwq); 1490 } 1491 } 1492 1493 pwq->nr_in_flight[color]--; 1494 1495 /* is flush in progress and are we at the flushing tip? */ 1496 if (likely(pwq->flush_color != color)) 1497 goto out_put; 1498 1499 /* are there still in-flight works? */ 1500 if (pwq->nr_in_flight[color]) 1501 goto out_put; 1502 1503 /* this pwq is done, clear flush_color */ 1504 pwq->flush_color = -1; 1505 1506 /* 1507 * If this was the last pwq, wake up the first flusher. It 1508 * will handle the rest. 1509 */ 1510 if (atomic_dec_and_test(&pwq->wq->nr_pwqs_to_flush)) 1511 complete(&pwq->wq->first_flusher->done); 1512 out_put: 1513 put_pwq(pwq); 1514 } 1515 1516 /** 1517 * try_to_grab_pending - steal work item from worklist and disable irq 1518 * @work: work item to steal 1519 * @is_dwork: @work is a delayed_work 1520 * @flags: place to store irq state 1521 * 1522 * Try to grab PENDING bit of @work. This function can handle @work in any 1523 * stable state - idle, on timer or on worklist. 
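 *
 * Illustrative caller pattern (a sketch of how the cancel paths use this);
 * on a >= 0 return the caller owns PENDING with irqs disabled:
 *
 *	unsigned long flags;
 *	int ret;
 *
 *	do {
 *		ret = try_to_grab_pending(work, is_dwork, &flags);
 *	} while (unlikely(ret == -EAGAIN));
 *
 *	if (ret >= 0)
 *		local_irq_restore(flags);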
1524 * 1525 * Return: 1526 * 1527 * ======== ================================================================ 1528 * 1 if @work was pending and we successfully stole PENDING 1529 * 0 if @work was idle and we claimed PENDING 1530 * -EAGAIN if PENDING couldn't be grabbed at the moment, safe to busy-retry 1531 * -ENOENT if someone else is canceling @work, this state may persist 1532 * for arbitrarily long 1533 * ======== ================================================================ 1534 * 1535 * Note: 1536 * On >= 0 return, the caller owns @work's PENDING bit. To avoid getting 1537 * interrupted while holding PENDING and @work off queue, irq must be 1538 * disabled on entry. This, combined with delayed_work->timer being 1539 * irqsafe, ensures that we return -EAGAIN for finite short period of time. 1540 * 1541 * On successful return, >= 0, irq is disabled and the caller is 1542 * responsible for releasing it using local_irq_restore(*@flags). 1543 * 1544 * This function is safe to call from any context including IRQ handler. 1545 */ 1546 static int try_to_grab_pending(struct work_struct *work, bool is_dwork, 1547 unsigned long *flags) 1548 { 1549 struct worker_pool *pool; 1550 struct pool_workqueue *pwq; 1551 1552 local_irq_save(*flags); 1553 1554 /* try to steal the timer if it exists */ 1555 if (is_dwork) { 1556 struct delayed_work *dwork = to_delayed_work(work); 1557 1558 /* 1559 * dwork->timer is irqsafe. If del_timer() fails, it's 1560 * guaranteed that the timer is not queued anywhere and not 1561 * running on the local CPU. 1562 */ 1563 if (likely(del_timer(&dwork->timer))) 1564 return 1; 1565 } 1566 1567 /* try to claim PENDING the normal way */ 1568 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) 1569 return 0; 1570 1571 rcu_read_lock(); 1572 /* 1573 * The queueing is in progress, or it is already queued. Try to 1574 * steal it from ->worklist without clearing WORK_STRUCT_PENDING. 1575 */ 1576 pool = get_work_pool(work); 1577 if (!pool) 1578 goto fail; 1579 1580 raw_spin_lock(&pool->lock); 1581 /* 1582 * work->data is guaranteed to point to pwq only while the work 1583 * item is queued on pwq->wq, and both updating work->data to point 1584 * to pwq on queueing and to pool on dequeueing are done under 1585 * pwq->pool->lock. This in turn guarantees that, if work->data 1586 * points to pwq which is associated with a locked pool, the work 1587 * item is currently queued on that pool. 1588 */ 1589 pwq = get_work_pwq(work); 1590 if (pwq && pwq->pool == pool) { 1591 debug_work_deactivate(work); 1592 1593 /* 1594 * A cancelable inactive work item must be in the 1595 * pwq->inactive_works since a queued barrier can't be 1596 * canceled (see the comments in insert_wq_barrier()). 1597 * 1598 * An inactive work item cannot be grabbed directly because 1599 * it might have linked barrier work items which, if left 1600 * on the inactive_works list, will confuse pwq->nr_active 1601 * management later on and cause stall. Make sure the work 1602 * item is activated before grabbing. 
1603 */ 1604 if (*work_data_bits(work) & WORK_STRUCT_INACTIVE) 1605 pwq_activate_inactive_work(work); 1606 1607 list_del_init(&work->entry); 1608 pwq_dec_nr_in_flight(pwq, *work_data_bits(work)); 1609 1610 /* work->data points to pwq iff queued, point to pool */ 1611 set_work_pool_and_keep_pending(work, pool->id); 1612 1613 raw_spin_unlock(&pool->lock); 1614 rcu_read_unlock(); 1615 return 1; 1616 } 1617 raw_spin_unlock(&pool->lock); 1618 fail: 1619 rcu_read_unlock(); 1620 local_irq_restore(*flags); 1621 if (work_is_canceling(work)) 1622 return -ENOENT; 1623 cpu_relax(); 1624 return -EAGAIN; 1625 } 1626 1627 /** 1628 * insert_work - insert a work into a pool 1629 * @pwq: pwq @work belongs to 1630 * @work: work to insert 1631 * @head: insertion point 1632 * @extra_flags: extra WORK_STRUCT_* flags to set 1633 * 1634 * Insert @work which belongs to @pwq after @head. @extra_flags is or'd to 1635 * work_struct flags. 1636 * 1637 * CONTEXT: 1638 * raw_spin_lock_irq(pool->lock). 1639 */ 1640 static void insert_work(struct pool_workqueue *pwq, struct work_struct *work, 1641 struct list_head *head, unsigned int extra_flags) 1642 { 1643 debug_work_activate(work); 1644 1645 /* record the work call stack in order to print it in KASAN reports */ 1646 kasan_record_aux_stack_noalloc(work); 1647 1648 /* we own @work, set data and link */ 1649 set_work_pwq(work, pwq, extra_flags); 1650 list_add_tail(&work->entry, head); 1651 get_pwq(pwq); 1652 } 1653 1654 /* 1655 * Test whether @work is being queued from another work executing on the 1656 * same workqueue. 1657 */ 1658 static bool is_chained_work(struct workqueue_struct *wq) 1659 { 1660 struct worker *worker; 1661 1662 worker = current_wq_worker(); 1663 /* 1664 * Return %true iff I'm a worker executing a work item on @wq. If 1665 * I'm @worker, it's safe to dereference it without locking. 1666 */ 1667 return worker && worker->current_pwq->wq == wq; 1668 } 1669 1670 /* 1671 * When queueing an unbound work item to a wq, prefer local CPU if allowed 1672 * by wq_unbound_cpumask. Otherwise, round robin among the allowed ones to 1673 * avoid perturbing sensitive tasks. 1674 */ 1675 static int wq_select_unbound_cpu(int cpu) 1676 { 1677 int new_cpu; 1678 1679 if (likely(!wq_debug_force_rr_cpu)) { 1680 if (cpumask_test_cpu(cpu, wq_unbound_cpumask)) 1681 return cpu; 1682 } else { 1683 pr_warn_once("workqueue: round-robin CPU selection forced, expect performance impact\n"); 1684 } 1685 1686 if (cpumask_empty(wq_unbound_cpumask)) 1687 return cpu; 1688 1689 new_cpu = __this_cpu_read(wq_rr_cpu_last); 1690 new_cpu = cpumask_next_and(new_cpu, wq_unbound_cpumask, cpu_online_mask); 1691 if (unlikely(new_cpu >= nr_cpu_ids)) { 1692 new_cpu = cpumask_first_and(wq_unbound_cpumask, cpu_online_mask); 1693 if (unlikely(new_cpu >= nr_cpu_ids)) 1694 return cpu; 1695 } 1696 __this_cpu_write(wq_rr_cpu_last, new_cpu); 1697 1698 return new_cpu; 1699 } 1700 1701 static void __queue_work(int cpu, struct workqueue_struct *wq, 1702 struct work_struct *work) 1703 { 1704 struct pool_workqueue *pwq; 1705 struct worker_pool *last_pool, *pool; 1706 unsigned int work_flags; 1707 unsigned int req_cpu = cpu; 1708 1709 /* 1710 * While a work item is PENDING && off queue, a task trying to 1711 * steal the PENDING will busy-loop waiting for it to either get 1712 * queued or lose PENDING. Grabbing PENDING and queueing should 1713 * happen with IRQ disabled. 1714 */ 1715 lockdep_assert_irqs_disabled(); 1716 1717 1718 /* 1719 * For a draining wq, only works from the same workqueue are 1720 * allowed. 
The __WQ_DESTROYING helps to spot the issue that 1721 * queues a new work item to a wq after destroy_workqueue(wq). 1722 */ 1723 if (unlikely(wq->flags & (__WQ_DESTROYING | __WQ_DRAINING) && 1724 WARN_ON_ONCE(!is_chained_work(wq)))) 1725 return; 1726 rcu_read_lock(); 1727 retry: 1728 /* pwq which will be used unless @work is executing elsewhere */ 1729 if (req_cpu == WORK_CPU_UNBOUND) { 1730 if (wq->flags & WQ_UNBOUND) 1731 cpu = wq_select_unbound_cpu(raw_smp_processor_id()); 1732 else 1733 cpu = raw_smp_processor_id(); 1734 } 1735 1736 pwq = rcu_dereference(*per_cpu_ptr(wq->cpu_pwq, cpu)); 1737 pool = pwq->pool; 1738 1739 /* 1740 * If @work was previously on a different pool, it might still be 1741 * running there, in which case the work needs to be queued on that 1742 * pool to guarantee non-reentrancy. 1743 */ 1744 last_pool = get_work_pool(work); 1745 if (last_pool && last_pool != pool) { 1746 struct worker *worker; 1747 1748 raw_spin_lock(&last_pool->lock); 1749 1750 worker = find_worker_executing_work(last_pool, work); 1751 1752 if (worker && worker->current_pwq->wq == wq) { 1753 pwq = worker->current_pwq; 1754 pool = pwq->pool; 1755 WARN_ON_ONCE(pool != last_pool); 1756 } else { 1757 /* meh... not running there, queue here */ 1758 raw_spin_unlock(&last_pool->lock); 1759 raw_spin_lock(&pool->lock); 1760 } 1761 } else { 1762 raw_spin_lock(&pool->lock); 1763 } 1764 1765 /* 1766 * pwq is determined and locked. For unbound pools, we could have raced 1767 * with pwq release and it could already be dead. If its refcnt is zero, 1768 * repeat pwq selection. Note that unbound pwqs never die without 1769 * another pwq replacing it in cpu_pwq or while work items are executing 1770 * on it, so the retrying is guaranteed to make forward-progress. 1771 */ 1772 if (unlikely(!pwq->refcnt)) { 1773 if (wq->flags & WQ_UNBOUND) { 1774 raw_spin_unlock(&pool->lock); 1775 cpu_relax(); 1776 goto retry; 1777 } 1778 /* oops */ 1779 WARN_ONCE(true, "workqueue: per-cpu pwq for %s on cpu%d has 0 refcnt", 1780 wq->name, cpu); 1781 } 1782 1783 /* pwq determined, queue */ 1784 trace_workqueue_queue_work(req_cpu, pwq, work); 1785 1786 if (WARN_ON(!list_empty(&work->entry))) 1787 goto out; 1788 1789 pwq->nr_in_flight[pwq->work_color]++; 1790 work_flags = work_color_to_flags(pwq->work_color); 1791 1792 if (likely(pwq->nr_active < pwq->max_active)) { 1793 if (list_empty(&pool->worklist)) 1794 pool->watchdog_ts = jiffies; 1795 1796 trace_workqueue_activate_work(work); 1797 pwq->nr_active++; 1798 insert_work(pwq, work, &pool->worklist, work_flags); 1799 kick_pool(pool); 1800 } else { 1801 work_flags |= WORK_STRUCT_INACTIVE; 1802 insert_work(pwq, work, &pwq->inactive_works, work_flags); 1803 } 1804 1805 out: 1806 raw_spin_unlock(&pool->lock); 1807 rcu_read_unlock(); 1808 } 1809 1810 /** 1811 * queue_work_on - queue work on specific cpu 1812 * @cpu: CPU number to execute work on 1813 * @wq: workqueue to use 1814 * @work: work to queue 1815 * 1816 * We queue the work to a specific CPU, the caller must ensure it 1817 * can't go away. Callers that fail to ensure that the specified 1818 * CPU cannot go away will execute on a randomly chosen CPU. 1819 * But note well that callers specifying a CPU that never has been 1820 * online will get a splat. 1821 * 1822 * Return: %false if @work was already on a queue, %true otherwise. 
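 *
 * A minimal usage sketch, assuming CPU 1 has been online (the my_* names
 * are illustrative and not part of this file):
 *
 *	static void my_work_fn(struct work_struct *work)
 *	{
 *		pr_info("my_work ran\n");
 *	}
 *	static DECLARE_WORK(my_work, my_work_fn);
 *
 *	if (!queue_work_on(1, system_wq, &my_work))
 *		pr_info("my_work was already pending\n");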
1823  */
1824 bool queue_work_on(int cpu, struct workqueue_struct *wq,
1825 		   struct work_struct *work)
1826 {
1827 	bool ret = false;
1828 	unsigned long flags;
1829 
1830 	local_irq_save(flags);
1831 
1832 	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
1833 		__queue_work(cpu, wq, work);
1834 		ret = true;
1835 	}
1836 
1837 	local_irq_restore(flags);
1838 	return ret;
1839 }
1840 EXPORT_SYMBOL(queue_work_on);
1841 
1842 /**
1843  * select_numa_node_cpu - Select a CPU based on NUMA node
1844  * @node: NUMA node ID that we want to select a CPU from
1845  *
1846  * This function will attempt to find a "random" cpu available on a given
1847  * node. If there are no CPUs available on the given node, it will return
1848  * WORK_CPU_UNBOUND indicating that we should just schedule to any
1849  * available CPU if we need to schedule this work.
1850  */
1851 static int select_numa_node_cpu(int node)
1852 {
1853 	int cpu;
1854 
1855 	/* Delay binding to CPU if node is not valid or online */
1856 	if (node < 0 || node >= MAX_NUMNODES || !node_online(node))
1857 		return WORK_CPU_UNBOUND;
1858 
1859 	/* Use local node/cpu if we are already there */
1860 	cpu = raw_smp_processor_id();
1861 	if (node == cpu_to_node(cpu))
1862 		return cpu;
1863 
1864 	/* Use the "random", otherwise known as "first", online CPU of the node */
1865 	cpu = cpumask_any_and(cpumask_of_node(node), cpu_online_mask);
1866 
1867 	/* If CPU is valid, return it; otherwise just defer */
1868 	return cpu < nr_cpu_ids ? cpu : WORK_CPU_UNBOUND;
1869 }
1870 
1871 /**
1872  * queue_work_node - queue work on a "random" cpu for a given NUMA node
1873  * @node: NUMA node that we are targeting the work for
1874  * @wq: workqueue to use
1875  * @work: work to queue
1876  *
1877  * We queue the work to a "random" CPU within a given NUMA node. The basic
1878  * idea here is to provide a way to somehow associate work with a given
1879  * NUMA node.
1880  *
1881  * This function only makes a best-effort attempt at getting the work onto
1882  * the right NUMA node. If no node is requested or the requested node is
1883  * offline then we just fall back to standard queue_work behavior.
1884  *
1885  * Currently the "random" CPU ends up being the first available CPU in the
1886  * intersection of cpu_online_mask and the cpumask of the node, unless we
1887  * are running on the node. In that case we just use the current CPU.
1888  *
1889  * Return: %false if @work was already on a queue, %true otherwise.
1890  */
1891 bool queue_work_node(int node, struct workqueue_struct *wq,
1892 		     struct work_struct *work)
1893 {
1894 	unsigned long flags;
1895 	bool ret = false;
1896 
1897 	/*
1898 	 * This current implementation is specific to unbound workqueues.
1899 	 * Specifically, we only return the first available CPU for a given
1900 	 * node instead of cycling through individual CPUs within the node.
1901 	 *
1902 	 * If this is used with a per-cpu workqueue then the logic in
1903 	 * select_numa_node_cpu() would need to be updated to allow for
1904 	 * some round-robin type logic.
1905 */ 1906 WARN_ON_ONCE(!(wq->flags & WQ_UNBOUND)); 1907 1908 local_irq_save(flags); 1909 1910 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) { 1911 int cpu = select_numa_node_cpu(node); 1912 1913 __queue_work(cpu, wq, work); 1914 ret = true; 1915 } 1916 1917 local_irq_restore(flags); 1918 return ret; 1919 } 1920 EXPORT_SYMBOL_GPL(queue_work_node); 1921 1922 void delayed_work_timer_fn(struct timer_list *t) 1923 { 1924 struct delayed_work *dwork = from_timer(dwork, t, timer); 1925 1926 /* should have been called from irqsafe timer with irq already off */ 1927 __queue_work(dwork->cpu, dwork->wq, &dwork->work); 1928 } 1929 EXPORT_SYMBOL(delayed_work_timer_fn); 1930 1931 static void __queue_delayed_work(int cpu, struct workqueue_struct *wq, 1932 struct delayed_work *dwork, unsigned long delay) 1933 { 1934 struct timer_list *timer = &dwork->timer; 1935 struct work_struct *work = &dwork->work; 1936 1937 WARN_ON_ONCE(!wq); 1938 WARN_ON_ONCE(timer->function != delayed_work_timer_fn); 1939 WARN_ON_ONCE(timer_pending(timer)); 1940 WARN_ON_ONCE(!list_empty(&work->entry)); 1941 1942 /* 1943 * If @delay is 0, queue @dwork->work immediately. This is for 1944 * both optimization and correctness. The earliest @timer can 1945 * expire is on the closest next tick and delayed_work users depend 1946 * on that there's no such delay when @delay is 0. 1947 */ 1948 if (!delay) { 1949 __queue_work(cpu, wq, &dwork->work); 1950 return; 1951 } 1952 1953 dwork->wq = wq; 1954 dwork->cpu = cpu; 1955 timer->expires = jiffies + delay; 1956 1957 if (unlikely(cpu != WORK_CPU_UNBOUND)) 1958 add_timer_on(timer, cpu); 1959 else 1960 add_timer(timer); 1961 } 1962 1963 /** 1964 * queue_delayed_work_on - queue work on specific CPU after delay 1965 * @cpu: CPU number to execute work on 1966 * @wq: workqueue to use 1967 * @dwork: work to queue 1968 * @delay: number of jiffies to wait before queueing 1969 * 1970 * Return: %false if @work was already on a queue, %true otherwise. If 1971 * @delay is zero and @dwork is idle, it will be scheduled for immediate 1972 * execution. 1973 */ 1974 bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq, 1975 struct delayed_work *dwork, unsigned long delay) 1976 { 1977 struct work_struct *work = &dwork->work; 1978 bool ret = false; 1979 unsigned long flags; 1980 1981 /* read the comment in __queue_work() */ 1982 local_irq_save(flags); 1983 1984 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) { 1985 __queue_delayed_work(cpu, wq, dwork, delay); 1986 ret = true; 1987 } 1988 1989 local_irq_restore(flags); 1990 return ret; 1991 } 1992 EXPORT_SYMBOL(queue_delayed_work_on); 1993 1994 /** 1995 * mod_delayed_work_on - modify delay of or queue a delayed work on specific CPU 1996 * @cpu: CPU number to execute work on 1997 * @wq: workqueue to use 1998 * @dwork: work to queue 1999 * @delay: number of jiffies to wait before queueing 2000 * 2001 * If @dwork is idle, equivalent to queue_delayed_work_on(); otherwise, 2002 * modify @dwork's timer so that it expires after @delay. If @delay is 2003 * zero, @work is guaranteed to be scheduled immediately regardless of its 2004 * current state. 2005 * 2006 * Return: %false if @dwork was idle and queued, %true if @dwork was 2007 * pending and its timer was modified. 2008 * 2009 * This function is safe to call from any context including IRQ handler. 2010 * See try_to_grab_pending() for details. 
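 *
 * A common use is debouncing: every call pushes the timer back, so only
 * the last call in a burst actually runs the work. A minimal sketch (the
 * my_* names and the 100ms delay are illustrative, not part of this file):
 *
 *	static void my_dwork_fn(struct work_struct *work)
 *	{
 *		pr_info("debounced work ran\n");
 *	}
 *	static DECLARE_DELAYED_WORK(my_dwork, my_dwork_fn);
 *
 *	mod_delayed_work_on(WORK_CPU_UNBOUND, system_wq, &my_dwork,
 *			    msecs_to_jiffies(100));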
2011 */ 2012 bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq, 2013 struct delayed_work *dwork, unsigned long delay) 2014 { 2015 unsigned long flags; 2016 int ret; 2017 2018 do { 2019 ret = try_to_grab_pending(&dwork->work, true, &flags); 2020 } while (unlikely(ret == -EAGAIN)); 2021 2022 if (likely(ret >= 0)) { 2023 __queue_delayed_work(cpu, wq, dwork, delay); 2024 local_irq_restore(flags); 2025 } 2026 2027 /* -ENOENT from try_to_grab_pending() becomes %true */ 2028 return ret; 2029 } 2030 EXPORT_SYMBOL_GPL(mod_delayed_work_on); 2031 2032 static void rcu_work_rcufn(struct rcu_head *rcu) 2033 { 2034 struct rcu_work *rwork = container_of(rcu, struct rcu_work, rcu); 2035 2036 /* read the comment in __queue_work() */ 2037 local_irq_disable(); 2038 __queue_work(WORK_CPU_UNBOUND, rwork->wq, &rwork->work); 2039 local_irq_enable(); 2040 } 2041 2042 /** 2043 * queue_rcu_work - queue work after a RCU grace period 2044 * @wq: workqueue to use 2045 * @rwork: work to queue 2046 * 2047 * Return: %false if @rwork was already pending, %true otherwise. Note 2048 * that a full RCU grace period is guaranteed only after a %true return. 2049 * While @rwork is guaranteed to be executed after a %false return, the 2050 * execution may happen before a full RCU grace period has passed. 2051 */ 2052 bool queue_rcu_work(struct workqueue_struct *wq, struct rcu_work *rwork) 2053 { 2054 struct work_struct *work = &rwork->work; 2055 2056 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) { 2057 rwork->wq = wq; 2058 call_rcu_hurry(&rwork->rcu, rcu_work_rcufn); 2059 return true; 2060 } 2061 2062 return false; 2063 } 2064 EXPORT_SYMBOL(queue_rcu_work); 2065 2066 static struct worker *alloc_worker(int node) 2067 { 2068 struct worker *worker; 2069 2070 worker = kzalloc_node(sizeof(*worker), GFP_KERNEL, node); 2071 if (worker) { 2072 INIT_LIST_HEAD(&worker->entry); 2073 INIT_LIST_HEAD(&worker->scheduled); 2074 INIT_LIST_HEAD(&worker->node); 2075 /* on creation a worker is in !idle && prep state */ 2076 worker->flags = WORKER_PREP; 2077 } 2078 return worker; 2079 } 2080 2081 static cpumask_t *pool_allowed_cpus(struct worker_pool *pool) 2082 { 2083 if (pool->cpu < 0 && pool->attrs->affn_strict) 2084 return pool->attrs->__pod_cpumask; 2085 else 2086 return pool->attrs->cpumask; 2087 } 2088 2089 /** 2090 * worker_attach_to_pool() - attach a worker to a pool 2091 * @worker: worker to be attached 2092 * @pool: the target pool 2093 * 2094 * Attach @worker to @pool. Once attached, the %WORKER_UNBOUND flag and 2095 * cpu-binding of @worker are kept coordinated with the pool across 2096 * cpu-[un]hotplugs. 2097 */ 2098 static void worker_attach_to_pool(struct worker *worker, 2099 struct worker_pool *pool) 2100 { 2101 mutex_lock(&wq_pool_attach_mutex); 2102 2103 /* 2104 * The wq_pool_attach_mutex ensures %POOL_DISASSOCIATED remains 2105 * stable across this function. See the comments above the flag 2106 * definition for details. 
2107 */ 2108 if (pool->flags & POOL_DISASSOCIATED) 2109 worker->flags |= WORKER_UNBOUND; 2110 else 2111 kthread_set_per_cpu(worker->task, pool->cpu); 2112 2113 if (worker->rescue_wq) 2114 set_cpus_allowed_ptr(worker->task, pool_allowed_cpus(pool)); 2115 2116 list_add_tail(&worker->node, &pool->workers); 2117 worker->pool = pool; 2118 2119 mutex_unlock(&wq_pool_attach_mutex); 2120 } 2121 2122 /** 2123 * worker_detach_from_pool() - detach a worker from its pool 2124 * @worker: worker which is attached to its pool 2125 * 2126 * Undo the attaching which had been done in worker_attach_to_pool(). The 2127 * caller worker shouldn't access to the pool after detached except it has 2128 * other reference to the pool. 2129 */ 2130 static void worker_detach_from_pool(struct worker *worker) 2131 { 2132 struct worker_pool *pool = worker->pool; 2133 struct completion *detach_completion = NULL; 2134 2135 mutex_lock(&wq_pool_attach_mutex); 2136 2137 kthread_set_per_cpu(worker->task, -1); 2138 list_del(&worker->node); 2139 worker->pool = NULL; 2140 2141 if (list_empty(&pool->workers) && list_empty(&pool->dying_workers)) 2142 detach_completion = pool->detach_completion; 2143 mutex_unlock(&wq_pool_attach_mutex); 2144 2145 /* clear leftover flags without pool->lock after it is detached */ 2146 worker->flags &= ~(WORKER_UNBOUND | WORKER_REBOUND); 2147 2148 if (detach_completion) 2149 complete(detach_completion); 2150 } 2151 2152 /** 2153 * create_worker - create a new workqueue worker 2154 * @pool: pool the new worker will belong to 2155 * 2156 * Create and start a new worker which is attached to @pool. 2157 * 2158 * CONTEXT: 2159 * Might sleep. Does GFP_KERNEL allocations. 2160 * 2161 * Return: 2162 * Pointer to the newly created worker. 2163 */ 2164 static struct worker *create_worker(struct worker_pool *pool) 2165 { 2166 struct worker *worker; 2167 int id; 2168 char id_buf[16]; 2169 2170 /* ID is needed to determine kthread name */ 2171 id = ida_alloc(&pool->worker_ida, GFP_KERNEL); 2172 if (id < 0) { 2173 pr_err_once("workqueue: Failed to allocate a worker ID: %pe\n", 2174 ERR_PTR(id)); 2175 return NULL; 2176 } 2177 2178 worker = alloc_worker(pool->node); 2179 if (!worker) { 2180 pr_err_once("workqueue: Failed to allocate a worker\n"); 2181 goto fail; 2182 } 2183 2184 worker->id = id; 2185 2186 if (pool->cpu >= 0) 2187 snprintf(id_buf, sizeof(id_buf), "%d:%d%s", pool->cpu, id, 2188 pool->attrs->nice < 0 ? "H" : ""); 2189 else 2190 snprintf(id_buf, sizeof(id_buf), "u%d:%d", pool->id, id); 2191 2192 worker->task = kthread_create_on_node(worker_thread, worker, pool->node, 2193 "kworker/%s", id_buf); 2194 if (IS_ERR(worker->task)) { 2195 if (PTR_ERR(worker->task) == -EINTR) { 2196 pr_err("workqueue: Interrupted when creating a worker thread \"kworker/%s\"\n", 2197 id_buf); 2198 } else { 2199 pr_err_once("workqueue: Failed to create a worker thread: %pe", 2200 worker->task); 2201 } 2202 goto fail; 2203 } 2204 2205 set_user_nice(worker->task, pool->attrs->nice); 2206 kthread_bind_mask(worker->task, pool_allowed_cpus(pool)); 2207 2208 /* successful, attach the worker to the pool */ 2209 worker_attach_to_pool(worker, pool); 2210 2211 /* start the newly created worker */ 2212 raw_spin_lock_irq(&pool->lock); 2213 2214 worker->pool->nr_workers++; 2215 worker_enter_idle(worker); 2216 kick_pool(pool); 2217 2218 /* 2219 * @worker is waiting on a completion in kthread() and will trigger hung 2220 * check if not woken up soon. As kick_pool() might not have waken it 2221 * up, wake it up explicitly once more. 
2222 */ 2223 wake_up_process(worker->task); 2224 2225 raw_spin_unlock_irq(&pool->lock); 2226 2227 return worker; 2228 2229 fail: 2230 ida_free(&pool->worker_ida, id); 2231 kfree(worker); 2232 return NULL; 2233 } 2234 2235 static void unbind_worker(struct worker *worker) 2236 { 2237 lockdep_assert_held(&wq_pool_attach_mutex); 2238 2239 kthread_set_per_cpu(worker->task, -1); 2240 if (cpumask_intersects(wq_unbound_cpumask, cpu_active_mask)) 2241 WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, wq_unbound_cpumask) < 0); 2242 else 2243 WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, cpu_possible_mask) < 0); 2244 } 2245 2246 static void wake_dying_workers(struct list_head *cull_list) 2247 { 2248 struct worker *worker, *tmp; 2249 2250 list_for_each_entry_safe(worker, tmp, cull_list, entry) { 2251 list_del_init(&worker->entry); 2252 unbind_worker(worker); 2253 /* 2254 * If the worker was somehow already running, then it had to be 2255 * in pool->idle_list when set_worker_dying() happened or we 2256 * wouldn't have gotten here. 2257 * 2258 * Thus, the worker must either have observed the WORKER_DIE 2259 * flag, or have set its state to TASK_IDLE. Either way, the 2260 * below will be observed by the worker and is safe to do 2261 * outside of pool->lock. 2262 */ 2263 wake_up_process(worker->task); 2264 } 2265 } 2266 2267 /** 2268 * set_worker_dying - Tag a worker for destruction 2269 * @worker: worker to be destroyed 2270 * @list: transfer worker away from its pool->idle_list and into list 2271 * 2272 * Tag @worker for destruction and adjust @pool stats accordingly. The worker 2273 * should be idle. 2274 * 2275 * CONTEXT: 2276 * raw_spin_lock_irq(pool->lock). 2277 */ 2278 static void set_worker_dying(struct worker *worker, struct list_head *list) 2279 { 2280 struct worker_pool *pool = worker->pool; 2281 2282 lockdep_assert_held(&pool->lock); 2283 lockdep_assert_held(&wq_pool_attach_mutex); 2284 2285 /* sanity check frenzy */ 2286 if (WARN_ON(worker->current_work) || 2287 WARN_ON(!list_empty(&worker->scheduled)) || 2288 WARN_ON(!(worker->flags & WORKER_IDLE))) 2289 return; 2290 2291 pool->nr_workers--; 2292 pool->nr_idle--; 2293 2294 worker->flags |= WORKER_DIE; 2295 2296 list_move(&worker->entry, list); 2297 list_move(&worker->node, &pool->dying_workers); 2298 } 2299 2300 /** 2301 * idle_worker_timeout - check if some idle workers can now be deleted. 2302 * @t: The pool's idle_timer that just expired 2303 * 2304 * The timer is armed in worker_enter_idle(). Note that it isn't disarmed in 2305 * worker_leave_idle(), as a worker flicking between idle and active while its 2306 * pool is at the too_many_workers() tipping point would cause too much timer 2307 * housekeeping overhead. Since IDLE_WORKER_TIMEOUT is long enough, we just let 2308 * it expire and re-evaluate things from there. 
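 *
 * As a worked example with the definitions above: IDLE_WORKER_TIMEOUT is
 * 300 * HZ, so a worker whose last_active is T only becomes a culling
 * candidate once jiffies reaches T + 300 * HZ; if this timer fires before
 * that, it is simply re-armed for that expiry time and the check is
 * repeated then.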
2309 */ 2310 static void idle_worker_timeout(struct timer_list *t) 2311 { 2312 struct worker_pool *pool = from_timer(pool, t, idle_timer); 2313 bool do_cull = false; 2314 2315 if (work_pending(&pool->idle_cull_work)) 2316 return; 2317 2318 raw_spin_lock_irq(&pool->lock); 2319 2320 if (too_many_workers(pool)) { 2321 struct worker *worker; 2322 unsigned long expires; 2323 2324 /* idle_list is kept in LIFO order, check the last one */ 2325 worker = list_entry(pool->idle_list.prev, struct worker, entry); 2326 expires = worker->last_active + IDLE_WORKER_TIMEOUT; 2327 do_cull = !time_before(jiffies, expires); 2328 2329 if (!do_cull) 2330 mod_timer(&pool->idle_timer, expires); 2331 } 2332 raw_spin_unlock_irq(&pool->lock); 2333 2334 if (do_cull) 2335 queue_work(system_unbound_wq, &pool->idle_cull_work); 2336 } 2337 2338 /** 2339 * idle_cull_fn - cull workers that have been idle for too long. 2340 * @work: the pool's work for handling these idle workers 2341 * 2342 * This goes through a pool's idle workers and gets rid of those that have been 2343 * idle for at least IDLE_WORKER_TIMEOUT seconds. 2344 * 2345 * We don't want to disturb isolated CPUs because of a pcpu kworker being 2346 * culled, so this also resets worker affinity. This requires a sleepable 2347 * context, hence the split between timer callback and work item. 2348 */ 2349 static void idle_cull_fn(struct work_struct *work) 2350 { 2351 struct worker_pool *pool = container_of(work, struct worker_pool, idle_cull_work); 2352 LIST_HEAD(cull_list); 2353 2354 /* 2355 * Grabbing wq_pool_attach_mutex here ensures an already-running worker 2356 * cannot proceed beyong worker_detach_from_pool() in its self-destruct 2357 * path. This is required as a previously-preempted worker could run after 2358 * set_worker_dying() has happened but before wake_dying_workers() did. 2359 */ 2360 mutex_lock(&wq_pool_attach_mutex); 2361 raw_spin_lock_irq(&pool->lock); 2362 2363 while (too_many_workers(pool)) { 2364 struct worker *worker; 2365 unsigned long expires; 2366 2367 worker = list_entry(pool->idle_list.prev, struct worker, entry); 2368 expires = worker->last_active + IDLE_WORKER_TIMEOUT; 2369 2370 if (time_before(jiffies, expires)) { 2371 mod_timer(&pool->idle_timer, expires); 2372 break; 2373 } 2374 2375 set_worker_dying(worker, &cull_list); 2376 } 2377 2378 raw_spin_unlock_irq(&pool->lock); 2379 wake_dying_workers(&cull_list); 2380 mutex_unlock(&wq_pool_attach_mutex); 2381 } 2382 2383 static void send_mayday(struct work_struct *work) 2384 { 2385 struct pool_workqueue *pwq = get_work_pwq(work); 2386 struct workqueue_struct *wq = pwq->wq; 2387 2388 lockdep_assert_held(&wq_mayday_lock); 2389 2390 if (!wq->rescuer) 2391 return; 2392 2393 /* mayday mayday mayday */ 2394 if (list_empty(&pwq->mayday_node)) { 2395 /* 2396 * If @pwq is for an unbound wq, its base ref may be put at 2397 * any time due to an attribute change. Pin @pwq until the 2398 * rescuer is done with it. 2399 */ 2400 get_pwq(pwq); 2401 list_add_tail(&pwq->mayday_node, &wq->maydays); 2402 wake_up_process(wq->rescuer->task); 2403 pwq->stats[PWQ_STAT_MAYDAY]++; 2404 } 2405 } 2406 2407 static void pool_mayday_timeout(struct timer_list *t) 2408 { 2409 struct worker_pool *pool = from_timer(pool, t, mayday_timer); 2410 struct work_struct *work; 2411 2412 raw_spin_lock_irq(&pool->lock); 2413 raw_spin_lock(&wq_mayday_lock); /* for wq->maydays */ 2414 2415 if (need_to_create_worker(pool)) { 2416 /* 2417 * We've been trying to create a new worker but 2418 * haven't been successful. 
We might be hitting an 2419 * allocation deadlock. Send distress signals to 2420 * rescuers. 2421 */ 2422 list_for_each_entry(work, &pool->worklist, entry) 2423 send_mayday(work); 2424 } 2425 2426 raw_spin_unlock(&wq_mayday_lock); 2427 raw_spin_unlock_irq(&pool->lock); 2428 2429 mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INTERVAL); 2430 } 2431 2432 /** 2433 * maybe_create_worker - create a new worker if necessary 2434 * @pool: pool to create a new worker for 2435 * 2436 * Create a new worker for @pool if necessary. @pool is guaranteed to 2437 * have at least one idle worker on return from this function. If 2438 * creating a new worker takes longer than MAYDAY_INTERVAL, mayday is 2439 * sent to all rescuers with works scheduled on @pool to resolve 2440 * possible allocation deadlock. 2441 * 2442 * On return, need_to_create_worker() is guaranteed to be %false and 2443 * may_start_working() %true. 2444 * 2445 * LOCKING: 2446 * raw_spin_lock_irq(pool->lock) which may be released and regrabbed 2447 * multiple times. Does GFP_KERNEL allocations. Called only from 2448 * manager. 2449 */ 2450 static void maybe_create_worker(struct worker_pool *pool) 2451 __releases(&pool->lock) 2452 __acquires(&pool->lock) 2453 { 2454 restart: 2455 raw_spin_unlock_irq(&pool->lock); 2456 2457 /* if we don't make progress in MAYDAY_INITIAL_TIMEOUT, call for help */ 2458 mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INITIAL_TIMEOUT); 2459 2460 while (true) { 2461 if (create_worker(pool) || !need_to_create_worker(pool)) 2462 break; 2463 2464 schedule_timeout_interruptible(CREATE_COOLDOWN); 2465 2466 if (!need_to_create_worker(pool)) 2467 break; 2468 } 2469 2470 del_timer_sync(&pool->mayday_timer); 2471 raw_spin_lock_irq(&pool->lock); 2472 /* 2473 * This is necessary even after a new worker was just successfully 2474 * created as @pool->lock was dropped and the new worker might have 2475 * already become busy. 2476 */ 2477 if (need_to_create_worker(pool)) 2478 goto restart; 2479 } 2480 2481 /** 2482 * manage_workers - manage worker pool 2483 * @worker: self 2484 * 2485 * Assume the manager role and manage the worker pool @worker belongs 2486 * to. At any given time, there can be only zero or one manager per 2487 * pool. The exclusion is handled automatically by this function. 2488 * 2489 * The caller can safely start processing works on false return. On 2490 * true return, it's guaranteed that need_to_create_worker() is false 2491 * and may_start_working() is true. 2492 * 2493 * CONTEXT: 2494 * raw_spin_lock_irq(pool->lock) which may be released and regrabbed 2495 * multiple times. Does GFP_KERNEL allocations. 2496 * 2497 * Return: 2498 * %false if the pool doesn't need management and the caller can safely 2499 * start processing works, %true if management function was performed and 2500 * the conditions that the caller verified before calling the function may 2501 * no longer be true. 2502 */ 2503 static bool manage_workers(struct worker *worker) 2504 { 2505 struct worker_pool *pool = worker->pool; 2506 2507 if (pool->flags & POOL_MANAGER_ACTIVE) 2508 return false; 2509 2510 pool->flags |= POOL_MANAGER_ACTIVE; 2511 pool->manager = worker; 2512 2513 maybe_create_worker(pool); 2514 2515 pool->manager = NULL; 2516 pool->flags &= ~POOL_MANAGER_ACTIVE; 2517 rcuwait_wake_up(&manager_wait); 2518 return true; 2519 } 2520 2521 /** 2522 * process_one_work - process single work 2523 * @worker: self 2524 * @work: work to process 2525 * 2526 * Process @work. 
This function contains all the logics necessary to 2527 * process a single work including synchronization against and 2528 * interaction with other workers on the same cpu, queueing and 2529 * flushing. As long as context requirement is met, any worker can 2530 * call this function to process a work. 2531 * 2532 * CONTEXT: 2533 * raw_spin_lock_irq(pool->lock) which is released and regrabbed. 2534 */ 2535 static void process_one_work(struct worker *worker, struct work_struct *work) 2536 __releases(&pool->lock) 2537 __acquires(&pool->lock) 2538 { 2539 struct pool_workqueue *pwq = get_work_pwq(work); 2540 struct worker_pool *pool = worker->pool; 2541 unsigned long work_data; 2542 #ifdef CONFIG_LOCKDEP 2543 /* 2544 * It is permissible to free the struct work_struct from 2545 * inside the function that is called from it, this we need to 2546 * take into account for lockdep too. To avoid bogus "held 2547 * lock freed" warnings as well as problems when looking into 2548 * work->lockdep_map, make a copy and use that here. 2549 */ 2550 struct lockdep_map lockdep_map; 2551 2552 lockdep_copy_map(&lockdep_map, &work->lockdep_map); 2553 #endif 2554 /* ensure we're on the correct CPU */ 2555 WARN_ON_ONCE(!(pool->flags & POOL_DISASSOCIATED) && 2556 raw_smp_processor_id() != pool->cpu); 2557 2558 /* claim and dequeue */ 2559 debug_work_deactivate(work); 2560 hash_add(pool->busy_hash, &worker->hentry, (unsigned long)work); 2561 worker->current_work = work; 2562 worker->current_func = work->func; 2563 worker->current_pwq = pwq; 2564 worker->current_at = worker->task->se.sum_exec_runtime; 2565 work_data = *work_data_bits(work); 2566 worker->current_color = get_work_color(work_data); 2567 2568 /* 2569 * Record wq name for cmdline and debug reporting, may get 2570 * overridden through set_worker_desc(). 2571 */ 2572 strscpy(worker->desc, pwq->wq->name, WORKER_DESC_LEN); 2573 2574 list_del_init(&work->entry); 2575 2576 /* 2577 * CPU intensive works don't participate in concurrency management. 2578 * They're the scheduler's responsibility. This takes @worker out 2579 * of concurrency management and the next code block will chain 2580 * execution of the pending work items. 2581 */ 2582 if (unlikely(pwq->wq->flags & WQ_CPU_INTENSIVE)) 2583 worker_set_flags(worker, WORKER_CPU_INTENSIVE); 2584 2585 /* 2586 * Kick @pool if necessary. It's always noop for per-cpu worker pools 2587 * since nr_running would always be >= 1 at this point. This is used to 2588 * chain execution of the pending work items for WORKER_NOT_RUNNING 2589 * workers such as the UNBOUND and CPU_INTENSIVE ones. 2590 */ 2591 kick_pool(pool); 2592 2593 /* 2594 * Record the last pool and clear PENDING which should be the last 2595 * update to @work. Also, do this inside @pool->lock so that 2596 * PENDING and queued state changes happen together while IRQ is 2597 * disabled. 2598 */ 2599 set_work_pool_and_clear_pending(work, pool->id); 2600 2601 raw_spin_unlock_irq(&pool->lock); 2602 2603 lock_map_acquire(&pwq->wq->lockdep_map); 2604 lock_map_acquire(&lockdep_map); 2605 /* 2606 * Strictly speaking we should mark the invariant state without holding 2607 * any locks, that is, before these two lock_map_acquire()'s. 2608 * 2609 * However, that would result in: 2610 * 2611 * A(W1) 2612 * WFC(C) 2613 * A(W1) 2614 * C(C) 2615 * 2616 * Which would create W1->C->W1 dependencies, even though there is no 2617 * actual deadlock possible. 
There are two solutions, using a 2618 * read-recursive acquire on the work(queue) 'locks', but this will then 2619 * hit the lockdep limitation on recursive locks, or simply discard 2620 * these locks. 2621 * 2622 * AFAICT there is no possible deadlock scenario between the 2623 * flush_work() and complete() primitives (except for single-threaded 2624 * workqueues), so hiding them isn't a problem. 2625 */ 2626 lockdep_invariant_state(true); 2627 pwq->stats[PWQ_STAT_STARTED]++; 2628 trace_workqueue_execute_start(work); 2629 worker->current_func(work); 2630 /* 2631 * While we must be careful to not use "work" after this, the trace 2632 * point will only record its address. 2633 */ 2634 trace_workqueue_execute_end(work, worker->current_func); 2635 pwq->stats[PWQ_STAT_COMPLETED]++; 2636 lock_map_release(&lockdep_map); 2637 lock_map_release(&pwq->wq->lockdep_map); 2638 2639 if (unlikely(in_atomic() || lockdep_depth(current) > 0)) { 2640 pr_err("BUG: workqueue leaked lock or atomic: %s/0x%08x/%d\n" 2641 " last function: %ps\n", 2642 current->comm, preempt_count(), task_pid_nr(current), 2643 worker->current_func); 2644 debug_show_held_locks(current); 2645 dump_stack(); 2646 } 2647 2648 /* 2649 * The following prevents a kworker from hogging CPU on !PREEMPTION 2650 * kernels, where a requeueing work item waiting for something to 2651 * happen could deadlock with stop_machine as such work item could 2652 * indefinitely requeue itself while all other CPUs are trapped in 2653 * stop_machine. At the same time, report a quiescent RCU state so 2654 * the same condition doesn't freeze RCU. 2655 */ 2656 cond_resched(); 2657 2658 raw_spin_lock_irq(&pool->lock); 2659 2660 /* 2661 * In addition to %WQ_CPU_INTENSIVE, @worker may also have been marked 2662 * CPU intensive by wq_worker_tick() if @work hogged CPU longer than 2663 * wq_cpu_intensive_thresh_us. Clear it. 2664 */ 2665 worker_clr_flags(worker, WORKER_CPU_INTENSIVE); 2666 2667 /* tag the worker for identification in schedule() */ 2668 worker->last_func = worker->current_func; 2669 2670 /* we're done with it, release */ 2671 hash_del(&worker->hentry); 2672 worker->current_work = NULL; 2673 worker->current_func = NULL; 2674 worker->current_pwq = NULL; 2675 worker->current_color = INT_MAX; 2676 pwq_dec_nr_in_flight(pwq, work_data); 2677 } 2678 2679 /** 2680 * process_scheduled_works - process scheduled works 2681 * @worker: self 2682 * 2683 * Process all scheduled works. Please note that the scheduled list 2684 * may change while processing a work, so this function repeatedly 2685 * fetches a work from the top and executes it. 2686 * 2687 * CONTEXT: 2688 * raw_spin_lock_irq(pool->lock) which may be released and regrabbed 2689 * multiple times. 2690 */ 2691 static void process_scheduled_works(struct worker *worker) 2692 { 2693 struct work_struct *work; 2694 bool first = true; 2695 2696 while ((work = list_first_entry_or_null(&worker->scheduled, 2697 struct work_struct, entry))) { 2698 if (first) { 2699 worker->pool->watchdog_ts = jiffies; 2700 first = false; 2701 } 2702 process_one_work(worker, work); 2703 } 2704 } 2705 2706 static void set_pf_worker(bool val) 2707 { 2708 mutex_lock(&wq_pool_attach_mutex); 2709 if (val) 2710 current->flags |= PF_WQ_WORKER; 2711 else 2712 current->flags &= ~PF_WQ_WORKER; 2713 mutex_unlock(&wq_pool_attach_mutex); 2714 } 2715 2716 /** 2717 * worker_thread - the worker thread function 2718 * @__worker: self 2719 * 2720 * The worker thread function. 
All workers belong to a worker_pool - 2721 * either a per-cpu one or dynamic unbound one. These workers process all 2722 * work items regardless of their specific target workqueue. The only 2723 * exception is work items which belong to workqueues with a rescuer which 2724 * will be explained in rescuer_thread(). 2725 * 2726 * Return: 0 2727 */ 2728 static int worker_thread(void *__worker) 2729 { 2730 struct worker *worker = __worker; 2731 struct worker_pool *pool = worker->pool; 2732 2733 /* tell the scheduler that this is a workqueue worker */ 2734 set_pf_worker(true); 2735 woke_up: 2736 raw_spin_lock_irq(&pool->lock); 2737 2738 /* am I supposed to die? */ 2739 if (unlikely(worker->flags & WORKER_DIE)) { 2740 raw_spin_unlock_irq(&pool->lock); 2741 set_pf_worker(false); 2742 2743 set_task_comm(worker->task, "kworker/dying"); 2744 ida_free(&pool->worker_ida, worker->id); 2745 worker_detach_from_pool(worker); 2746 WARN_ON_ONCE(!list_empty(&worker->entry)); 2747 kfree(worker); 2748 return 0; 2749 } 2750 2751 worker_leave_idle(worker); 2752 recheck: 2753 /* no more worker necessary? */ 2754 if (!need_more_worker(pool)) 2755 goto sleep; 2756 2757 /* do we need to manage? */ 2758 if (unlikely(!may_start_working(pool)) && manage_workers(worker)) 2759 goto recheck; 2760 2761 /* 2762 * ->scheduled list can only be filled while a worker is 2763 * preparing to process a work or actually processing it. 2764 * Make sure nobody diddled with it while I was sleeping. 2765 */ 2766 WARN_ON_ONCE(!list_empty(&worker->scheduled)); 2767 2768 /* 2769 * Finish PREP stage. We're guaranteed to have at least one idle 2770 * worker or that someone else has already assumed the manager 2771 * role. This is where @worker starts participating in concurrency 2772 * management if applicable and concurrency management is restored 2773 * after being rebound. See rebind_workers() for details. 2774 */ 2775 worker_clr_flags(worker, WORKER_PREP | WORKER_REBOUND); 2776 2777 do { 2778 struct work_struct *work = 2779 list_first_entry(&pool->worklist, 2780 struct work_struct, entry); 2781 2782 if (assign_work(work, worker, NULL)) 2783 process_scheduled_works(worker); 2784 } while (keep_working(pool)); 2785 2786 worker_set_flags(worker, WORKER_PREP); 2787 sleep: 2788 /* 2789 * pool->lock is held and there's no work to process and no need to 2790 * manage, sleep. Workers are woken up only while holding 2791 * pool->lock or from local cpu, so setting the current state 2792 * before releasing pool->lock is enough to prevent losing any 2793 * event. 2794 */ 2795 worker_enter_idle(worker); 2796 __set_current_state(TASK_IDLE); 2797 raw_spin_unlock_irq(&pool->lock); 2798 schedule(); 2799 goto woke_up; 2800 } 2801 2802 /** 2803 * rescuer_thread - the rescuer thread function 2804 * @__rescuer: self 2805 * 2806 * Workqueue rescuer thread function. There's one rescuer for each 2807 * workqueue which has WQ_MEM_RECLAIM set. 2808 * 2809 * Regular work processing on a pool may block trying to create a new 2810 * worker which uses GFP_KERNEL allocation which has slight chance of 2811 * developing into deadlock if some works currently on the same queue 2812 * need to be processed to satisfy the GFP_KERNEL allocation. This is 2813 * the problem rescuer solves. 2814 * 2815 * When such condition is possible, the pool summons rescuers of all 2816 * workqueues which have works queued on the pool and let them process 2817 * those works so that forward progress can be guaranteed. 2818 * 2819 * This should happen rarely. 
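 *
 * A rescuer only exists for workqueues created with %WQ_MEM_RECLAIM. A
 * minimal allocation sketch (the name "my_reclaim_wq" is illustrative and
 * not part of this file):
 *
 *	struct workqueue_struct *wq;
 *
 *	wq = alloc_workqueue("my_reclaim_wq", WQ_MEM_RECLAIM, 0);
 *	if (!wq)
 *		return -ENOMEM;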
2820 * 2821 * Return: 0 2822 */ 2823 static int rescuer_thread(void *__rescuer) 2824 { 2825 struct worker *rescuer = __rescuer; 2826 struct workqueue_struct *wq = rescuer->rescue_wq; 2827 bool should_stop; 2828 2829 set_user_nice(current, RESCUER_NICE_LEVEL); 2830 2831 /* 2832 * Mark rescuer as worker too. As WORKER_PREP is never cleared, it 2833 * doesn't participate in concurrency management. 2834 */ 2835 set_pf_worker(true); 2836 repeat: 2837 set_current_state(TASK_IDLE); 2838 2839 /* 2840 * By the time the rescuer is requested to stop, the workqueue 2841 * shouldn't have any work pending, but @wq->maydays may still have 2842 * pwq(s) queued. This can happen by non-rescuer workers consuming 2843 * all the work items before the rescuer got to them. Go through 2844 * @wq->maydays processing before acting on should_stop so that the 2845 * list is always empty on exit. 2846 */ 2847 should_stop = kthread_should_stop(); 2848 2849 /* see whether any pwq is asking for help */ 2850 raw_spin_lock_irq(&wq_mayday_lock); 2851 2852 while (!list_empty(&wq->maydays)) { 2853 struct pool_workqueue *pwq = list_first_entry(&wq->maydays, 2854 struct pool_workqueue, mayday_node); 2855 struct worker_pool *pool = pwq->pool; 2856 struct work_struct *work, *n; 2857 2858 __set_current_state(TASK_RUNNING); 2859 list_del_init(&pwq->mayday_node); 2860 2861 raw_spin_unlock_irq(&wq_mayday_lock); 2862 2863 worker_attach_to_pool(rescuer, pool); 2864 2865 raw_spin_lock_irq(&pool->lock); 2866 2867 /* 2868 * Slurp in all works issued via this workqueue and 2869 * process'em. 2870 */ 2871 WARN_ON_ONCE(!list_empty(&rescuer->scheduled)); 2872 list_for_each_entry_safe(work, n, &pool->worklist, entry) { 2873 if (get_work_pwq(work) == pwq && 2874 assign_work(work, rescuer, &n)) 2875 pwq->stats[PWQ_STAT_RESCUED]++; 2876 } 2877 2878 if (!list_empty(&rescuer->scheduled)) { 2879 process_scheduled_works(rescuer); 2880 2881 /* 2882 * The above execution of rescued work items could 2883 * have created more to rescue through 2884 * pwq_activate_first_inactive() or chained 2885 * queueing. Let's put @pwq back on mayday list so 2886 * that such back-to-back work items, which may be 2887 * being used to relieve memory pressure, don't 2888 * incur MAYDAY_INTERVAL delay inbetween. 2889 */ 2890 if (pwq->nr_active && need_to_create_worker(pool)) { 2891 raw_spin_lock(&wq_mayday_lock); 2892 /* 2893 * Queue iff we aren't racing destruction 2894 * and somebody else hasn't queued it already. 2895 */ 2896 if (wq->rescuer && list_empty(&pwq->mayday_node)) { 2897 get_pwq(pwq); 2898 list_add_tail(&pwq->mayday_node, &wq->maydays); 2899 } 2900 raw_spin_unlock(&wq_mayday_lock); 2901 } 2902 } 2903 2904 /* 2905 * Put the reference grabbed by send_mayday(). @pool won't 2906 * go away while we're still attached to it. 2907 */ 2908 put_pwq(pwq); 2909 2910 /* 2911 * Leave this pool. Notify regular workers; otherwise, we end up 2912 * with 0 concurrency and stalling the execution. 
2913 */ 2914 kick_pool(pool); 2915 2916 raw_spin_unlock_irq(&pool->lock); 2917 2918 worker_detach_from_pool(rescuer); 2919 2920 raw_spin_lock_irq(&wq_mayday_lock); 2921 } 2922 2923 raw_spin_unlock_irq(&wq_mayday_lock); 2924 2925 if (should_stop) { 2926 __set_current_state(TASK_RUNNING); 2927 set_pf_worker(false); 2928 return 0; 2929 } 2930 2931 /* rescuers should never participate in concurrency management */ 2932 WARN_ON_ONCE(!(rescuer->flags & WORKER_NOT_RUNNING)); 2933 schedule(); 2934 goto repeat; 2935 } 2936 2937 /** 2938 * check_flush_dependency - check for flush dependency sanity 2939 * @target_wq: workqueue being flushed 2940 * @target_work: work item being flushed (NULL for workqueue flushes) 2941 * 2942 * %current is trying to flush the whole @target_wq or @target_work on it. 2943 * If @target_wq doesn't have %WQ_MEM_RECLAIM, verify that %current is not 2944 * reclaiming memory or running on a workqueue which doesn't have 2945 * %WQ_MEM_RECLAIM as that can break forward-progress guarantee leading to 2946 * a deadlock. 2947 */ 2948 static void check_flush_dependency(struct workqueue_struct *target_wq, 2949 struct work_struct *target_work) 2950 { 2951 work_func_t target_func = target_work ? target_work->func : NULL; 2952 struct worker *worker; 2953 2954 if (target_wq->flags & WQ_MEM_RECLAIM) 2955 return; 2956 2957 worker = current_wq_worker(); 2958 2959 WARN_ONCE(current->flags & PF_MEMALLOC, 2960 "workqueue: PF_MEMALLOC task %d(%s) is flushing !WQ_MEM_RECLAIM %s:%ps", 2961 current->pid, current->comm, target_wq->name, target_func); 2962 WARN_ONCE(worker && ((worker->current_pwq->wq->flags & 2963 (WQ_MEM_RECLAIM | __WQ_LEGACY)) == WQ_MEM_RECLAIM), 2964 "workqueue: WQ_MEM_RECLAIM %s:%ps is flushing !WQ_MEM_RECLAIM %s:%ps", 2965 worker->current_pwq->wq->name, worker->current_func, 2966 target_wq->name, target_func); 2967 } 2968 2969 struct wq_barrier { 2970 struct work_struct work; 2971 struct completion done; 2972 struct task_struct *task; /* purely informational */ 2973 }; 2974 2975 static void wq_barrier_func(struct work_struct *work) 2976 { 2977 struct wq_barrier *barr = container_of(work, struct wq_barrier, work); 2978 complete(&barr->done); 2979 } 2980 2981 /** 2982 * insert_wq_barrier - insert a barrier work 2983 * @pwq: pwq to insert barrier into 2984 * @barr: wq_barrier to insert 2985 * @target: target work to attach @barr to 2986 * @worker: worker currently executing @target, NULL if @target is not executing 2987 * 2988 * @barr is linked to @target such that @barr is completed only after 2989 * @target finishes execution. Please note that the ordering 2990 * guarantee is observed only with respect to @target and on the local 2991 * cpu. 2992 * 2993 * Currently, a queued barrier can't be canceled. This is because 2994 * try_to_grab_pending() can't determine whether the work to be 2995 * grabbed is at the head of the queue and thus can't clear LINKED 2996 * flag of the previous work while there must be a valid next work 2997 * after a work with LINKED flag set. 2998 * 2999 * Note that when @worker is non-NULL, @target may be modified 3000 * underneath us, so we can't reliably determine pwq from @target. 3001 * 3002 * CONTEXT: 3003 * raw_spin_lock_irq(pool->lock). 
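 *
 * Schematically, for a queued (not yet executing) @target the result is:
 *
 *	... -> @target (LINKED) -> @barr->work (INACTIVE) -> next work -> ...
 *
 * so whichever worker later dequeues @target pulls the barrier along with
 * it, and @barr->done is completed right after @target finishes.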
3004 */ 3005 static void insert_wq_barrier(struct pool_workqueue *pwq, 3006 struct wq_barrier *barr, 3007 struct work_struct *target, struct worker *worker) 3008 { 3009 unsigned int work_flags = 0; 3010 unsigned int work_color; 3011 struct list_head *head; 3012 3013 /* 3014 * debugobject calls are safe here even with pool->lock locked 3015 * as we know for sure that this will not trigger any of the 3016 * checks and call back into the fixup functions where we 3017 * might deadlock. 3018 */ 3019 INIT_WORK_ONSTACK(&barr->work, wq_barrier_func); 3020 __set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work)); 3021 3022 init_completion_map(&barr->done, &target->lockdep_map); 3023 3024 barr->task = current; 3025 3026 /* The barrier work item does not participate in pwq->nr_active. */ 3027 work_flags |= WORK_STRUCT_INACTIVE; 3028 3029 /* 3030 * If @target is currently being executed, schedule the 3031 * barrier to the worker; otherwise, put it after @target. 3032 */ 3033 if (worker) { 3034 head = worker->scheduled.next; 3035 work_color = worker->current_color; 3036 } else { 3037 unsigned long *bits = work_data_bits(target); 3038 3039 head = target->entry.next; 3040 /* there can already be other linked works, inherit and set */ 3041 work_flags |= *bits & WORK_STRUCT_LINKED; 3042 work_color = get_work_color(*bits); 3043 __set_bit(WORK_STRUCT_LINKED_BIT, bits); 3044 } 3045 3046 pwq->nr_in_flight[work_color]++; 3047 work_flags |= work_color_to_flags(work_color); 3048 3049 insert_work(pwq, &barr->work, head, work_flags); 3050 } 3051 3052 /** 3053 * flush_workqueue_prep_pwqs - prepare pwqs for workqueue flushing 3054 * @wq: workqueue being flushed 3055 * @flush_color: new flush color, < 0 for no-op 3056 * @work_color: new work color, < 0 for no-op 3057 * 3058 * Prepare pwqs for workqueue flushing. 3059 * 3060 * If @flush_color is non-negative, flush_color on all pwqs should be 3061 * -1. If no pwq has in-flight commands at the specified color, all 3062 * pwq->flush_color's stay at -1 and %false is returned. If any pwq 3063 * has in flight commands, its pwq->flush_color is set to 3064 * @flush_color, @wq->nr_pwqs_to_flush is updated accordingly, pwq 3065 * wakeup logic is armed and %true is returned. 3066 * 3067 * The caller should have initialized @wq->first_flusher prior to 3068 * calling this function with non-negative @flush_color. If 3069 * @flush_color is negative, no flush color update is done and %false 3070 * is returned. 3071 * 3072 * If @work_color is non-negative, all pwqs should have the same 3073 * work_color which is previous to @work_color and all will be 3074 * advanced to @work_color. 3075 * 3076 * CONTEXT: 3077 * mutex_lock(wq->mutex). 3078 * 3079 * Return: 3080 * %true if @flush_color >= 0 and there's something to flush. %false 3081 * otherwise. 
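 *
 * As a worked example: if wq->work_color is 2 when a flush starts, the
 * flusher takes flush_color 2 and work_color advances to 3; every pwq that
 * still has nr_in_flight[2] work items gets pwq->flush_color set to 2 and
 * bumps wq->nr_pwqs_to_flush, and the flusher's completion fires once each
 * such pwq has drained color 2.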
3082 */ 3083 static bool flush_workqueue_prep_pwqs(struct workqueue_struct *wq, 3084 int flush_color, int work_color) 3085 { 3086 bool wait = false; 3087 struct pool_workqueue *pwq; 3088 3089 if (flush_color >= 0) { 3090 WARN_ON_ONCE(atomic_read(&wq->nr_pwqs_to_flush)); 3091 atomic_set(&wq->nr_pwqs_to_flush, 1); 3092 } 3093 3094 for_each_pwq(pwq, wq) { 3095 struct worker_pool *pool = pwq->pool; 3096 3097 raw_spin_lock_irq(&pool->lock); 3098 3099 if (flush_color >= 0) { 3100 WARN_ON_ONCE(pwq->flush_color != -1); 3101 3102 if (pwq->nr_in_flight[flush_color]) { 3103 pwq->flush_color = flush_color; 3104 atomic_inc(&wq->nr_pwqs_to_flush); 3105 wait = true; 3106 } 3107 } 3108 3109 if (work_color >= 0) { 3110 WARN_ON_ONCE(work_color != work_next_color(pwq->work_color)); 3111 pwq->work_color = work_color; 3112 } 3113 3114 raw_spin_unlock_irq(&pool->lock); 3115 } 3116 3117 if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_pwqs_to_flush)) 3118 complete(&wq->first_flusher->done); 3119 3120 return wait; 3121 } 3122 3123 /** 3124 * __flush_workqueue - ensure that any scheduled work has run to completion. 3125 * @wq: workqueue to flush 3126 * 3127 * This function sleeps until all work items which were queued on entry 3128 * have finished execution, but it is not livelocked by new incoming ones. 3129 */ 3130 void __flush_workqueue(struct workqueue_struct *wq) 3131 { 3132 struct wq_flusher this_flusher = { 3133 .list = LIST_HEAD_INIT(this_flusher.list), 3134 .flush_color = -1, 3135 .done = COMPLETION_INITIALIZER_ONSTACK_MAP(this_flusher.done, wq->lockdep_map), 3136 }; 3137 int next_color; 3138 3139 if (WARN_ON(!wq_online)) 3140 return; 3141 3142 lock_map_acquire(&wq->lockdep_map); 3143 lock_map_release(&wq->lockdep_map); 3144 3145 mutex_lock(&wq->mutex); 3146 3147 /* 3148 * Start-to-wait phase 3149 */ 3150 next_color = work_next_color(wq->work_color); 3151 3152 if (next_color != wq->flush_color) { 3153 /* 3154 * Color space is not full. The current work_color 3155 * becomes our flush_color and work_color is advanced 3156 * by one. 3157 */ 3158 WARN_ON_ONCE(!list_empty(&wq->flusher_overflow)); 3159 this_flusher.flush_color = wq->work_color; 3160 wq->work_color = next_color; 3161 3162 if (!wq->first_flusher) { 3163 /* no flush in progress, become the first flusher */ 3164 WARN_ON_ONCE(wq->flush_color != this_flusher.flush_color); 3165 3166 wq->first_flusher = &this_flusher; 3167 3168 if (!flush_workqueue_prep_pwqs(wq, wq->flush_color, 3169 wq->work_color)) { 3170 /* nothing to flush, done */ 3171 wq->flush_color = next_color; 3172 wq->first_flusher = NULL; 3173 goto out_unlock; 3174 } 3175 } else { 3176 /* wait in queue */ 3177 WARN_ON_ONCE(wq->flush_color == this_flusher.flush_color); 3178 list_add_tail(&this_flusher.list, &wq->flusher_queue); 3179 flush_workqueue_prep_pwqs(wq, -1, wq->work_color); 3180 } 3181 } else { 3182 /* 3183 * Oops, color space is full, wait on overflow queue. 3184 * The next flush completion will assign us 3185 * flush_color and transfer to flusher_queue. 3186 */ 3187 list_add_tail(&this_flusher.list, &wq->flusher_overflow); 3188 } 3189 3190 check_flush_dependency(wq, NULL); 3191 3192 mutex_unlock(&wq->mutex); 3193 3194 wait_for_completion(&this_flusher.done); 3195 3196 /* 3197 * Wake-up-and-cascade phase 3198 * 3199 * First flushers are responsible for cascading flushes and 3200 * handling overflow. Non-first flushers can simply return. 
3201 */ 3202 if (READ_ONCE(wq->first_flusher) != &this_flusher) 3203 return; 3204 3205 mutex_lock(&wq->mutex); 3206 3207 /* we might have raced, check again with mutex held */ 3208 if (wq->first_flusher != &this_flusher) 3209 goto out_unlock; 3210 3211 WRITE_ONCE(wq->first_flusher, NULL); 3212 3213 WARN_ON_ONCE(!list_empty(&this_flusher.list)); 3214 WARN_ON_ONCE(wq->flush_color != this_flusher.flush_color); 3215 3216 while (true) { 3217 struct wq_flusher *next, *tmp; 3218 3219 /* complete all the flushers sharing the current flush color */ 3220 list_for_each_entry_safe(next, tmp, &wq->flusher_queue, list) { 3221 if (next->flush_color != wq->flush_color) 3222 break; 3223 list_del_init(&next->list); 3224 complete(&next->done); 3225 } 3226 3227 WARN_ON_ONCE(!list_empty(&wq->flusher_overflow) && 3228 wq->flush_color != work_next_color(wq->work_color)); 3229 3230 /* this flush_color is finished, advance by one */ 3231 wq->flush_color = work_next_color(wq->flush_color); 3232 3233 /* one color has been freed, handle overflow queue */ 3234 if (!list_empty(&wq->flusher_overflow)) { 3235 /* 3236 * Assign the same color to all overflowed 3237 * flushers, advance work_color and append to 3238 * flusher_queue. This is the start-to-wait 3239 * phase for these overflowed flushers. 3240 */ 3241 list_for_each_entry(tmp, &wq->flusher_overflow, list) 3242 tmp->flush_color = wq->work_color; 3243 3244 wq->work_color = work_next_color(wq->work_color); 3245 3246 list_splice_tail_init(&wq->flusher_overflow, 3247 &wq->flusher_queue); 3248 flush_workqueue_prep_pwqs(wq, -1, wq->work_color); 3249 } 3250 3251 if (list_empty(&wq->flusher_queue)) { 3252 WARN_ON_ONCE(wq->flush_color != wq->work_color); 3253 break; 3254 } 3255 3256 /* 3257 * Need to flush more colors. Make the next flusher 3258 * the new first flusher and arm pwqs. 3259 */ 3260 WARN_ON_ONCE(wq->flush_color == wq->work_color); 3261 WARN_ON_ONCE(wq->flush_color != next->flush_color); 3262 3263 list_del_init(&next->list); 3264 wq->first_flusher = next; 3265 3266 if (flush_workqueue_prep_pwqs(wq, wq->flush_color, -1)) 3267 break; 3268 3269 /* 3270 * Meh... this color is already done, clear first 3271 * flusher and repeat cascading. 3272 */ 3273 wq->first_flusher = NULL; 3274 } 3275 3276 out_unlock: 3277 mutex_unlock(&wq->mutex); 3278 } 3279 EXPORT_SYMBOL(__flush_workqueue); 3280 3281 /** 3282 * drain_workqueue - drain a workqueue 3283 * @wq: workqueue to drain 3284 * 3285 * Wait until the workqueue becomes empty. While draining is in progress, 3286 * only chain queueing is allowed. IOW, only currently pending or running 3287 * work items on @wq can queue further work items on it. @wq is flushed 3288 * repeatedly until it becomes empty. The number of flushing is determined 3289 * by the depth of chaining and should be relatively short. Whine if it 3290 * takes too long. 3291 */ 3292 void drain_workqueue(struct workqueue_struct *wq) 3293 { 3294 unsigned int flush_cnt = 0; 3295 struct pool_workqueue *pwq; 3296 3297 /* 3298 * __queue_work() needs to test whether there are drainers, is much 3299 * hotter than drain_workqueue() and already looks at @wq->flags. 3300 * Use __WQ_DRAINING so that queue doesn't have to check nr_drainers. 
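 *
 * While __WQ_DRAINING is set, __queue_work() only accepts work items
 * queued by a worker already executing on this workqueue (chained
 * queueing, see is_chained_work()) and warns about anything else.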
3301 */ 3302 mutex_lock(&wq->mutex); 3303 if (!wq->nr_drainers++) 3304 wq->flags |= __WQ_DRAINING; 3305 mutex_unlock(&wq->mutex); 3306 reflush: 3307 __flush_workqueue(wq); 3308 3309 mutex_lock(&wq->mutex); 3310 3311 for_each_pwq(pwq, wq) { 3312 bool drained; 3313 3314 raw_spin_lock_irq(&pwq->pool->lock); 3315 drained = !pwq->nr_active && list_empty(&pwq->inactive_works); 3316 raw_spin_unlock_irq(&pwq->pool->lock); 3317 3318 if (drained) 3319 continue; 3320 3321 if (++flush_cnt == 10 || 3322 (flush_cnt % 100 == 0 && flush_cnt <= 1000)) 3323 pr_warn("workqueue %s: %s() isn't complete after %u tries\n", 3324 wq->name, __func__, flush_cnt); 3325 3326 mutex_unlock(&wq->mutex); 3327 goto reflush; 3328 } 3329 3330 if (!--wq->nr_drainers) 3331 wq->flags &= ~__WQ_DRAINING; 3332 mutex_unlock(&wq->mutex); 3333 } 3334 EXPORT_SYMBOL_GPL(drain_workqueue); 3335 3336 static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr, 3337 bool from_cancel) 3338 { 3339 struct worker *worker = NULL; 3340 struct worker_pool *pool; 3341 struct pool_workqueue *pwq; 3342 3343 might_sleep(); 3344 3345 rcu_read_lock(); 3346 pool = get_work_pool(work); 3347 if (!pool) { 3348 rcu_read_unlock(); 3349 return false; 3350 } 3351 3352 raw_spin_lock_irq(&pool->lock); 3353 /* see the comment in try_to_grab_pending() with the same code */ 3354 pwq = get_work_pwq(work); 3355 if (pwq) { 3356 if (unlikely(pwq->pool != pool)) 3357 goto already_gone; 3358 } else { 3359 worker = find_worker_executing_work(pool, work); 3360 if (!worker) 3361 goto already_gone; 3362 pwq = worker->current_pwq; 3363 } 3364 3365 check_flush_dependency(pwq->wq, work); 3366 3367 insert_wq_barrier(pwq, barr, work, worker); 3368 raw_spin_unlock_irq(&pool->lock); 3369 3370 /* 3371 * Force a lock recursion deadlock when using flush_work() inside a 3372 * single-threaded or rescuer equipped workqueue. 3373 * 3374 * For single threaded workqueues the deadlock happens when the work 3375 * is after the work issuing the flush_work(). For rescuer equipped 3376 * workqueues the deadlock happens when the rescuer stalls, blocking 3377 * forward progress. 3378 */ 3379 if (!from_cancel && 3380 (pwq->wq->saved_max_active == 1 || pwq->wq->rescuer)) { 3381 lock_map_acquire(&pwq->wq->lockdep_map); 3382 lock_map_release(&pwq->wq->lockdep_map); 3383 } 3384 rcu_read_unlock(); 3385 return true; 3386 already_gone: 3387 raw_spin_unlock_irq(&pool->lock); 3388 rcu_read_unlock(); 3389 return false; 3390 } 3391 3392 static bool __flush_work(struct work_struct *work, bool from_cancel) 3393 { 3394 struct wq_barrier barr; 3395 3396 if (WARN_ON(!wq_online)) 3397 return false; 3398 3399 if (WARN_ON(!work->func)) 3400 return false; 3401 3402 lock_map_acquire(&work->lockdep_map); 3403 lock_map_release(&work->lockdep_map); 3404 3405 if (start_flush_work(work, &barr, from_cancel)) { 3406 wait_for_completion(&barr.done); 3407 destroy_work_on_stack(&barr.work); 3408 return true; 3409 } else { 3410 return false; 3411 } 3412 } 3413 3414 /** 3415 * flush_work - wait for a work to finish executing the last queueing instance 3416 * @work: the work to flush 3417 * 3418 * Wait until @work has finished execution. @work is guaranteed to be idle 3419 * on return if it hasn't been requeued since flush started. 3420 * 3421 * Return: 3422 * %true if flush_work() waited for the work to finish execution, 3423 * %false if it was already idle. 
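 *
 * A minimal usage sketch (my_work is illustrative and not part of this
 * file); once flush_work() returns, that queueing instance has finished:
 *
 *	queue_work(system_wq, &my_work);
 *	flush_work(&my_work);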
3424 */ 3425 bool flush_work(struct work_struct *work) 3426 { 3427 return __flush_work(work, false); 3428 } 3429 EXPORT_SYMBOL_GPL(flush_work); 3430 3431 struct cwt_wait { 3432 wait_queue_entry_t wait; 3433 struct work_struct *work; 3434 }; 3435 3436 static int cwt_wakefn(wait_queue_entry_t *wait, unsigned mode, int sync, void *key) 3437 { 3438 struct cwt_wait *cwait = container_of(wait, struct cwt_wait, wait); 3439 3440 if (cwait->work != key) 3441 return 0; 3442 return autoremove_wake_function(wait, mode, sync, key); 3443 } 3444 3445 static bool __cancel_work_timer(struct work_struct *work, bool is_dwork) 3446 { 3447 static DECLARE_WAIT_QUEUE_HEAD(cancel_waitq); 3448 unsigned long flags; 3449 int ret; 3450 3451 do { 3452 ret = try_to_grab_pending(work, is_dwork, &flags); 3453 /* 3454 * If someone else is already canceling, wait for it to 3455 * finish. flush_work() doesn't work for PREEMPT_NONE 3456 * because we may get scheduled between @work's completion 3457 * and the other canceling task resuming and clearing 3458 * CANCELING - flush_work() will return false immediately 3459 * as @work is no longer busy, try_to_grab_pending() will 3460 * return -ENOENT as @work is still being canceled and the 3461 * other canceling task won't be able to clear CANCELING as 3462 * we're hogging the CPU. 3463 * 3464 * Let's wait for completion using a waitqueue. As this 3465 * may lead to the thundering herd problem, use a custom 3466 * wake function which matches @work along with exclusive 3467 * wait and wakeup. 3468 */ 3469 if (unlikely(ret == -ENOENT)) { 3470 struct cwt_wait cwait; 3471 3472 init_wait(&cwait.wait); 3473 cwait.wait.func = cwt_wakefn; 3474 cwait.work = work; 3475 3476 prepare_to_wait_exclusive(&cancel_waitq, &cwait.wait, 3477 TASK_UNINTERRUPTIBLE); 3478 if (work_is_canceling(work)) 3479 schedule(); 3480 finish_wait(&cancel_waitq, &cwait.wait); 3481 } 3482 } while (unlikely(ret < 0)); 3483 3484 /* tell other tasks trying to grab @work to back off */ 3485 mark_work_canceling(work); 3486 local_irq_restore(flags); 3487 3488 /* 3489 * This allows canceling during early boot. We know that @work 3490 * isn't executing. 3491 */ 3492 if (wq_online) 3493 __flush_work(work, true); 3494 3495 clear_work_data(work); 3496 3497 /* 3498 * Paired with prepare_to_wait() above so that either 3499 * waitqueue_active() is visible here or !work_is_canceling() is 3500 * visible there. 3501 */ 3502 smp_mb(); 3503 if (waitqueue_active(&cancel_waitq)) 3504 __wake_up(&cancel_waitq, TASK_NORMAL, 1, work); 3505 3506 return ret; 3507 } 3508 3509 /** 3510 * cancel_work_sync - cancel a work and wait for it to finish 3511 * @work: the work to cancel 3512 * 3513 * Cancel @work and wait for its execution to finish. This function 3514 * can be used even if the work re-queues itself or migrates to 3515 * another workqueue. On return from this function, @work is 3516 * guaranteed to be not pending or executing on any CPU. 3517 * 3518 * cancel_work_sync(&delayed_work->work) must not be used for 3519 * delayed_work's. Use cancel_delayed_work_sync() instead. 3520 * 3521 * The caller must ensure that the workqueue on which @work was last 3522 * queued can't be destroyed before this function returns. 3523 * 3524 * Return: 3525 * %true if @work was pending, %false otherwise. 
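 *
 * Typical use is in teardown paths, where the work must be stopped before
 * the objects it uses go away. A minimal sketch (my_dev and its members
 * are illustrative and not part of this file); note that the cancel must
 * happen before the workqueue the work was queued on is destroyed:
 *
 *	cancel_work_sync(&my_dev->refresh_work);
 *	destroy_workqueue(my_dev->wq);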
3526 */ 3527 bool cancel_work_sync(struct work_struct *work) 3528 { 3529 return __cancel_work_timer(work, false); 3530 } 3531 EXPORT_SYMBOL_GPL(cancel_work_sync); 3532 3533 /** 3534 * flush_delayed_work - wait for a dwork to finish executing the last queueing 3535 * @dwork: the delayed work to flush 3536 * 3537 * Delayed timer is cancelled and the pending work is queued for 3538 * immediate execution. Like flush_work(), this function only 3539 * considers the last queueing instance of @dwork. 3540 * 3541 * Return: 3542 * %true if flush_work() waited for the work to finish execution, 3543 * %false if it was already idle. 3544 */ 3545 bool flush_delayed_work(struct delayed_work *dwork) 3546 { 3547 local_irq_disable(); 3548 if (del_timer_sync(&dwork->timer)) 3549 __queue_work(dwork->cpu, dwork->wq, &dwork->work); 3550 local_irq_enable(); 3551 return flush_work(&dwork->work); 3552 } 3553 EXPORT_SYMBOL(flush_delayed_work); 3554 3555 /** 3556 * flush_rcu_work - wait for a rwork to finish executing the last queueing 3557 * @rwork: the rcu work to flush 3558 * 3559 * Return: 3560 * %true if flush_rcu_work() waited for the work to finish execution, 3561 * %false if it was already idle. 3562 */ 3563 bool flush_rcu_work(struct rcu_work *rwork) 3564 { 3565 if (test_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&rwork->work))) { 3566 rcu_barrier(); 3567 flush_work(&rwork->work); 3568 return true; 3569 } else { 3570 return flush_work(&rwork->work); 3571 } 3572 } 3573 EXPORT_SYMBOL(flush_rcu_work); 3574 3575 static bool __cancel_work(struct work_struct *work, bool is_dwork) 3576 { 3577 unsigned long flags; 3578 int ret; 3579 3580 do { 3581 ret = try_to_grab_pending(work, is_dwork, &flags); 3582 } while (unlikely(ret == -EAGAIN)); 3583 3584 if (unlikely(ret < 0)) 3585 return false; 3586 3587 set_work_pool_and_clear_pending(work, get_work_pool_id(work)); 3588 local_irq_restore(flags); 3589 return ret; 3590 } 3591 3592 /* 3593 * See cancel_delayed_work() 3594 */ 3595 bool cancel_work(struct work_struct *work) 3596 { 3597 return __cancel_work(work, false); 3598 } 3599 EXPORT_SYMBOL(cancel_work); 3600 3601 /** 3602 * cancel_delayed_work - cancel a delayed work 3603 * @dwork: delayed_work to cancel 3604 * 3605 * Kill off a pending delayed_work. 3606 * 3607 * Return: %true if @dwork was pending and canceled; %false if it wasn't 3608 * pending. 3609 * 3610 * Note: 3611 * The work callback function may still be running on return, unless 3612 * it returns %true and the work doesn't re-arm itself. Explicitly flush or 3613 * use cancel_delayed_work_sync() to wait on it. 3614 * 3615 * This function is safe to call from any context including IRQ handler. 3616 */ 3617 bool cancel_delayed_work(struct delayed_work *dwork) 3618 { 3619 return __cancel_work(&dwork->work, true); 3620 } 3621 EXPORT_SYMBOL(cancel_delayed_work); 3622 3623 /** 3624 * cancel_delayed_work_sync - cancel a delayed work and wait for it to finish 3625 * @dwork: the delayed work cancel 3626 * 3627 * This is cancel_work_sync() for delayed works. 3628 * 3629 * Return: 3630 * %true if @dwork was pending, %false otherwise. 
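 *
 * For example, a hypothetical driver stopping a self-rearming poll loop:
 *
 *	INIT_DELAYED_WORK(&fd->poll_dwork, frob_poll_fn);
 *	queue_delayed_work(system_wq, &fd->poll_dwork, HZ);
 *	...
 *	cancel_delayed_work_sync(&fd->poll_dwork);
 *
 * As with cancel_work_sync(), the dwork is neither pending nor executing once
 * this returns, even if frob_poll_fn() re-arms itself on each run.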
3631 */ 3632 bool cancel_delayed_work_sync(struct delayed_work *dwork) 3633 { 3634 return __cancel_work_timer(&dwork->work, true); 3635 } 3636 EXPORT_SYMBOL(cancel_delayed_work_sync); 3637 3638 /** 3639 * schedule_on_each_cpu - execute a function synchronously on each online CPU 3640 * @func: the function to call 3641 * 3642 * schedule_on_each_cpu() executes @func on each online CPU using the 3643 * system workqueue and blocks until all CPUs have completed. 3644 * schedule_on_each_cpu() is very slow. 3645 * 3646 * Return: 3647 * 0 on success, -errno on failure. 3648 */ 3649 int schedule_on_each_cpu(work_func_t func) 3650 { 3651 int cpu; 3652 struct work_struct __percpu *works; 3653 3654 works = alloc_percpu(struct work_struct); 3655 if (!works) 3656 return -ENOMEM; 3657 3658 cpus_read_lock(); 3659 3660 for_each_online_cpu(cpu) { 3661 struct work_struct *work = per_cpu_ptr(works, cpu); 3662 3663 INIT_WORK(work, func); 3664 schedule_work_on(cpu, work); 3665 } 3666 3667 for_each_online_cpu(cpu) 3668 flush_work(per_cpu_ptr(works, cpu)); 3669 3670 cpus_read_unlock(); 3671 free_percpu(works); 3672 return 0; 3673 } 3674 3675 /** 3676 * execute_in_process_context - reliably execute the routine with user context 3677 * @fn: the function to execute 3678 * @ew: guaranteed storage for the execute work structure (must 3679 * be available when the work executes) 3680 * 3681 * Executes the function immediately if process context is available, 3682 * otherwise schedules the function for delayed execution. 3683 * 3684 * Return: 0 - function was executed 3685 * 1 - function was scheduled for execution 3686 */ 3687 int execute_in_process_context(work_func_t fn, struct execute_work *ew) 3688 { 3689 if (!in_interrupt()) { 3690 fn(&ew->work); 3691 return 0; 3692 } 3693 3694 INIT_WORK(&ew->work, fn); 3695 schedule_work(&ew->work); 3696 3697 return 1; 3698 } 3699 EXPORT_SYMBOL_GPL(execute_in_process_context); 3700 3701 /** 3702 * free_workqueue_attrs - free a workqueue_attrs 3703 * @attrs: workqueue_attrs to free 3704 * 3705 * Undo alloc_workqueue_attrs(). 3706 */ 3707 void free_workqueue_attrs(struct workqueue_attrs *attrs) 3708 { 3709 if (attrs) { 3710 free_cpumask_var(attrs->cpumask); 3711 free_cpumask_var(attrs->__pod_cpumask); 3712 kfree(attrs); 3713 } 3714 } 3715 3716 /** 3717 * alloc_workqueue_attrs - allocate a workqueue_attrs 3718 * 3719 * Allocate a new workqueue_attrs, initialize with default settings and 3720 * return it. 3721 * 3722 * Return: The allocated new workqueue_attr on success. %NULL on failure. 3723 */ 3724 struct workqueue_attrs *alloc_workqueue_attrs(void) 3725 { 3726 struct workqueue_attrs *attrs; 3727 3728 attrs = kzalloc(sizeof(*attrs), GFP_KERNEL); 3729 if (!attrs) 3730 goto fail; 3731 if (!alloc_cpumask_var(&attrs->cpumask, GFP_KERNEL)) 3732 goto fail; 3733 if (!alloc_cpumask_var(&attrs->__pod_cpumask, GFP_KERNEL)) 3734 goto fail; 3735 3736 cpumask_copy(attrs->cpumask, cpu_possible_mask); 3737 attrs->affn_scope = wq_affn_dfl; 3738 return attrs; 3739 fail: 3740 free_workqueue_attrs(attrs); 3741 return NULL; 3742 } 3743 3744 static void copy_workqueue_attrs(struct workqueue_attrs *to, 3745 const struct workqueue_attrs *from) 3746 { 3747 to->nice = from->nice; 3748 cpumask_copy(to->cpumask, from->cpumask); 3749 cpumask_copy(to->__pod_cpumask, from->__pod_cpumask); 3750 to->affn_strict = from->affn_strict; 3751 3752 /* 3753 * Unlike hash and equality test, copying shouldn't ignore wq-only 3754 * fields as copying is used for both pool and wq attrs. 
Instead, 3755 * get_unbound_pool() explicitly clears the fields. 3756 */ 3757 to->affn_scope = from->affn_scope; 3758 to->ordered = from->ordered; 3759 } 3760 3761 /* 3762 * Some attrs fields are workqueue-only. Clear them for worker_pool's. See the 3763 * comments in 'struct workqueue_attrs' definition. 3764 */ 3765 static void wqattrs_clear_for_pool(struct workqueue_attrs *attrs) 3766 { 3767 attrs->affn_scope = WQ_AFFN_NR_TYPES; 3768 attrs->ordered = false; 3769 } 3770 3771 /* hash value of the content of @attr */ 3772 static u32 wqattrs_hash(const struct workqueue_attrs *attrs) 3773 { 3774 u32 hash = 0; 3775 3776 hash = jhash_1word(attrs->nice, hash); 3777 hash = jhash(cpumask_bits(attrs->cpumask), 3778 BITS_TO_LONGS(nr_cpumask_bits) * sizeof(long), hash); 3779 hash = jhash(cpumask_bits(attrs->__pod_cpumask), 3780 BITS_TO_LONGS(nr_cpumask_bits) * sizeof(long), hash); 3781 hash = jhash_1word(attrs->affn_strict, hash); 3782 return hash; 3783 } 3784 3785 /* content equality test */ 3786 static bool wqattrs_equal(const struct workqueue_attrs *a, 3787 const struct workqueue_attrs *b) 3788 { 3789 if (a->nice != b->nice) 3790 return false; 3791 if (!cpumask_equal(a->cpumask, b->cpumask)) 3792 return false; 3793 if (!cpumask_equal(a->__pod_cpumask, b->__pod_cpumask)) 3794 return false; 3795 if (a->affn_strict != b->affn_strict) 3796 return false; 3797 return true; 3798 } 3799 3800 /* Update @attrs with actually available CPUs */ 3801 static void wqattrs_actualize_cpumask(struct workqueue_attrs *attrs, 3802 const cpumask_t *unbound_cpumask) 3803 { 3804 /* 3805 * Calculate the effective CPU mask of @attrs given @unbound_cpumask. If 3806 * @attrs->cpumask doesn't overlap with @unbound_cpumask, we fallback to 3807 * @unbound_cpumask. 3808 */ 3809 cpumask_and(attrs->cpumask, attrs->cpumask, unbound_cpumask); 3810 if (unlikely(cpumask_empty(attrs->cpumask))) 3811 cpumask_copy(attrs->cpumask, unbound_cpumask); 3812 } 3813 3814 /* find wq_pod_type to use for @attrs */ 3815 static const struct wq_pod_type * 3816 wqattrs_pod_type(const struct workqueue_attrs *attrs) 3817 { 3818 struct wq_pod_type *pt = &wq_pod_types[attrs->affn_scope]; 3819 3820 if (!WARN_ON_ONCE(attrs->affn_scope == WQ_AFFN_NR_TYPES) && 3821 likely(pt->nr_pods)) 3822 return pt; 3823 3824 /* 3825 * Before workqueue_init_topology(), only SYSTEM is available which is 3826 * initialized in workqueue_init_early(). 3827 */ 3828 pt = &wq_pod_types[WQ_AFFN_SYSTEM]; 3829 BUG_ON(!pt->nr_pods); 3830 return pt; 3831 } 3832 3833 /** 3834 * init_worker_pool - initialize a newly zalloc'd worker_pool 3835 * @pool: worker_pool to initialize 3836 * 3837 * Initialize a newly zalloc'd @pool. It also allocates @pool->attrs. 3838 * 3839 * Return: 0 on success, -errno on failure. Even on failure, all fields 3840 * inside @pool proper are initialized and put_unbound_pool() can be called 3841 * on @pool safely to release it. 
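 *
 * For example, get_unbound_pool() below relies on exactly this guarantee:
 *
 *	pool = kzalloc_node(sizeof(*pool), GFP_KERNEL, node);
 *	if (!pool || init_worker_pool(pool) < 0)
 *		goto fail;
 *	...
 * fail:
 *	if (pool)
 *		put_unbound_pool(pool);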
3842 */ 3843 static int init_worker_pool(struct worker_pool *pool) 3844 { 3845 raw_spin_lock_init(&pool->lock); 3846 pool->id = -1; 3847 pool->cpu = -1; 3848 pool->node = NUMA_NO_NODE; 3849 pool->flags |= POOL_DISASSOCIATED; 3850 pool->watchdog_ts = jiffies; 3851 INIT_LIST_HEAD(&pool->worklist); 3852 INIT_LIST_HEAD(&pool->idle_list); 3853 hash_init(pool->busy_hash); 3854 3855 timer_setup(&pool->idle_timer, idle_worker_timeout, TIMER_DEFERRABLE); 3856 INIT_WORK(&pool->idle_cull_work, idle_cull_fn); 3857 3858 timer_setup(&pool->mayday_timer, pool_mayday_timeout, 0); 3859 3860 INIT_LIST_HEAD(&pool->workers); 3861 INIT_LIST_HEAD(&pool->dying_workers); 3862 3863 ida_init(&pool->worker_ida); 3864 INIT_HLIST_NODE(&pool->hash_node); 3865 pool->refcnt = 1; 3866 3867 /* shouldn't fail above this point */ 3868 pool->attrs = alloc_workqueue_attrs(); 3869 if (!pool->attrs) 3870 return -ENOMEM; 3871 3872 wqattrs_clear_for_pool(pool->attrs); 3873 3874 return 0; 3875 } 3876 3877 #ifdef CONFIG_LOCKDEP 3878 static void wq_init_lockdep(struct workqueue_struct *wq) 3879 { 3880 char *lock_name; 3881 3882 lockdep_register_key(&wq->key); 3883 lock_name = kasprintf(GFP_KERNEL, "%s%s", "(wq_completion)", wq->name); 3884 if (!lock_name) 3885 lock_name = wq->name; 3886 3887 wq->lock_name = lock_name; 3888 lockdep_init_map(&wq->lockdep_map, lock_name, &wq->key, 0); 3889 } 3890 3891 static void wq_unregister_lockdep(struct workqueue_struct *wq) 3892 { 3893 lockdep_unregister_key(&wq->key); 3894 } 3895 3896 static void wq_free_lockdep(struct workqueue_struct *wq) 3897 { 3898 if (wq->lock_name != wq->name) 3899 kfree(wq->lock_name); 3900 } 3901 #else 3902 static void wq_init_lockdep(struct workqueue_struct *wq) 3903 { 3904 } 3905 3906 static void wq_unregister_lockdep(struct workqueue_struct *wq) 3907 { 3908 } 3909 3910 static void wq_free_lockdep(struct workqueue_struct *wq) 3911 { 3912 } 3913 #endif 3914 3915 static void rcu_free_wq(struct rcu_head *rcu) 3916 { 3917 struct workqueue_struct *wq = 3918 container_of(rcu, struct workqueue_struct, rcu); 3919 3920 wq_free_lockdep(wq); 3921 free_percpu(wq->cpu_pwq); 3922 free_workqueue_attrs(wq->unbound_attrs); 3923 kfree(wq); 3924 } 3925 3926 static void rcu_free_pool(struct rcu_head *rcu) 3927 { 3928 struct worker_pool *pool = container_of(rcu, struct worker_pool, rcu); 3929 3930 ida_destroy(&pool->worker_ida); 3931 free_workqueue_attrs(pool->attrs); 3932 kfree(pool); 3933 } 3934 3935 /** 3936 * put_unbound_pool - put a worker_pool 3937 * @pool: worker_pool to put 3938 * 3939 * Put @pool. If its refcnt reaches zero, it gets destroyed in RCU 3940 * safe manner. get_unbound_pool() calls this function on its failure path 3941 * and this function should be able to release pools which went through, 3942 * successfully or not, init_worker_pool(). 3943 * 3944 * Should be called with wq_pool_mutex held. 3945 */ 3946 static void put_unbound_pool(struct worker_pool *pool) 3947 { 3948 DECLARE_COMPLETION_ONSTACK(detach_completion); 3949 struct worker *worker; 3950 LIST_HEAD(cull_list); 3951 3952 lockdep_assert_held(&wq_pool_mutex); 3953 3954 if (--pool->refcnt) 3955 return; 3956 3957 /* sanity checks */ 3958 if (WARN_ON(!(pool->cpu < 0)) || 3959 WARN_ON(!list_empty(&pool->worklist))) 3960 return; 3961 3962 /* release id and unhash */ 3963 if (pool->id >= 0) 3964 idr_remove(&worker_pool_idr, pool->id); 3965 hash_del(&pool->hash_node); 3966 3967 /* 3968 * Become the manager and destroy all workers. This prevents 3969 * @pool's workers from blocking on attach_mutex. 
We're the last 3970 * manager and @pool gets freed with the flag set. 3971 * 3972 * Having a concurrent manager is quite unlikely to happen as we can 3973 * only get here with 3974 * pwq->refcnt == pool->refcnt == 0 3975 * which implies no work queued to the pool, which implies no worker can 3976 * become the manager. However a worker could have taken the role of 3977 * manager before the refcnts dropped to 0, since maybe_create_worker() 3978 * drops pool->lock 3979 */ 3980 while (true) { 3981 rcuwait_wait_event(&manager_wait, 3982 !(pool->flags & POOL_MANAGER_ACTIVE), 3983 TASK_UNINTERRUPTIBLE); 3984 3985 mutex_lock(&wq_pool_attach_mutex); 3986 raw_spin_lock_irq(&pool->lock); 3987 if (!(pool->flags & POOL_MANAGER_ACTIVE)) { 3988 pool->flags |= POOL_MANAGER_ACTIVE; 3989 break; 3990 } 3991 raw_spin_unlock_irq(&pool->lock); 3992 mutex_unlock(&wq_pool_attach_mutex); 3993 } 3994 3995 while ((worker = first_idle_worker(pool))) 3996 set_worker_dying(worker, &cull_list); 3997 WARN_ON(pool->nr_workers || pool->nr_idle); 3998 raw_spin_unlock_irq(&pool->lock); 3999 4000 wake_dying_workers(&cull_list); 4001 4002 if (!list_empty(&pool->workers) || !list_empty(&pool->dying_workers)) 4003 pool->detach_completion = &detach_completion; 4004 mutex_unlock(&wq_pool_attach_mutex); 4005 4006 if (pool->detach_completion) 4007 wait_for_completion(pool->detach_completion); 4008 4009 /* shut down the timers */ 4010 del_timer_sync(&pool->idle_timer); 4011 cancel_work_sync(&pool->idle_cull_work); 4012 del_timer_sync(&pool->mayday_timer); 4013 4014 /* RCU protected to allow dereferences from get_work_pool() */ 4015 call_rcu(&pool->rcu, rcu_free_pool); 4016 } 4017 4018 /** 4019 * get_unbound_pool - get a worker_pool with the specified attributes 4020 * @attrs: the attributes of the worker_pool to get 4021 * 4022 * Obtain a worker_pool which has the same attributes as @attrs, bump the 4023 * reference count and return it. If there already is a matching 4024 * worker_pool, it will be used; otherwise, this function attempts to 4025 * create a new one. 4026 * 4027 * Should be called with wq_pool_mutex held. 4028 * 4029 * Return: On success, a worker_pool with the same attributes as @attrs. 4030 * On failure, %NULL. 4031 */ 4032 static struct worker_pool *get_unbound_pool(const struct workqueue_attrs *attrs) 4033 { 4034 struct wq_pod_type *pt = &wq_pod_types[WQ_AFFN_NUMA]; 4035 u32 hash = wqattrs_hash(attrs); 4036 struct worker_pool *pool; 4037 int pod, node = NUMA_NO_NODE; 4038 4039 lockdep_assert_held(&wq_pool_mutex); 4040 4041 /* do we already have a matching pool? 
*/ 4042 hash_for_each_possible(unbound_pool_hash, pool, hash_node, hash) { 4043 if (wqattrs_equal(pool->attrs, attrs)) { 4044 pool->refcnt++; 4045 return pool; 4046 } 4047 } 4048 4049 /* If __pod_cpumask is contained inside a NUMA pod, that's our node */ 4050 for (pod = 0; pod < pt->nr_pods; pod++) { 4051 if (cpumask_subset(attrs->__pod_cpumask, pt->pod_cpus[pod])) { 4052 node = pt->pod_node[pod]; 4053 break; 4054 } 4055 } 4056 4057 /* nope, create a new one */ 4058 pool = kzalloc_node(sizeof(*pool), GFP_KERNEL, node); 4059 if (!pool || init_worker_pool(pool) < 0) 4060 goto fail; 4061 4062 pool->node = node; 4063 copy_workqueue_attrs(pool->attrs, attrs); 4064 wqattrs_clear_for_pool(pool->attrs); 4065 4066 if (worker_pool_assign_id(pool) < 0) 4067 goto fail; 4068 4069 /* create and start the initial worker */ 4070 if (wq_online && !create_worker(pool)) 4071 goto fail; 4072 4073 /* install */ 4074 hash_add(unbound_pool_hash, &pool->hash_node, hash); 4075 4076 return pool; 4077 fail: 4078 if (pool) 4079 put_unbound_pool(pool); 4080 return NULL; 4081 } 4082 4083 static void rcu_free_pwq(struct rcu_head *rcu) 4084 { 4085 kmem_cache_free(pwq_cache, 4086 container_of(rcu, struct pool_workqueue, rcu)); 4087 } 4088 4089 /* 4090 * Scheduled on pwq_release_worker by put_pwq() when an unbound pwq hits zero 4091 * refcnt and needs to be destroyed. 4092 */ 4093 static void pwq_release_workfn(struct kthread_work *work) 4094 { 4095 struct pool_workqueue *pwq = container_of(work, struct pool_workqueue, 4096 release_work); 4097 struct workqueue_struct *wq = pwq->wq; 4098 struct worker_pool *pool = pwq->pool; 4099 bool is_last = false; 4100 4101 /* 4102 * When @pwq is not linked, it doesn't hold any reference to the 4103 * @wq, and @wq is invalid to access. 4104 */ 4105 if (!list_empty(&pwq->pwqs_node)) { 4106 mutex_lock(&wq->mutex); 4107 list_del_rcu(&pwq->pwqs_node); 4108 is_last = list_empty(&wq->pwqs); 4109 mutex_unlock(&wq->mutex); 4110 } 4111 4112 if (wq->flags & WQ_UNBOUND) { 4113 mutex_lock(&wq_pool_mutex); 4114 put_unbound_pool(pool); 4115 mutex_unlock(&wq_pool_mutex); 4116 } 4117 4118 call_rcu(&pwq->rcu, rcu_free_pwq); 4119 4120 /* 4121 * If we're the last pwq going away, @wq is already dead and no one 4122 * is gonna access it anymore. Schedule RCU free. 4123 */ 4124 if (is_last) { 4125 wq_unregister_lockdep(wq); 4126 call_rcu(&wq->rcu, rcu_free_wq); 4127 } 4128 } 4129 4130 /** 4131 * pwq_adjust_max_active - update a pwq's max_active to the current setting 4132 * @pwq: target pool_workqueue 4133 * 4134 * If @pwq isn't freezing, set @pwq->max_active to the associated 4135 * workqueue's saved_max_active and activate inactive work items 4136 * accordingly. If @pwq is freezing, clear @pwq->max_active to zero. 4137 */ 4138 static void pwq_adjust_max_active(struct pool_workqueue *pwq) 4139 { 4140 struct workqueue_struct *wq = pwq->wq; 4141 bool freezable = wq->flags & WQ_FREEZABLE; 4142 unsigned long flags; 4143 4144 /* for @wq->saved_max_active */ 4145 lockdep_assert_held(&wq->mutex); 4146 4147 /* fast exit for non-freezable wqs */ 4148 if (!freezable && pwq->max_active == wq->saved_max_active) 4149 return; 4150 4151 /* this function can be called during early boot w/ irq disabled */ 4152 raw_spin_lock_irqsave(&pwq->pool->lock, flags); 4153 4154 /* 4155 * During [un]freezing, the caller is responsible for ensuring that 4156 * this function is called at least once after @workqueue_freezing 4157 * is updated and visible. 
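 * For reference, the freeze path (freeze_workqueues_begin()) satisfies this
 * by doing roughly the following under wq_pool_mutex:
 *
 *	workqueue_freezing = true;
 *	list_for_each_entry(wq, &workqueues, list) {
 *		mutex_lock(&wq->mutex);
 *		for_each_pwq(pwq, wq)
 *			pwq_adjust_max_active(pwq);
 *		mutex_unlock(&wq->mutex);
 *	}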
4158 */ 4159 if (!freezable || !workqueue_freezing) { 4160 pwq->max_active = wq->saved_max_active; 4161 4162 while (!list_empty(&pwq->inactive_works) && 4163 pwq->nr_active < pwq->max_active) 4164 pwq_activate_first_inactive(pwq); 4165 4166 kick_pool(pwq->pool); 4167 } else { 4168 pwq->max_active = 0; 4169 } 4170 4171 raw_spin_unlock_irqrestore(&pwq->pool->lock, flags); 4172 } 4173 4174 /* initialize newly allocated @pwq which is associated with @wq and @pool */ 4175 static void init_pwq(struct pool_workqueue *pwq, struct workqueue_struct *wq, 4176 struct worker_pool *pool) 4177 { 4178 BUG_ON((unsigned long)pwq & WORK_STRUCT_FLAG_MASK); 4179 4180 memset(pwq, 0, sizeof(*pwq)); 4181 4182 pwq->pool = pool; 4183 pwq->wq = wq; 4184 pwq->flush_color = -1; 4185 pwq->refcnt = 1; 4186 INIT_LIST_HEAD(&pwq->inactive_works); 4187 INIT_LIST_HEAD(&pwq->pwqs_node); 4188 INIT_LIST_HEAD(&pwq->mayday_node); 4189 kthread_init_work(&pwq->release_work, pwq_release_workfn); 4190 } 4191 4192 /* sync @pwq with the current state of its associated wq and link it */ 4193 static void link_pwq(struct pool_workqueue *pwq) 4194 { 4195 struct workqueue_struct *wq = pwq->wq; 4196 4197 lockdep_assert_held(&wq->mutex); 4198 4199 /* may be called multiple times, ignore if already linked */ 4200 if (!list_empty(&pwq->pwqs_node)) 4201 return; 4202 4203 /* set the matching work_color */ 4204 pwq->work_color = wq->work_color; 4205 4206 /* sync max_active to the current setting */ 4207 pwq_adjust_max_active(pwq); 4208 4209 /* link in @pwq */ 4210 list_add_rcu(&pwq->pwqs_node, &wq->pwqs); 4211 } 4212 4213 /* obtain a pool matching @attr and create a pwq associating the pool and @wq */ 4214 static struct pool_workqueue *alloc_unbound_pwq(struct workqueue_struct *wq, 4215 const struct workqueue_attrs *attrs) 4216 { 4217 struct worker_pool *pool; 4218 struct pool_workqueue *pwq; 4219 4220 lockdep_assert_held(&wq_pool_mutex); 4221 4222 pool = get_unbound_pool(attrs); 4223 if (!pool) 4224 return NULL; 4225 4226 pwq = kmem_cache_alloc_node(pwq_cache, GFP_KERNEL, pool->node); 4227 if (!pwq) { 4228 put_unbound_pool(pool); 4229 return NULL; 4230 } 4231 4232 init_pwq(pwq, wq, pool); 4233 return pwq; 4234 } 4235 4236 /** 4237 * wq_calc_pod_cpumask - calculate a wq_attrs' cpumask for a pod 4238 * @attrs: the wq_attrs of the default pwq of the target workqueue 4239 * @cpu: the target CPU 4240 * @cpu_going_down: if >= 0, the CPU to consider as offline 4241 * 4242 * Calculate the cpumask a workqueue with @attrs should use on @pod. If 4243 * @cpu_going_down is >= 0, that cpu is considered offline during calculation. 4244 * The result is stored in @attrs->__pod_cpumask. 4245 * 4246 * If pod affinity is not enabled, @attrs->cpumask is always used. If enabled 4247 * and @pod has online CPUs requested by @attrs, the returned cpumask is the 4248 * intersection of the possible CPUs of @pod and @attrs->cpumask. 4249 * 4250 * The caller is responsible for ensuring that the cpumask of @pod stays stable. 4251 */ 4252 static void wq_calc_pod_cpumask(struct workqueue_attrs *attrs, int cpu, 4253 int cpu_going_down) 4254 { 4255 const struct wq_pod_type *pt = wqattrs_pod_type(attrs); 4256 int pod = pt->cpu_pod[cpu]; 4257 4258 /* does @pod have any online CPUs @attrs wants? 
*/ 4259 cpumask_and(attrs->__pod_cpumask, pt->pod_cpus[pod], attrs->cpumask); 4260 cpumask_and(attrs->__pod_cpumask, attrs->__pod_cpumask, cpu_online_mask); 4261 if (cpu_going_down >= 0) 4262 cpumask_clear_cpu(cpu_going_down, attrs->__pod_cpumask); 4263 4264 if (cpumask_empty(attrs->__pod_cpumask)) { 4265 cpumask_copy(attrs->__pod_cpumask, attrs->cpumask); 4266 return; 4267 } 4268 4269 /* yeap, return possible CPUs in @pod that @attrs wants */ 4270 cpumask_and(attrs->__pod_cpumask, attrs->cpumask, pt->pod_cpus[pod]); 4271 4272 if (cpumask_empty(attrs->__pod_cpumask)) 4273 pr_warn_once("WARNING: workqueue cpumask: online intersect > " 4274 "possible intersect\n"); 4275 } 4276 4277 /* install @pwq into @wq's cpu_pwq and return the old pwq */ 4278 static struct pool_workqueue *install_unbound_pwq(struct workqueue_struct *wq, 4279 int cpu, struct pool_workqueue *pwq) 4280 { 4281 struct pool_workqueue *old_pwq; 4282 4283 lockdep_assert_held(&wq_pool_mutex); 4284 lockdep_assert_held(&wq->mutex); 4285 4286 /* link_pwq() can handle duplicate calls */ 4287 link_pwq(pwq); 4288 4289 old_pwq = rcu_access_pointer(*per_cpu_ptr(wq->cpu_pwq, cpu)); 4290 rcu_assign_pointer(*per_cpu_ptr(wq->cpu_pwq, cpu), pwq); 4291 return old_pwq; 4292 } 4293 4294 /* context to store the prepared attrs & pwqs before applying */ 4295 struct apply_wqattrs_ctx { 4296 struct workqueue_struct *wq; /* target workqueue */ 4297 struct workqueue_attrs *attrs; /* attrs to apply */ 4298 struct list_head list; /* queued for batching commit */ 4299 struct pool_workqueue *dfl_pwq; 4300 struct pool_workqueue *pwq_tbl[]; 4301 }; 4302 4303 /* free the resources after success or abort */ 4304 static void apply_wqattrs_cleanup(struct apply_wqattrs_ctx *ctx) 4305 { 4306 if (ctx) { 4307 int cpu; 4308 4309 for_each_possible_cpu(cpu) 4310 put_pwq_unlocked(ctx->pwq_tbl[cpu]); 4311 put_pwq_unlocked(ctx->dfl_pwq); 4312 4313 free_workqueue_attrs(ctx->attrs); 4314 4315 kfree(ctx); 4316 } 4317 } 4318 4319 /* allocate the attrs and pwqs for later installation */ 4320 static struct apply_wqattrs_ctx * 4321 apply_wqattrs_prepare(struct workqueue_struct *wq, 4322 const struct workqueue_attrs *attrs, 4323 const cpumask_var_t unbound_cpumask) 4324 { 4325 struct apply_wqattrs_ctx *ctx; 4326 struct workqueue_attrs *new_attrs; 4327 int cpu; 4328 4329 lockdep_assert_held(&wq_pool_mutex); 4330 4331 if (WARN_ON(attrs->affn_scope < 0 || 4332 attrs->affn_scope >= WQ_AFFN_NR_TYPES)) 4333 return ERR_PTR(-EINVAL); 4334 4335 ctx = kzalloc(struct_size(ctx, pwq_tbl, nr_cpu_ids), GFP_KERNEL); 4336 4337 new_attrs = alloc_workqueue_attrs(); 4338 if (!ctx || !new_attrs) 4339 goto out_free; 4340 4341 /* 4342 * If something goes wrong during CPU up/down, we'll fall back to 4343 * the default pwq covering whole @attrs->cpumask. Always create 4344 * it even if we don't use it immediately. 4345 */ 4346 copy_workqueue_attrs(new_attrs, attrs); 4347 wqattrs_actualize_cpumask(new_attrs, unbound_cpumask); 4348 cpumask_copy(new_attrs->__pod_cpumask, new_attrs->cpumask); 4349 ctx->dfl_pwq = alloc_unbound_pwq(wq, new_attrs); 4350 if (!ctx->dfl_pwq) 4351 goto out_free; 4352 4353 for_each_possible_cpu(cpu) { 4354 if (new_attrs->ordered) { 4355 ctx->dfl_pwq->refcnt++; 4356 ctx->pwq_tbl[cpu] = ctx->dfl_pwq; 4357 } else { 4358 wq_calc_pod_cpumask(new_attrs, cpu, -1); 4359 ctx->pwq_tbl[cpu] = alloc_unbound_pwq(wq, new_attrs); 4360 if (!ctx->pwq_tbl[cpu]) 4361 goto out_free; 4362 } 4363 } 4364 4365 /* save the user configured attrs and sanitize it. 
*/
4366 copy_workqueue_attrs(new_attrs, attrs);
4367 cpumask_and(new_attrs->cpumask, new_attrs->cpumask, cpu_possible_mask);
4368 cpumask_copy(new_attrs->__pod_cpumask, new_attrs->cpumask);
4369 ctx->attrs = new_attrs;
4370
4371 ctx->wq = wq;
4372 return ctx;
4373
4374 out_free:
4375 free_workqueue_attrs(new_attrs);
4376 apply_wqattrs_cleanup(ctx);
4377 return ERR_PTR(-ENOMEM);
4378 }
4379
4380 /* set attrs and install prepared pwqs, @ctx points to old pwqs on return */
4381 static void apply_wqattrs_commit(struct apply_wqattrs_ctx *ctx)
4382 {
4383 int cpu;
4384
4385 /* all pwqs have been created successfully, let's install'em */
4386 mutex_lock(&ctx->wq->mutex);
4387
4388 copy_workqueue_attrs(ctx->wq->unbound_attrs, ctx->attrs);
4389
4390 /* save the previous pwq and install the new one */
4391 for_each_possible_cpu(cpu)
4392 ctx->pwq_tbl[cpu] = install_unbound_pwq(ctx->wq, cpu,
4393 ctx->pwq_tbl[cpu]);
4394
4395 /* @dfl_pwq might not have been used, ensure it's linked */
4396 link_pwq(ctx->dfl_pwq);
4397 swap(ctx->wq->dfl_pwq, ctx->dfl_pwq);
4398
4399 mutex_unlock(&ctx->wq->mutex);
4400 }
4401
4402 static void apply_wqattrs_lock(void)
4403 {
4404 /* CPUs should stay stable across pwq creations and installations */
4405 cpus_read_lock();
4406 mutex_lock(&wq_pool_mutex);
4407 }
4408
4409 static void apply_wqattrs_unlock(void)
4410 {
4411 mutex_unlock(&wq_pool_mutex);
4412 cpus_read_unlock();
4413 }
4414
4415 static int apply_workqueue_attrs_locked(struct workqueue_struct *wq,
4416 const struct workqueue_attrs *attrs)
4417 {
4418 struct apply_wqattrs_ctx *ctx;
4419
4420 /* only unbound workqueues can change attributes */
4421 if (WARN_ON(!(wq->flags & WQ_UNBOUND)))
4422 return -EINVAL;
4423
4424 /* creating multiple pwqs breaks ordering guarantee */
4425 if (!list_empty(&wq->pwqs)) {
4426 if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
4427 return -EINVAL;
4428
4429 wq->flags &= ~__WQ_ORDERED;
4430 }
4431
4432 ctx = apply_wqattrs_prepare(wq, attrs, wq_unbound_cpumask);
4433 if (IS_ERR(ctx))
4434 return PTR_ERR(ctx);
4435
4436 /* the ctx has been prepared successfully, let's commit it */
4437 apply_wqattrs_commit(ctx);
4438 apply_wqattrs_cleanup(ctx);
4439
4440 return 0;
4441 }
4442
4443 /**
4444 * apply_workqueue_attrs - apply new workqueue_attrs to an unbound workqueue
4445 * @wq: the target workqueue
4446 * @attrs: the workqueue_attrs to apply, allocated with alloc_workqueue_attrs()
4447 *
4448 * Apply @attrs to an unbound workqueue @wq. Unless disabled, this function maps
4449 * a separate pwq to each CPU pod with possible CPUs in @attrs->cpumask so that
4450 * work items are affine to the pod they were issued on. Older pwqs are released
4451 * as in-flight work items finish. Note that a work item which repeatedly
4452 * requeues itself back-to-back will stay on its current pwq.
4453 *
4454 * Performs GFP_KERNEL allocations.
4455 *
4456 * Assumes caller has CPU hotplug read exclusion, i.e. cpus_read_lock().
4457 *
4458 * Return: 0 on success and -errno on failure.
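 *
 * Illustrative usage (hypothetical caller; @unbound_wq is assumed to be an
 * existing WQ_UNBOUND workqueue and @housekeeping_cpus a caller-owned mask):
 *
 *	struct workqueue_attrs *attrs;
 *	int ret = -ENOMEM;
 *
 *	attrs = alloc_workqueue_attrs();
 *	if (attrs) {
 *		attrs->nice = -5;
 *		cpumask_copy(attrs->cpumask, housekeeping_cpus);
 *		cpus_read_lock();
 *		ret = apply_workqueue_attrs(unbound_wq, attrs);
 *		cpus_read_unlock();
 *		free_workqueue_attrs(attrs);
 *	}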
4459 */ 4460 int apply_workqueue_attrs(struct workqueue_struct *wq, 4461 const struct workqueue_attrs *attrs) 4462 { 4463 int ret; 4464 4465 lockdep_assert_cpus_held(); 4466 4467 mutex_lock(&wq_pool_mutex); 4468 ret = apply_workqueue_attrs_locked(wq, attrs); 4469 mutex_unlock(&wq_pool_mutex); 4470 4471 return ret; 4472 } 4473 4474 /** 4475 * wq_update_pod - update pod affinity of a wq for CPU hot[un]plug 4476 * @wq: the target workqueue 4477 * @cpu: the CPU to update pool association for 4478 * @hotplug_cpu: the CPU coming up or going down 4479 * @online: whether @cpu is coming up or going down 4480 * 4481 * This function is to be called from %CPU_DOWN_PREPARE, %CPU_ONLINE and 4482 * %CPU_DOWN_FAILED. @cpu is being hot[un]plugged, update pod affinity of 4483 * @wq accordingly. 4484 * 4485 * 4486 * If pod affinity can't be adjusted due to memory allocation failure, it falls 4487 * back to @wq->dfl_pwq which may not be optimal but is always correct. 4488 * 4489 * Note that when the last allowed CPU of a pod goes offline for a workqueue 4490 * with a cpumask spanning multiple pods, the workers which were already 4491 * executing the work items for the workqueue will lose their CPU affinity and 4492 * may execute on any CPU. This is similar to how per-cpu workqueues behave on 4493 * CPU_DOWN. If a workqueue user wants strict affinity, it's the user's 4494 * responsibility to flush the work item from CPU_DOWN_PREPARE. 4495 */ 4496 static void wq_update_pod(struct workqueue_struct *wq, int cpu, 4497 int hotplug_cpu, bool online) 4498 { 4499 int off_cpu = online ? -1 : hotplug_cpu; 4500 struct pool_workqueue *old_pwq = NULL, *pwq; 4501 struct workqueue_attrs *target_attrs; 4502 4503 lockdep_assert_held(&wq_pool_mutex); 4504 4505 if (!(wq->flags & WQ_UNBOUND) || wq->unbound_attrs->ordered) 4506 return; 4507 4508 /* 4509 * We don't wanna alloc/free wq_attrs for each wq for each CPU. 4510 * Let's use a preallocated one. The following buf is protected by 4511 * CPU hotplug exclusion. 4512 */ 4513 target_attrs = wq_update_pod_attrs_buf; 4514 4515 copy_workqueue_attrs(target_attrs, wq->unbound_attrs); 4516 wqattrs_actualize_cpumask(target_attrs, wq_unbound_cpumask); 4517 4518 /* nothing to do if the target cpumask matches the current pwq */ 4519 wq_calc_pod_cpumask(target_attrs, cpu, off_cpu); 4520 pwq = rcu_dereference_protected(*per_cpu_ptr(wq->cpu_pwq, cpu), 4521 lockdep_is_held(&wq_pool_mutex)); 4522 if (wqattrs_equal(target_attrs, pwq->pool->attrs)) 4523 return; 4524 4525 /* create a new pwq */ 4526 pwq = alloc_unbound_pwq(wq, target_attrs); 4527 if (!pwq) { 4528 pr_warn("workqueue: allocation failed while updating CPU pod affinity of \"%s\"\n", 4529 wq->name); 4530 goto use_dfl_pwq; 4531 } 4532 4533 /* Install the new pwq. 
*/ 4534 mutex_lock(&wq->mutex); 4535 old_pwq = install_unbound_pwq(wq, cpu, pwq); 4536 goto out_unlock; 4537 4538 use_dfl_pwq: 4539 mutex_lock(&wq->mutex); 4540 raw_spin_lock_irq(&wq->dfl_pwq->pool->lock); 4541 get_pwq(wq->dfl_pwq); 4542 raw_spin_unlock_irq(&wq->dfl_pwq->pool->lock); 4543 old_pwq = install_unbound_pwq(wq, cpu, wq->dfl_pwq); 4544 out_unlock: 4545 mutex_unlock(&wq->mutex); 4546 put_pwq_unlocked(old_pwq); 4547 } 4548 4549 static int alloc_and_link_pwqs(struct workqueue_struct *wq) 4550 { 4551 bool highpri = wq->flags & WQ_HIGHPRI; 4552 int cpu, ret; 4553 4554 wq->cpu_pwq = alloc_percpu(struct pool_workqueue *); 4555 if (!wq->cpu_pwq) 4556 goto enomem; 4557 4558 if (!(wq->flags & WQ_UNBOUND)) { 4559 for_each_possible_cpu(cpu) { 4560 struct pool_workqueue **pwq_p = 4561 per_cpu_ptr(wq->cpu_pwq, cpu); 4562 struct worker_pool *pool = 4563 &(per_cpu_ptr(cpu_worker_pools, cpu)[highpri]); 4564 4565 *pwq_p = kmem_cache_alloc_node(pwq_cache, GFP_KERNEL, 4566 pool->node); 4567 if (!*pwq_p) 4568 goto enomem; 4569 4570 init_pwq(*pwq_p, wq, pool); 4571 4572 mutex_lock(&wq->mutex); 4573 link_pwq(*pwq_p); 4574 mutex_unlock(&wq->mutex); 4575 } 4576 return 0; 4577 } 4578 4579 cpus_read_lock(); 4580 if (wq->flags & __WQ_ORDERED) { 4581 ret = apply_workqueue_attrs(wq, ordered_wq_attrs[highpri]); 4582 /* there should only be single pwq for ordering guarantee */ 4583 WARN(!ret && (wq->pwqs.next != &wq->dfl_pwq->pwqs_node || 4584 wq->pwqs.prev != &wq->dfl_pwq->pwqs_node), 4585 "ordering guarantee broken for workqueue %s\n", wq->name); 4586 } else { 4587 ret = apply_workqueue_attrs(wq, unbound_std_wq_attrs[highpri]); 4588 } 4589 cpus_read_unlock(); 4590 4591 return ret; 4592 4593 enomem: 4594 if (wq->cpu_pwq) { 4595 for_each_possible_cpu(cpu) 4596 kfree(*per_cpu_ptr(wq->cpu_pwq, cpu)); 4597 free_percpu(wq->cpu_pwq); 4598 wq->cpu_pwq = NULL; 4599 } 4600 return -ENOMEM; 4601 } 4602 4603 static int wq_clamp_max_active(int max_active, unsigned int flags, 4604 const char *name) 4605 { 4606 if (max_active < 1 || max_active > WQ_MAX_ACTIVE) 4607 pr_warn("workqueue: max_active %d requested for %s is out of range, clamping between %d and %d\n", 4608 max_active, name, 1, WQ_MAX_ACTIVE); 4609 4610 return clamp_val(max_active, 1, WQ_MAX_ACTIVE); 4611 } 4612 4613 /* 4614 * Workqueues which may be used during memory reclaim should have a rescuer 4615 * to guarantee forward progress. 4616 */ 4617 static int init_rescuer(struct workqueue_struct *wq) 4618 { 4619 struct worker *rescuer; 4620 int ret; 4621 4622 if (!(wq->flags & WQ_MEM_RECLAIM)) 4623 return 0; 4624 4625 rescuer = alloc_worker(NUMA_NO_NODE); 4626 if (!rescuer) { 4627 pr_err("workqueue: Failed to allocate a rescuer for wq \"%s\"\n", 4628 wq->name); 4629 return -ENOMEM; 4630 } 4631 4632 rescuer->rescue_wq = wq; 4633 rescuer->task = kthread_create(rescuer_thread, rescuer, "%s", wq->name); 4634 if (IS_ERR(rescuer->task)) { 4635 ret = PTR_ERR(rescuer->task); 4636 pr_err("workqueue: Failed to create a rescuer kthread for wq \"%s\": %pe", 4637 wq->name, ERR_PTR(ret)); 4638 kfree(rescuer); 4639 return ret; 4640 } 4641 4642 wq->rescuer = rescuer; 4643 kthread_bind_mask(rescuer->task, cpu_possible_mask); 4644 wake_up_process(rescuer->task); 4645 4646 return 0; 4647 } 4648 4649 __printf(1, 4) 4650 struct workqueue_struct *alloc_workqueue(const char *fmt, 4651 unsigned int flags, 4652 int max_active, ...) 
4653 { 4654 va_list args; 4655 struct workqueue_struct *wq; 4656 struct pool_workqueue *pwq; 4657 4658 /* 4659 * Unbound && max_active == 1 used to imply ordered, which is no longer 4660 * the case on many machines due to per-pod pools. While 4661 * alloc_ordered_workqueue() is the right way to create an ordered 4662 * workqueue, keep the previous behavior to avoid subtle breakages. 4663 */ 4664 if ((flags & WQ_UNBOUND) && max_active == 1) 4665 flags |= __WQ_ORDERED; 4666 4667 /* see the comment above the definition of WQ_POWER_EFFICIENT */ 4668 if ((flags & WQ_POWER_EFFICIENT) && wq_power_efficient) 4669 flags |= WQ_UNBOUND; 4670 4671 /* allocate wq and format name */ 4672 wq = kzalloc(sizeof(*wq), GFP_KERNEL); 4673 if (!wq) 4674 return NULL; 4675 4676 if (flags & WQ_UNBOUND) { 4677 wq->unbound_attrs = alloc_workqueue_attrs(); 4678 if (!wq->unbound_attrs) 4679 goto err_free_wq; 4680 } 4681 4682 va_start(args, max_active); 4683 vsnprintf(wq->name, sizeof(wq->name), fmt, args); 4684 va_end(args); 4685 4686 max_active = max_active ?: WQ_DFL_ACTIVE; 4687 max_active = wq_clamp_max_active(max_active, flags, wq->name); 4688 4689 /* init wq */ 4690 wq->flags = flags; 4691 wq->saved_max_active = max_active; 4692 mutex_init(&wq->mutex); 4693 atomic_set(&wq->nr_pwqs_to_flush, 0); 4694 INIT_LIST_HEAD(&wq->pwqs); 4695 INIT_LIST_HEAD(&wq->flusher_queue); 4696 INIT_LIST_HEAD(&wq->flusher_overflow); 4697 INIT_LIST_HEAD(&wq->maydays); 4698 4699 wq_init_lockdep(wq); 4700 INIT_LIST_HEAD(&wq->list); 4701 4702 if (alloc_and_link_pwqs(wq) < 0) 4703 goto err_unreg_lockdep; 4704 4705 if (wq_online && init_rescuer(wq) < 0) 4706 goto err_destroy; 4707 4708 if ((wq->flags & WQ_SYSFS) && workqueue_sysfs_register(wq)) 4709 goto err_destroy; 4710 4711 /* 4712 * wq_pool_mutex protects global freeze state and workqueues list. 4713 * Grab it, adjust max_active and add the new @wq to workqueues 4714 * list. 4715 */ 4716 mutex_lock(&wq_pool_mutex); 4717 4718 mutex_lock(&wq->mutex); 4719 for_each_pwq(pwq, wq) 4720 pwq_adjust_max_active(pwq); 4721 mutex_unlock(&wq->mutex); 4722 4723 list_add_tail_rcu(&wq->list, &workqueues); 4724 4725 mutex_unlock(&wq_pool_mutex); 4726 4727 return wq; 4728 4729 err_unreg_lockdep: 4730 wq_unregister_lockdep(wq); 4731 wq_free_lockdep(wq); 4732 err_free_wq: 4733 free_workqueue_attrs(wq->unbound_attrs); 4734 kfree(wq); 4735 return NULL; 4736 err_destroy: 4737 destroy_workqueue(wq); 4738 return NULL; 4739 } 4740 EXPORT_SYMBOL_GPL(alloc_workqueue); 4741 4742 static bool pwq_busy(struct pool_workqueue *pwq) 4743 { 4744 int i; 4745 4746 for (i = 0; i < WORK_NR_COLORS; i++) 4747 if (pwq->nr_in_flight[i]) 4748 return true; 4749 4750 if ((pwq != pwq->wq->dfl_pwq) && (pwq->refcnt > 1)) 4751 return true; 4752 if (pwq->nr_active || !list_empty(&pwq->inactive_works)) 4753 return true; 4754 4755 return false; 4756 } 4757 4758 /** 4759 * destroy_workqueue - safely terminate a workqueue 4760 * @wq: target workqueue 4761 * 4762 * Safely destroy a workqueue. All work currently pending will be done first. 4763 */ 4764 void destroy_workqueue(struct workqueue_struct *wq) 4765 { 4766 struct pool_workqueue *pwq; 4767 int cpu; 4768 4769 /* 4770 * Remove it from sysfs first so that sanity check failure doesn't 4771 * lead to sysfs name conflicts. 
4772 */ 4773 workqueue_sysfs_unregister(wq); 4774 4775 /* mark the workqueue destruction is in progress */ 4776 mutex_lock(&wq->mutex); 4777 wq->flags |= __WQ_DESTROYING; 4778 mutex_unlock(&wq->mutex); 4779 4780 /* drain it before proceeding with destruction */ 4781 drain_workqueue(wq); 4782 4783 /* kill rescuer, if sanity checks fail, leave it w/o rescuer */ 4784 if (wq->rescuer) { 4785 struct worker *rescuer = wq->rescuer; 4786 4787 /* this prevents new queueing */ 4788 raw_spin_lock_irq(&wq_mayday_lock); 4789 wq->rescuer = NULL; 4790 raw_spin_unlock_irq(&wq_mayday_lock); 4791 4792 /* rescuer will empty maydays list before exiting */ 4793 kthread_stop(rescuer->task); 4794 kfree(rescuer); 4795 } 4796 4797 /* 4798 * Sanity checks - grab all the locks so that we wait for all 4799 * in-flight operations which may do put_pwq(). 4800 */ 4801 mutex_lock(&wq_pool_mutex); 4802 mutex_lock(&wq->mutex); 4803 for_each_pwq(pwq, wq) { 4804 raw_spin_lock_irq(&pwq->pool->lock); 4805 if (WARN_ON(pwq_busy(pwq))) { 4806 pr_warn("%s: %s has the following busy pwq\n", 4807 __func__, wq->name); 4808 show_pwq(pwq); 4809 raw_spin_unlock_irq(&pwq->pool->lock); 4810 mutex_unlock(&wq->mutex); 4811 mutex_unlock(&wq_pool_mutex); 4812 show_one_workqueue(wq); 4813 return; 4814 } 4815 raw_spin_unlock_irq(&pwq->pool->lock); 4816 } 4817 mutex_unlock(&wq->mutex); 4818 4819 /* 4820 * wq list is used to freeze wq, remove from list after 4821 * flushing is complete in case freeze races us. 4822 */ 4823 list_del_rcu(&wq->list); 4824 mutex_unlock(&wq_pool_mutex); 4825 4826 /* 4827 * We're the sole accessor of @wq. Directly access cpu_pwq and dfl_pwq 4828 * to put the base refs. @wq will be auto-destroyed from the last 4829 * pwq_put. RCU read lock prevents @wq from going away from under us. 4830 */ 4831 rcu_read_lock(); 4832 4833 for_each_possible_cpu(cpu) { 4834 pwq = rcu_access_pointer(*per_cpu_ptr(wq->cpu_pwq, cpu)); 4835 RCU_INIT_POINTER(*per_cpu_ptr(wq->cpu_pwq, cpu), NULL); 4836 put_pwq_unlocked(pwq); 4837 } 4838 4839 put_pwq_unlocked(wq->dfl_pwq); 4840 wq->dfl_pwq = NULL; 4841 4842 rcu_read_unlock(); 4843 } 4844 EXPORT_SYMBOL_GPL(destroy_workqueue); 4845 4846 /** 4847 * workqueue_set_max_active - adjust max_active of a workqueue 4848 * @wq: target workqueue 4849 * @max_active: new max_active value. 4850 * 4851 * Set max_active of @wq to @max_active. 4852 * 4853 * CONTEXT: 4854 * Don't call from IRQ context. 4855 */ 4856 void workqueue_set_max_active(struct workqueue_struct *wq, int max_active) 4857 { 4858 struct pool_workqueue *pwq; 4859 4860 /* disallow meddling with max_active for ordered workqueues */ 4861 if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT)) 4862 return; 4863 4864 max_active = wq_clamp_max_active(max_active, wq->flags, wq->name); 4865 4866 mutex_lock(&wq->mutex); 4867 4868 wq->flags &= ~__WQ_ORDERED; 4869 wq->saved_max_active = max_active; 4870 4871 for_each_pwq(pwq, wq) 4872 pwq_adjust_max_active(pwq); 4873 4874 mutex_unlock(&wq->mutex); 4875 } 4876 EXPORT_SYMBOL_GPL(workqueue_set_max_active); 4877 4878 /** 4879 * current_work - retrieve %current task's work struct 4880 * 4881 * Determine if %current task is a workqueue worker and what it's working on. 4882 * Useful to find out the context that the %current task is running in. 4883 * 4884 * Return: work struct if %current task is a workqueue worker, %NULL otherwise. 4885 */ 4886 struct work_struct *current_work(void) 4887 { 4888 struct worker *worker = current_wq_worker(); 4889 4890 return worker ? 
worker->current_work : NULL; 4891 } 4892 EXPORT_SYMBOL(current_work); 4893 4894 /** 4895 * current_is_workqueue_rescuer - is %current workqueue rescuer? 4896 * 4897 * Determine whether %current is a workqueue rescuer. Can be used from 4898 * work functions to determine whether it's being run off the rescuer task. 4899 * 4900 * Return: %true if %current is a workqueue rescuer. %false otherwise. 4901 */ 4902 bool current_is_workqueue_rescuer(void) 4903 { 4904 struct worker *worker = current_wq_worker(); 4905 4906 return worker && worker->rescue_wq; 4907 } 4908 4909 /** 4910 * workqueue_congested - test whether a workqueue is congested 4911 * @cpu: CPU in question 4912 * @wq: target workqueue 4913 * 4914 * Test whether @wq's cpu workqueue for @cpu is congested. There is 4915 * no synchronization around this function and the test result is 4916 * unreliable and only useful as advisory hints or for debugging. 4917 * 4918 * If @cpu is WORK_CPU_UNBOUND, the test is performed on the local CPU. 4919 * 4920 * With the exception of ordered workqueues, all workqueues have per-cpu 4921 * pool_workqueues, each with its own congested state. A workqueue being 4922 * congested on one CPU doesn't mean that the workqueue is contested on any 4923 * other CPUs. 4924 * 4925 * Return: 4926 * %true if congested, %false otherwise. 4927 */ 4928 bool workqueue_congested(int cpu, struct workqueue_struct *wq) 4929 { 4930 struct pool_workqueue *pwq; 4931 bool ret; 4932 4933 rcu_read_lock(); 4934 preempt_disable(); 4935 4936 if (cpu == WORK_CPU_UNBOUND) 4937 cpu = smp_processor_id(); 4938 4939 pwq = *per_cpu_ptr(wq->cpu_pwq, cpu); 4940 ret = !list_empty(&pwq->inactive_works); 4941 4942 preempt_enable(); 4943 rcu_read_unlock(); 4944 4945 return ret; 4946 } 4947 EXPORT_SYMBOL_GPL(workqueue_congested); 4948 4949 /** 4950 * work_busy - test whether a work is currently pending or running 4951 * @work: the work to be tested 4952 * 4953 * Test whether @work is currently pending or running. There is no 4954 * synchronization around this function and the test result is 4955 * unreliable and only useful as advisory hints or for debugging. 4956 * 4957 * Return: 4958 * OR'd bitmask of WORK_BUSY_* bits. 4959 */ 4960 unsigned int work_busy(struct work_struct *work) 4961 { 4962 struct worker_pool *pool; 4963 unsigned long flags; 4964 unsigned int ret = 0; 4965 4966 if (work_pending(work)) 4967 ret |= WORK_BUSY_PENDING; 4968 4969 rcu_read_lock(); 4970 pool = get_work_pool(work); 4971 if (pool) { 4972 raw_spin_lock_irqsave(&pool->lock, flags); 4973 if (find_worker_executing_work(pool, work)) 4974 ret |= WORK_BUSY_RUNNING; 4975 raw_spin_unlock_irqrestore(&pool->lock, flags); 4976 } 4977 rcu_read_unlock(); 4978 4979 return ret; 4980 } 4981 EXPORT_SYMBOL_GPL(work_busy); 4982 4983 /** 4984 * set_worker_desc - set description for the current work item 4985 * @fmt: printf-style format string 4986 * @...: arguments for the format string 4987 * 4988 * This function can be called by a running work function to describe what 4989 * the work item is about. If the worker task gets dumped, this 4990 * information will be printed out together to help debugging. The 4991 * description can be at most WORKER_DESC_LEN including the trailing '\0'. 4992 */ 4993 void set_worker_desc(const char *fmt, ...) 
4994 { 4995 struct worker *worker = current_wq_worker(); 4996 va_list args; 4997 4998 if (worker) { 4999 va_start(args, fmt); 5000 vsnprintf(worker->desc, sizeof(worker->desc), fmt, args); 5001 va_end(args); 5002 } 5003 } 5004 EXPORT_SYMBOL_GPL(set_worker_desc); 5005 5006 /** 5007 * print_worker_info - print out worker information and description 5008 * @log_lvl: the log level to use when printing 5009 * @task: target task 5010 * 5011 * If @task is a worker and currently executing a work item, print out the 5012 * name of the workqueue being serviced and worker description set with 5013 * set_worker_desc() by the currently executing work item. 5014 * 5015 * This function can be safely called on any task as long as the 5016 * task_struct itself is accessible. While safe, this function isn't 5017 * synchronized and may print out mixups or garbages of limited length. 5018 */ 5019 void print_worker_info(const char *log_lvl, struct task_struct *task) 5020 { 5021 work_func_t *fn = NULL; 5022 char name[WQ_NAME_LEN] = { }; 5023 char desc[WORKER_DESC_LEN] = { }; 5024 struct pool_workqueue *pwq = NULL; 5025 struct workqueue_struct *wq = NULL; 5026 struct worker *worker; 5027 5028 if (!(task->flags & PF_WQ_WORKER)) 5029 return; 5030 5031 /* 5032 * This function is called without any synchronization and @task 5033 * could be in any state. Be careful with dereferences. 5034 */ 5035 worker = kthread_probe_data(task); 5036 5037 /* 5038 * Carefully copy the associated workqueue's workfn, name and desc. 5039 * Keep the original last '\0' in case the original is garbage. 5040 */ 5041 copy_from_kernel_nofault(&fn, &worker->current_func, sizeof(fn)); 5042 copy_from_kernel_nofault(&pwq, &worker->current_pwq, sizeof(pwq)); 5043 copy_from_kernel_nofault(&wq, &pwq->wq, sizeof(wq)); 5044 copy_from_kernel_nofault(name, wq->name, sizeof(name) - 1); 5045 copy_from_kernel_nofault(desc, worker->desc, sizeof(desc) - 1); 5046 5047 if (fn || name[0] || desc[0]) { 5048 printk("%sWorkqueue: %s %ps", log_lvl, name, fn); 5049 if (strcmp(name, desc)) 5050 pr_cont(" (%s)", desc); 5051 pr_cont("\n"); 5052 } 5053 } 5054 5055 static void pr_cont_pool_info(struct worker_pool *pool) 5056 { 5057 pr_cont(" cpus=%*pbl", nr_cpumask_bits, pool->attrs->cpumask); 5058 if (pool->node != NUMA_NO_NODE) 5059 pr_cont(" node=%d", pool->node); 5060 pr_cont(" flags=0x%x nice=%d", pool->flags, pool->attrs->nice); 5061 } 5062 5063 struct pr_cont_work_struct { 5064 bool comma; 5065 work_func_t func; 5066 long ctr; 5067 }; 5068 5069 static void pr_cont_work_flush(bool comma, work_func_t func, struct pr_cont_work_struct *pcwsp) 5070 { 5071 if (!pcwsp->ctr) 5072 goto out_record; 5073 if (func == pcwsp->func) { 5074 pcwsp->ctr++; 5075 return; 5076 } 5077 if (pcwsp->ctr == 1) 5078 pr_cont("%s %ps", pcwsp->comma ? "," : "", pcwsp->func); 5079 else 5080 pr_cont("%s %ld*%ps", pcwsp->comma ? "," : "", pcwsp->ctr, pcwsp->func); 5081 pcwsp->ctr = 0; 5082 out_record: 5083 if ((long)func == -1L) 5084 return; 5085 pcwsp->comma = comma; 5086 pcwsp->func = func; 5087 pcwsp->ctr = 1; 5088 } 5089 5090 static void pr_cont_work(bool comma, struct work_struct *work, struct pr_cont_work_struct *pcwsp) 5091 { 5092 if (work->func == wq_barrier_func) { 5093 struct wq_barrier *barr; 5094 5095 barr = container_of(work, struct wq_barrier, work); 5096 5097 pr_cont_work_flush(comma, (work_func_t)-1, pcwsp); 5098 pr_cont("%s BAR(%d)", comma ? 
"," : "", 5099 task_pid_nr(barr->task)); 5100 } else { 5101 if (!comma) 5102 pr_cont_work_flush(comma, (work_func_t)-1, pcwsp); 5103 pr_cont_work_flush(comma, work->func, pcwsp); 5104 } 5105 } 5106 5107 static void show_pwq(struct pool_workqueue *pwq) 5108 { 5109 struct pr_cont_work_struct pcws = { .ctr = 0, }; 5110 struct worker_pool *pool = pwq->pool; 5111 struct work_struct *work; 5112 struct worker *worker; 5113 bool has_in_flight = false, has_pending = false; 5114 int bkt; 5115 5116 pr_info(" pwq %d:", pool->id); 5117 pr_cont_pool_info(pool); 5118 5119 pr_cont(" active=%d/%d refcnt=%d%s\n", 5120 pwq->nr_active, pwq->max_active, pwq->refcnt, 5121 !list_empty(&pwq->mayday_node) ? " MAYDAY" : ""); 5122 5123 hash_for_each(pool->busy_hash, bkt, worker, hentry) { 5124 if (worker->current_pwq == pwq) { 5125 has_in_flight = true; 5126 break; 5127 } 5128 } 5129 if (has_in_flight) { 5130 bool comma = false; 5131 5132 pr_info(" in-flight:"); 5133 hash_for_each(pool->busy_hash, bkt, worker, hentry) { 5134 if (worker->current_pwq != pwq) 5135 continue; 5136 5137 pr_cont("%s %d%s:%ps", comma ? "," : "", 5138 task_pid_nr(worker->task), 5139 worker->rescue_wq ? "(RESCUER)" : "", 5140 worker->current_func); 5141 list_for_each_entry(work, &worker->scheduled, entry) 5142 pr_cont_work(false, work, &pcws); 5143 pr_cont_work_flush(comma, (work_func_t)-1L, &pcws); 5144 comma = true; 5145 } 5146 pr_cont("\n"); 5147 } 5148 5149 list_for_each_entry(work, &pool->worklist, entry) { 5150 if (get_work_pwq(work) == pwq) { 5151 has_pending = true; 5152 break; 5153 } 5154 } 5155 if (has_pending) { 5156 bool comma = false; 5157 5158 pr_info(" pending:"); 5159 list_for_each_entry(work, &pool->worklist, entry) { 5160 if (get_work_pwq(work) != pwq) 5161 continue; 5162 5163 pr_cont_work(comma, work, &pcws); 5164 comma = !(*work_data_bits(work) & WORK_STRUCT_LINKED); 5165 } 5166 pr_cont_work_flush(comma, (work_func_t)-1L, &pcws); 5167 pr_cont("\n"); 5168 } 5169 5170 if (!list_empty(&pwq->inactive_works)) { 5171 bool comma = false; 5172 5173 pr_info(" inactive:"); 5174 list_for_each_entry(work, &pwq->inactive_works, entry) { 5175 pr_cont_work(comma, work, &pcws); 5176 comma = !(*work_data_bits(work) & WORK_STRUCT_LINKED); 5177 } 5178 pr_cont_work_flush(comma, (work_func_t)-1L, &pcws); 5179 pr_cont("\n"); 5180 } 5181 } 5182 5183 /** 5184 * show_one_workqueue - dump state of specified workqueue 5185 * @wq: workqueue whose state will be printed 5186 */ 5187 void show_one_workqueue(struct workqueue_struct *wq) 5188 { 5189 struct pool_workqueue *pwq; 5190 bool idle = true; 5191 unsigned long flags; 5192 5193 for_each_pwq(pwq, wq) { 5194 if (pwq->nr_active || !list_empty(&pwq->inactive_works)) { 5195 idle = false; 5196 break; 5197 } 5198 } 5199 if (idle) /* Nothing to print for idle workqueue */ 5200 return; 5201 5202 pr_info("workqueue %s: flags=0x%x\n", wq->name, wq->flags); 5203 5204 for_each_pwq(pwq, wq) { 5205 raw_spin_lock_irqsave(&pwq->pool->lock, flags); 5206 if (pwq->nr_active || !list_empty(&pwq->inactive_works)) { 5207 /* 5208 * Defer printing to avoid deadlocks in console 5209 * drivers that queue work while holding locks 5210 * also taken in their write paths. 5211 */ 5212 printk_deferred_enter(); 5213 show_pwq(pwq); 5214 printk_deferred_exit(); 5215 } 5216 raw_spin_unlock_irqrestore(&pwq->pool->lock, flags); 5217 /* 5218 * We could be printing a lot from atomic context, e.g. 5219 * sysrq-t -> show_all_workqueues(). Avoid triggering 5220 * hard lockup. 
5221 */ 5222 touch_nmi_watchdog(); 5223 } 5224 5225 } 5226 5227 /** 5228 * show_one_worker_pool - dump state of specified worker pool 5229 * @pool: worker pool whose state will be printed 5230 */ 5231 static void show_one_worker_pool(struct worker_pool *pool) 5232 { 5233 struct worker *worker; 5234 bool first = true; 5235 unsigned long flags; 5236 unsigned long hung = 0; 5237 5238 raw_spin_lock_irqsave(&pool->lock, flags); 5239 if (pool->nr_workers == pool->nr_idle) 5240 goto next_pool; 5241 5242 /* How long the first pending work is waiting for a worker. */ 5243 if (!list_empty(&pool->worklist)) 5244 hung = jiffies_to_msecs(jiffies - pool->watchdog_ts) / 1000; 5245 5246 /* 5247 * Defer printing to avoid deadlocks in console drivers that 5248 * queue work while holding locks also taken in their write 5249 * paths. 5250 */ 5251 printk_deferred_enter(); 5252 pr_info("pool %d:", pool->id); 5253 pr_cont_pool_info(pool); 5254 pr_cont(" hung=%lus workers=%d", hung, pool->nr_workers); 5255 if (pool->manager) 5256 pr_cont(" manager: %d", 5257 task_pid_nr(pool->manager->task)); 5258 list_for_each_entry(worker, &pool->idle_list, entry) { 5259 pr_cont(" %s%d", first ? "idle: " : "", 5260 task_pid_nr(worker->task)); 5261 first = false; 5262 } 5263 pr_cont("\n"); 5264 printk_deferred_exit(); 5265 next_pool: 5266 raw_spin_unlock_irqrestore(&pool->lock, flags); 5267 /* 5268 * We could be printing a lot from atomic context, e.g. 5269 * sysrq-t -> show_all_workqueues(). Avoid triggering 5270 * hard lockup. 5271 */ 5272 touch_nmi_watchdog(); 5273 5274 } 5275 5276 /** 5277 * show_all_workqueues - dump workqueue state 5278 * 5279 * Called from a sysrq handler and prints out all busy workqueues and pools. 5280 */ 5281 void show_all_workqueues(void) 5282 { 5283 struct workqueue_struct *wq; 5284 struct worker_pool *pool; 5285 int pi; 5286 5287 rcu_read_lock(); 5288 5289 pr_info("Showing busy workqueues and worker pools:\n"); 5290 5291 list_for_each_entry_rcu(wq, &workqueues, list) 5292 show_one_workqueue(wq); 5293 5294 for_each_pool(pool, pi) 5295 show_one_worker_pool(pool); 5296 5297 rcu_read_unlock(); 5298 } 5299 5300 /** 5301 * show_freezable_workqueues - dump freezable workqueue state 5302 * 5303 * Called from try_to_freeze_tasks() and prints out all freezable workqueues 5304 * still busy. 5305 */ 5306 void show_freezable_workqueues(void) 5307 { 5308 struct workqueue_struct *wq; 5309 5310 rcu_read_lock(); 5311 5312 pr_info("Showing freezable workqueues that are still busy:\n"); 5313 5314 list_for_each_entry_rcu(wq, &workqueues, list) { 5315 if (!(wq->flags & WQ_FREEZABLE)) 5316 continue; 5317 show_one_workqueue(wq); 5318 } 5319 5320 rcu_read_unlock(); 5321 } 5322 5323 /* used to show worker information through /proc/PID/{comm,stat,status} */ 5324 void wq_worker_comm(char *buf, size_t size, struct task_struct *task) 5325 { 5326 int off; 5327 5328 /* always show the actual comm */ 5329 off = strscpy(buf, task->comm, size); 5330 if (off < 0) 5331 return; 5332 5333 /* stabilize PF_WQ_WORKER and worker pool association */ 5334 mutex_lock(&wq_pool_attach_mutex); 5335 5336 if (task->flags & PF_WQ_WORKER) { 5337 struct worker *worker = kthread_data(task); 5338 struct worker_pool *pool = worker->pool; 5339 5340 if (pool) { 5341 raw_spin_lock_irq(&pool->lock); 5342 /* 5343 * ->desc tracks information (wq name or 5344 * set_worker_desc()) for the latest execution. If 5345 * current, prepend '+', otherwise '-'. 
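 * For example, a worker busy running writeback work typically shows up in
 * ps as something like "kworker/u16:1+flush-8:0", while
 * "kworker/u16:1-flush-8:0" means flush-8:0 was merely its most recent
 * work item (illustrative names, the exact ids vary).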
5346 */
5347 if (worker->desc[0] != '\0') {
5348 if (worker->current_work)
5349 scnprintf(buf + off, size - off, "+%s",
5350 worker->desc);
5351 else
5352 scnprintf(buf + off, size - off, "-%s",
5353 worker->desc);
5354 }
5355 raw_spin_unlock_irq(&pool->lock);
5356 }
5357 }
5358
5359 mutex_unlock(&wq_pool_attach_mutex);
5360 }
5361
5362 #ifdef CONFIG_SMP
5363
5364 /*
5365 * CPU hotplug.
5366 *
5367 * There are two challenges in supporting CPU hotplug. Firstly, there
5368 * are a lot of assumptions on strong associations among work, pwq and
5369 * pool which make migrating pending and scheduled works very
5370 * difficult to implement without impacting hot paths. Secondly,
5371 * worker pools serve a mix of short, long and very long running works,
5372 * making blocked draining impractical.
5373 *
5374 * This is solved by allowing the pools to be disassociated from the CPU,
5375 * running as unbound ones, and allowing them to be reattached later if the
5376 * cpu comes back online.
5377 */
5378
5379 static void unbind_workers(int cpu)
5380 {
5381 struct worker_pool *pool;
5382 struct worker *worker;
5383
5384 for_each_cpu_worker_pool(pool, cpu) {
5385 mutex_lock(&wq_pool_attach_mutex);
5386 raw_spin_lock_irq(&pool->lock);
5387
5388 /*
5389 * We've blocked all attach/detach operations. Make all workers
5390 * unbound and set DISASSOCIATED. Before this, all workers
5391 * must be on the cpu. After this, they may become diasporas.
5392 * And the preemption-disabled sections in their sched callbacks
5393 * are guaranteed to see WORKER_UNBOUND since the code here
5394 * is on the same cpu.
5395 */
5396 for_each_pool_worker(worker, pool)
5397 worker->flags |= WORKER_UNBOUND;
5398
5399 pool->flags |= POOL_DISASSOCIATED;
5400
5401 /*
5402 * The handling of nr_running in sched callbacks is disabled
5403 * now. Zap nr_running. After this, nr_running stays zero and
5404 * need_more_worker() and keep_working() are always true as
5405 * long as the worklist is not empty. This pool now behaves as
5406 * an unbound (in terms of concurrency management) pool which
5407 * is served by workers tied to the pool.
5408 */
5409 pool->nr_running = 0;
5410
5411 /*
5412 * With concurrency management just turned off, a busy
5413 * worker blocking could lead to lengthy stalls. Kick off
5414 * unbound chain execution of currently pending work items.
5415 */
5416 kick_pool(pool);
5417
5418 raw_spin_unlock_irq(&pool->lock);
5419
5420 for_each_pool_worker(worker, pool)
5421 unbind_worker(worker);
5422
5423 mutex_unlock(&wq_pool_attach_mutex);
5424 }
5425 }
5426
5427 /**
5428 * rebind_workers - rebind all workers of a pool to the associated CPU
5429 * @pool: pool of interest
5430 *
5431 * @pool->cpu is coming online. Rebind all workers to the CPU.
5432 */
5433 static void rebind_workers(struct worker_pool *pool)
5434 {
5435 struct worker *worker;
5436
5437 lockdep_assert_held(&wq_pool_attach_mutex);
5438
5439 /*
5440 * Restore CPU affinity of all workers. As all idle workers should
5441 * be on the run-queue of the associated CPU before any local
5442 * wake-ups for concurrency management happen, restore CPU affinity
5443 * of all workers first and then clear UNBOUND. As we're called
5444 * from CPU_ONLINE, the following shouldn't fail.
5445 */ 5446 for_each_pool_worker(worker, pool) { 5447 kthread_set_per_cpu(worker->task, pool->cpu); 5448 WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, 5449 pool_allowed_cpus(pool)) < 0); 5450 } 5451 5452 raw_spin_lock_irq(&pool->lock); 5453 5454 pool->flags &= ~POOL_DISASSOCIATED; 5455 5456 for_each_pool_worker(worker, pool) { 5457 unsigned int worker_flags = worker->flags; 5458 5459 /* 5460 * We want to clear UNBOUND but can't directly call 5461 * worker_clr_flags() or adjust nr_running. Atomically 5462 * replace UNBOUND with another NOT_RUNNING flag REBOUND. 5463 * @worker will clear REBOUND using worker_clr_flags() when 5464 * it initiates the next execution cycle thus restoring 5465 * concurrency management. Note that when or whether 5466 * @worker clears REBOUND doesn't affect correctness. 5467 * 5468 * WRITE_ONCE() is necessary because @worker->flags may be 5469 * tested without holding any lock in 5470 * wq_worker_running(). Without it, NOT_RUNNING test may 5471 * fail incorrectly leading to premature concurrency 5472 * management operations. 5473 */ 5474 WARN_ON_ONCE(!(worker_flags & WORKER_UNBOUND)); 5475 worker_flags |= WORKER_REBOUND; 5476 worker_flags &= ~WORKER_UNBOUND; 5477 WRITE_ONCE(worker->flags, worker_flags); 5478 } 5479 5480 raw_spin_unlock_irq(&pool->lock); 5481 } 5482 5483 /** 5484 * restore_unbound_workers_cpumask - restore cpumask of unbound workers 5485 * @pool: unbound pool of interest 5486 * @cpu: the CPU which is coming up 5487 * 5488 * An unbound pool may end up with a cpumask which doesn't have any online 5489 * CPUs. When a worker of such pool get scheduled, the scheduler resets 5490 * its cpus_allowed. If @cpu is in @pool's cpumask which didn't have any 5491 * online CPU before, cpus_allowed of all its workers should be restored. 5492 */ 5493 static void restore_unbound_workers_cpumask(struct worker_pool *pool, int cpu) 5494 { 5495 static cpumask_t cpumask; 5496 struct worker *worker; 5497 5498 lockdep_assert_held(&wq_pool_attach_mutex); 5499 5500 /* is @cpu allowed for @pool? 
*/ 5501 if (!cpumask_test_cpu(cpu, pool->attrs->cpumask)) 5502 return; 5503 5504 cpumask_and(&cpumask, pool->attrs->cpumask, cpu_online_mask); 5505 5506 /* as we're called from CPU_ONLINE, the following shouldn't fail */ 5507 for_each_pool_worker(worker, pool) 5508 WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, &cpumask) < 0); 5509 } 5510 5511 int workqueue_prepare_cpu(unsigned int cpu) 5512 { 5513 struct worker_pool *pool; 5514 5515 for_each_cpu_worker_pool(pool, cpu) { 5516 if (pool->nr_workers) 5517 continue; 5518 if (!create_worker(pool)) 5519 return -ENOMEM; 5520 } 5521 return 0; 5522 } 5523 5524 int workqueue_online_cpu(unsigned int cpu) 5525 { 5526 struct worker_pool *pool; 5527 struct workqueue_struct *wq; 5528 int pi; 5529 5530 mutex_lock(&wq_pool_mutex); 5531 5532 for_each_pool(pool, pi) { 5533 mutex_lock(&wq_pool_attach_mutex); 5534 5535 if (pool->cpu == cpu) 5536 rebind_workers(pool); 5537 else if (pool->cpu < 0) 5538 restore_unbound_workers_cpumask(pool, cpu); 5539 5540 mutex_unlock(&wq_pool_attach_mutex); 5541 } 5542 5543 /* update pod affinity of unbound workqueues */ 5544 list_for_each_entry(wq, &workqueues, list) { 5545 struct workqueue_attrs *attrs = wq->unbound_attrs; 5546 5547 if (attrs) { 5548 const struct wq_pod_type *pt = wqattrs_pod_type(attrs); 5549 int tcpu; 5550 5551 for_each_cpu(tcpu, pt->pod_cpus[pt->cpu_pod[cpu]]) 5552 wq_update_pod(wq, tcpu, cpu, true); 5553 } 5554 } 5555 5556 mutex_unlock(&wq_pool_mutex); 5557 return 0; 5558 } 5559 5560 int workqueue_offline_cpu(unsigned int cpu) 5561 { 5562 struct workqueue_struct *wq; 5563 5564 /* unbinding per-cpu workers should happen on the local CPU */ 5565 if (WARN_ON(cpu != smp_processor_id())) 5566 return -1; 5567 5568 unbind_workers(cpu); 5569 5570 /* update pod affinity of unbound workqueues */ 5571 mutex_lock(&wq_pool_mutex); 5572 list_for_each_entry(wq, &workqueues, list) { 5573 struct workqueue_attrs *attrs = wq->unbound_attrs; 5574 5575 if (attrs) { 5576 const struct wq_pod_type *pt = wqattrs_pod_type(attrs); 5577 int tcpu; 5578 5579 for_each_cpu(tcpu, pt->pod_cpus[pt->cpu_pod[cpu]]) 5580 wq_update_pod(wq, tcpu, cpu, false); 5581 } 5582 } 5583 mutex_unlock(&wq_pool_mutex); 5584 5585 return 0; 5586 } 5587 5588 struct work_for_cpu { 5589 struct work_struct work; 5590 long (*fn)(void *); 5591 void *arg; 5592 long ret; 5593 }; 5594 5595 static void work_for_cpu_fn(struct work_struct *work) 5596 { 5597 struct work_for_cpu *wfc = container_of(work, struct work_for_cpu, work); 5598 5599 wfc->ret = wfc->fn(wfc->arg); 5600 } 5601 5602 /** 5603 * work_on_cpu - run a function in thread context on a particular cpu 5604 * @cpu: the cpu to run on 5605 * @fn: the function to run 5606 * @arg: the function arg 5607 * 5608 * It is up to the caller to ensure that the cpu doesn't go offline. 5609 * The caller must not hold any locks which would prevent @fn from completing. 5610 * 5611 * Return: The value @fn returns. 5612 */ 5613 long work_on_cpu(int cpu, long (*fn)(void *), void *arg) 5614 { 5615 struct work_for_cpu wfc = { .fn = fn, .arg = arg }; 5616 5617 INIT_WORK_ONSTACK(&wfc.work, work_for_cpu_fn); 5618 schedule_work_on(cpu, &wfc.work); 5619 flush_work(&wfc.work); 5620 destroy_work_on_stack(&wfc.work); 5621 return wfc.ret; 5622 } 5623 EXPORT_SYMBOL_GPL(work_on_cpu); 5624 5625 /** 5626 * work_on_cpu_safe - run a function in thread context on a particular cpu 5627 * @cpu: the cpu to run on 5628 * @fn: the function to run 5629 * @arg: the function argument 5630 * 5631 * Disables CPU hotplug and calls work_on_cpu(). 
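 * If @cpu is offline, -ENODEV is returned and @fn is not run.
 *
 * A typical call looks roughly like this (the callback name and
 * argument are illustrative):
 *
 *	ret = work_on_cpu_safe(cpu, read_counter_on_cpu, &args);
 *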
The caller must not hold 5632 * any locks which would prevent @fn from completing. 5633 * 5634 * Return: The value @fn returns. 5635 */ 5636 long work_on_cpu_safe(int cpu, long (*fn)(void *), void *arg) 5637 { 5638 long ret = -ENODEV; 5639 5640 cpus_read_lock(); 5641 if (cpu_online(cpu)) 5642 ret = work_on_cpu(cpu, fn, arg); 5643 cpus_read_unlock(); 5644 return ret; 5645 } 5646 EXPORT_SYMBOL_GPL(work_on_cpu_safe); 5647 #endif /* CONFIG_SMP */ 5648 5649 #ifdef CONFIG_FREEZER 5650 5651 /** 5652 * freeze_workqueues_begin - begin freezing workqueues 5653 * 5654 * Start freezing workqueues. After this function returns, all freezable 5655 * workqueues will queue new works to their inactive_works list instead of 5656 * pool->worklist. 5657 * 5658 * CONTEXT: 5659 * Grabs and releases wq_pool_mutex, wq->mutex and pool->lock's. 5660 */ 5661 void freeze_workqueues_begin(void) 5662 { 5663 struct workqueue_struct *wq; 5664 struct pool_workqueue *pwq; 5665 5666 mutex_lock(&wq_pool_mutex); 5667 5668 WARN_ON_ONCE(workqueue_freezing); 5669 workqueue_freezing = true; 5670 5671 list_for_each_entry(wq, &workqueues, list) { 5672 mutex_lock(&wq->mutex); 5673 for_each_pwq(pwq, wq) 5674 pwq_adjust_max_active(pwq); 5675 mutex_unlock(&wq->mutex); 5676 } 5677 5678 mutex_unlock(&wq_pool_mutex); 5679 } 5680 5681 /** 5682 * freeze_workqueues_busy - are freezable workqueues still busy? 5683 * 5684 * Check whether freezing is complete. This function must be called 5685 * between freeze_workqueues_begin() and thaw_workqueues(). 5686 * 5687 * CONTEXT: 5688 * Grabs and releases wq_pool_mutex. 5689 * 5690 * Return: 5691 * %true if some freezable workqueues are still busy. %false if freezing 5692 * is complete. 5693 */ 5694 bool freeze_workqueues_busy(void) 5695 { 5696 bool busy = false; 5697 struct workqueue_struct *wq; 5698 struct pool_workqueue *pwq; 5699 5700 mutex_lock(&wq_pool_mutex); 5701 5702 WARN_ON_ONCE(!workqueue_freezing); 5703 5704 list_for_each_entry(wq, &workqueues, list) { 5705 if (!(wq->flags & WQ_FREEZABLE)) 5706 continue; 5707 /* 5708 * nr_active is monotonically decreasing. It's safe 5709 * to peek without lock. 5710 */ 5711 rcu_read_lock(); 5712 for_each_pwq(pwq, wq) { 5713 WARN_ON_ONCE(pwq->nr_active < 0); 5714 if (pwq->nr_active) { 5715 busy = true; 5716 rcu_read_unlock(); 5717 goto out_unlock; 5718 } 5719 } 5720 rcu_read_unlock(); 5721 } 5722 out_unlock: 5723 mutex_unlock(&wq_pool_mutex); 5724 return busy; 5725 } 5726 5727 /** 5728 * thaw_workqueues - thaw workqueues 5729 * 5730 * Thaw workqueues. Normal queueing is restored and all collected 5731 * frozen works are transferred to their respective pool worklists. 5732 * 5733 * CONTEXT: 5734 * Grabs and releases wq_pool_mutex, wq->mutex and pool->lock's. 
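 *
 * Roughly, the freezer is expected to sequence these calls as in the
 * following sketch (polling interval and error handling omitted):
 *
 *	freeze_workqueues_begin();
 *	while (freeze_workqueues_busy())
 *		msleep(10);
 *	...	(system is frozen; suspend work happens here)
 *	thaw_workqueues();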
5735 */ 5736 void thaw_workqueues(void) 5737 { 5738 struct workqueue_struct *wq; 5739 struct pool_workqueue *pwq; 5740 5741 mutex_lock(&wq_pool_mutex); 5742 5743 if (!workqueue_freezing) 5744 goto out_unlock; 5745 5746 workqueue_freezing = false; 5747 5748 /* restore max_active and repopulate worklist */ 5749 list_for_each_entry(wq, &workqueues, list) { 5750 mutex_lock(&wq->mutex); 5751 for_each_pwq(pwq, wq) 5752 pwq_adjust_max_active(pwq); 5753 mutex_unlock(&wq->mutex); 5754 } 5755 5756 out_unlock: 5757 mutex_unlock(&wq_pool_mutex); 5758 } 5759 #endif /* CONFIG_FREEZER */ 5760 5761 static int workqueue_apply_unbound_cpumask(const cpumask_var_t unbound_cpumask) 5762 { 5763 LIST_HEAD(ctxs); 5764 int ret = 0; 5765 struct workqueue_struct *wq; 5766 struct apply_wqattrs_ctx *ctx, *n; 5767 5768 lockdep_assert_held(&wq_pool_mutex); 5769 5770 list_for_each_entry(wq, &workqueues, list) { 5771 if (!(wq->flags & WQ_UNBOUND)) 5772 continue; 5773 /* creating multiple pwqs breaks ordering guarantee */ 5774 if (wq->flags & __WQ_ORDERED) 5775 continue; 5776 5777 ctx = apply_wqattrs_prepare(wq, wq->unbound_attrs, unbound_cpumask); 5778 if (IS_ERR(ctx)) { 5779 ret = PTR_ERR(ctx); 5780 break; 5781 } 5782 5783 list_add_tail(&ctx->list, &ctxs); 5784 } 5785 5786 list_for_each_entry_safe(ctx, n, &ctxs, list) { 5787 if (!ret) 5788 apply_wqattrs_commit(ctx); 5789 apply_wqattrs_cleanup(ctx); 5790 } 5791 5792 if (!ret) { 5793 mutex_lock(&wq_pool_attach_mutex); 5794 cpumask_copy(wq_unbound_cpumask, unbound_cpumask); 5795 mutex_unlock(&wq_pool_attach_mutex); 5796 } 5797 return ret; 5798 } 5799 5800 /** 5801 * workqueue_set_unbound_cpumask - Set the low-level unbound cpumask 5802 * @cpumask: the cpumask to set 5803 * 5804 * The low-level workqueues cpumask is a global cpumask that limits 5805 * the affinity of all unbound workqueues. This function check the @cpumask 5806 * and apply it to all unbound workqueues and updates all pwqs of them. 5807 * 5808 * Return: 0 - Success 5809 * -EINVAL - Invalid @cpumask 5810 * -ENOMEM - Failed to allocate memory for attrs or pwqs. 5811 */ 5812 int workqueue_set_unbound_cpumask(cpumask_var_t cpumask) 5813 { 5814 int ret = -EINVAL; 5815 5816 /* 5817 * Not excluding isolated cpus on purpose. 5818 * If the user wishes to include them, we allow that. 
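 *
 * This is what the global sysfs knob ends up calling, so e.g. an
 * administrator who wants unbound work confined to CPUs 0-3,
 * isolated or not, can write (path as typically exposed):
 *
 *	echo 0-3 > /sys/devices/virtual/workqueue/cpumask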
5819 */ 5820 cpumask_and(cpumask, cpumask, cpu_possible_mask); 5821 if (!cpumask_empty(cpumask)) { 5822 apply_wqattrs_lock(); 5823 if (cpumask_equal(cpumask, wq_unbound_cpumask)) { 5824 ret = 0; 5825 goto out_unlock; 5826 } 5827 5828 ret = workqueue_apply_unbound_cpumask(cpumask); 5829 5830 out_unlock: 5831 apply_wqattrs_unlock(); 5832 } 5833 5834 return ret; 5835 } 5836 5837 static int parse_affn_scope(const char *val) 5838 { 5839 int i; 5840 5841 for (i = 0; i < ARRAY_SIZE(wq_affn_names); i++) { 5842 if (!strncasecmp(val, wq_affn_names[i], strlen(wq_affn_names[i]))) 5843 return i; 5844 } 5845 return -EINVAL; 5846 } 5847 5848 static int wq_affn_dfl_set(const char *val, const struct kernel_param *kp) 5849 { 5850 int affn; 5851 5852 affn = parse_affn_scope(val); 5853 if (affn < 0) 5854 return affn; 5855 5856 wq_affn_dfl = affn; 5857 return 0; 5858 } 5859 5860 static int wq_affn_dfl_get(char *buffer, const struct kernel_param *kp) 5861 { 5862 return scnprintf(buffer, PAGE_SIZE, "%s\n", wq_affn_names[wq_affn_dfl]); 5863 } 5864 5865 static const struct kernel_param_ops wq_affn_dfl_ops = { 5866 .set = wq_affn_dfl_set, 5867 .get = wq_affn_dfl_get, 5868 }; 5869 5870 module_param_cb(default_affinity_scope, &wq_affn_dfl_ops, NULL, 0644); 5871 5872 #ifdef CONFIG_SYSFS 5873 /* 5874 * Workqueues with WQ_SYSFS flag set is visible to userland via 5875 * /sys/bus/workqueue/devices/WQ_NAME. All visible workqueues have the 5876 * following attributes. 5877 * 5878 * per_cpu RO bool : whether the workqueue is per-cpu or unbound 5879 * max_active RW int : maximum number of in-flight work items 5880 * 5881 * Unbound workqueues have the following extra attributes. 5882 * 5883 * nice RW int : nice value of the workers 5884 * cpumask RW mask : bitmask of allowed CPUs for the workers 5885 * affinity_scope RW str : worker CPU affinity scope (cache, numa, none) 5886 * affinity_strict RW bool : worker CPU affinity is strict 5887 */ 5888 struct wq_device { 5889 struct workqueue_struct *wq; 5890 struct device dev; 5891 }; 5892 5893 static struct workqueue_struct *dev_to_wq(struct device *dev) 5894 { 5895 struct wq_device *wq_dev = container_of(dev, struct wq_device, dev); 5896 5897 return wq_dev->wq; 5898 } 5899 5900 static ssize_t per_cpu_show(struct device *dev, struct device_attribute *attr, 5901 char *buf) 5902 { 5903 struct workqueue_struct *wq = dev_to_wq(dev); 5904 5905 return scnprintf(buf, PAGE_SIZE, "%d\n", (bool)!(wq->flags & WQ_UNBOUND)); 5906 } 5907 static DEVICE_ATTR_RO(per_cpu); 5908 5909 static ssize_t max_active_show(struct device *dev, 5910 struct device_attribute *attr, char *buf) 5911 { 5912 struct workqueue_struct *wq = dev_to_wq(dev); 5913 5914 return scnprintf(buf, PAGE_SIZE, "%d\n", wq->saved_max_active); 5915 } 5916 5917 static ssize_t max_active_store(struct device *dev, 5918 struct device_attribute *attr, const char *buf, 5919 size_t count) 5920 { 5921 struct workqueue_struct *wq = dev_to_wq(dev); 5922 int val; 5923 5924 if (sscanf(buf, "%d", &val) != 1 || val <= 0) 5925 return -EINVAL; 5926 5927 workqueue_set_max_active(wq, val); 5928 return count; 5929 } 5930 static DEVICE_ATTR_RW(max_active); 5931 5932 static struct attribute *wq_sysfs_attrs[] = { 5933 &dev_attr_per_cpu.attr, 5934 &dev_attr_max_active.attr, 5935 NULL, 5936 }; 5937 ATTRIBUTE_GROUPS(wq_sysfs); 5938 5939 static ssize_t wq_nice_show(struct device *dev, struct device_attribute *attr, 5940 char *buf) 5941 { 5942 struct workqueue_struct *wq = dev_to_wq(dev); 5943 int written; 5944 5945 mutex_lock(&wq->mutex); 5946 written = 
scnprintf(buf, PAGE_SIZE, "%d\n", wq->unbound_attrs->nice); 5947 mutex_unlock(&wq->mutex); 5948 5949 return written; 5950 } 5951 5952 /* prepare workqueue_attrs for sysfs store operations */ 5953 static struct workqueue_attrs *wq_sysfs_prep_attrs(struct workqueue_struct *wq) 5954 { 5955 struct workqueue_attrs *attrs; 5956 5957 lockdep_assert_held(&wq_pool_mutex); 5958 5959 attrs = alloc_workqueue_attrs(); 5960 if (!attrs) 5961 return NULL; 5962 5963 copy_workqueue_attrs(attrs, wq->unbound_attrs); 5964 return attrs; 5965 } 5966 5967 static ssize_t wq_nice_store(struct device *dev, struct device_attribute *attr, 5968 const char *buf, size_t count) 5969 { 5970 struct workqueue_struct *wq = dev_to_wq(dev); 5971 struct workqueue_attrs *attrs; 5972 int ret = -ENOMEM; 5973 5974 apply_wqattrs_lock(); 5975 5976 attrs = wq_sysfs_prep_attrs(wq); 5977 if (!attrs) 5978 goto out_unlock; 5979 5980 if (sscanf(buf, "%d", &attrs->nice) == 1 && 5981 attrs->nice >= MIN_NICE && attrs->nice <= MAX_NICE) 5982 ret = apply_workqueue_attrs_locked(wq, attrs); 5983 else 5984 ret = -EINVAL; 5985 5986 out_unlock: 5987 apply_wqattrs_unlock(); 5988 free_workqueue_attrs(attrs); 5989 return ret ?: count; 5990 } 5991 5992 static ssize_t wq_cpumask_show(struct device *dev, 5993 struct device_attribute *attr, char *buf) 5994 { 5995 struct workqueue_struct *wq = dev_to_wq(dev); 5996 int written; 5997 5998 mutex_lock(&wq->mutex); 5999 written = scnprintf(buf, PAGE_SIZE, "%*pb\n", 6000 cpumask_pr_args(wq->unbound_attrs->cpumask)); 6001 mutex_unlock(&wq->mutex); 6002 return written; 6003 } 6004 6005 static ssize_t wq_cpumask_store(struct device *dev, 6006 struct device_attribute *attr, 6007 const char *buf, size_t count) 6008 { 6009 struct workqueue_struct *wq = dev_to_wq(dev); 6010 struct workqueue_attrs *attrs; 6011 int ret = -ENOMEM; 6012 6013 apply_wqattrs_lock(); 6014 6015 attrs = wq_sysfs_prep_attrs(wq); 6016 if (!attrs) 6017 goto out_unlock; 6018 6019 ret = cpumask_parse(buf, attrs->cpumask); 6020 if (!ret) 6021 ret = apply_workqueue_attrs_locked(wq, attrs); 6022 6023 out_unlock: 6024 apply_wqattrs_unlock(); 6025 free_workqueue_attrs(attrs); 6026 return ret ?: count; 6027 } 6028 6029 static ssize_t wq_affn_scope_show(struct device *dev, 6030 struct device_attribute *attr, char *buf) 6031 { 6032 struct workqueue_struct *wq = dev_to_wq(dev); 6033 int written; 6034 6035 mutex_lock(&wq->mutex); 6036 written = scnprintf(buf, PAGE_SIZE, "%s\n", 6037 wq_affn_names[wq->unbound_attrs->affn_scope]); 6038 mutex_unlock(&wq->mutex); 6039 6040 return written; 6041 } 6042 6043 static ssize_t wq_affn_scope_store(struct device *dev, 6044 struct device_attribute *attr, 6045 const char *buf, size_t count) 6046 { 6047 struct workqueue_struct *wq = dev_to_wq(dev); 6048 struct workqueue_attrs *attrs; 6049 int affn, ret = -ENOMEM; 6050 6051 affn = parse_affn_scope(buf); 6052 if (affn < 0) 6053 return affn; 6054 6055 apply_wqattrs_lock(); 6056 attrs = wq_sysfs_prep_attrs(wq); 6057 if (attrs) { 6058 attrs->affn_scope = affn; 6059 ret = apply_workqueue_attrs_locked(wq, attrs); 6060 } 6061 apply_wqattrs_unlock(); 6062 free_workqueue_attrs(attrs); 6063 return ret ?: count; 6064 } 6065 6066 static ssize_t wq_affinity_strict_show(struct device *dev, 6067 struct device_attribute *attr, char *buf) 6068 { 6069 struct workqueue_struct *wq = dev_to_wq(dev); 6070 6071 return scnprintf(buf, PAGE_SIZE, "%d\n", 6072 wq->unbound_attrs->affn_strict); 6073 } 6074 6075 static ssize_t wq_affinity_strict_store(struct device *dev, 6076 struct device_attribute *attr, 
6077 const char *buf, size_t count) 6078 { 6079 struct workqueue_struct *wq = dev_to_wq(dev); 6080 struct workqueue_attrs *attrs; 6081 int v, ret = -ENOMEM; 6082 6083 if (sscanf(buf, "%d", &v) != 1) 6084 return -EINVAL; 6085 6086 apply_wqattrs_lock(); 6087 attrs = wq_sysfs_prep_attrs(wq); 6088 if (attrs) { 6089 attrs->affn_strict = (bool)v; 6090 ret = apply_workqueue_attrs_locked(wq, attrs); 6091 } 6092 apply_wqattrs_unlock(); 6093 free_workqueue_attrs(attrs); 6094 return ret ?: count; 6095 } 6096 6097 static struct device_attribute wq_sysfs_unbound_attrs[] = { 6098 __ATTR(nice, 0644, wq_nice_show, wq_nice_store), 6099 __ATTR(cpumask, 0644, wq_cpumask_show, wq_cpumask_store), 6100 __ATTR(affinity_scope, 0644, wq_affn_scope_show, wq_affn_scope_store), 6101 __ATTR(affinity_strict, 0644, wq_affinity_strict_show, wq_affinity_strict_store), 6102 __ATTR_NULL, 6103 }; 6104 6105 static struct bus_type wq_subsys = { 6106 .name = "workqueue", 6107 .dev_groups = wq_sysfs_groups, 6108 }; 6109 6110 static ssize_t wq_unbound_cpumask_show(struct device *dev, 6111 struct device_attribute *attr, char *buf) 6112 { 6113 int written; 6114 6115 mutex_lock(&wq_pool_mutex); 6116 written = scnprintf(buf, PAGE_SIZE, "%*pb\n", 6117 cpumask_pr_args(wq_unbound_cpumask)); 6118 mutex_unlock(&wq_pool_mutex); 6119 6120 return written; 6121 } 6122 6123 static ssize_t wq_unbound_cpumask_store(struct device *dev, 6124 struct device_attribute *attr, const char *buf, size_t count) 6125 { 6126 cpumask_var_t cpumask; 6127 int ret; 6128 6129 if (!zalloc_cpumask_var(&cpumask, GFP_KERNEL)) 6130 return -ENOMEM; 6131 6132 ret = cpumask_parse(buf, cpumask); 6133 if (!ret) 6134 ret = workqueue_set_unbound_cpumask(cpumask); 6135 6136 free_cpumask_var(cpumask); 6137 return ret ? ret : count; 6138 } 6139 6140 static struct device_attribute wq_sysfs_cpumask_attr = 6141 __ATTR(cpumask, 0644, wq_unbound_cpumask_show, 6142 wq_unbound_cpumask_store); 6143 6144 static int __init wq_sysfs_init(void) 6145 { 6146 struct device *dev_root; 6147 int err; 6148 6149 err = subsys_virtual_register(&wq_subsys, NULL); 6150 if (err) 6151 return err; 6152 6153 dev_root = bus_get_dev_root(&wq_subsys); 6154 if (dev_root) { 6155 err = device_create_file(dev_root, &wq_sysfs_cpumask_attr); 6156 put_device(dev_root); 6157 } 6158 return err; 6159 } 6160 core_initcall(wq_sysfs_init); 6161 6162 static void wq_device_release(struct device *dev) 6163 { 6164 struct wq_device *wq_dev = container_of(dev, struct wq_device, dev); 6165 6166 kfree(wq_dev); 6167 } 6168 6169 /** 6170 * workqueue_sysfs_register - make a workqueue visible in sysfs 6171 * @wq: the workqueue to register 6172 * 6173 * Expose @wq in sysfs under /sys/bus/workqueue/devices. 6174 * alloc_workqueue*() automatically calls this function if WQ_SYSFS is set 6175 * which is the preferred method. 6176 * 6177 * Workqueue user should use this function directly iff it wants to apply 6178 * workqueue_attrs before making the workqueue visible in sysfs; otherwise, 6179 * apply_workqueue_attrs() may race against userland updating the 6180 * attributes. 6181 * 6182 * Return: 0 on success, -errno on failure. 6183 */ 6184 int workqueue_sysfs_register(struct workqueue_struct *wq) 6185 { 6186 struct wq_device *wq_dev; 6187 int ret; 6188 6189 /* 6190 * Adjusting max_active or creating new pwqs by applying 6191 * attributes breaks ordering guarantee. Disallow exposing ordered 6192 * workqueues. 
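 *
 * For everything else the usual way to get here is passing WQ_SYSFS
 * at allocation time (the workqueue name below is illustrative); the
 * attributes then show up under /sys/bus/workqueue/devices/<name>/:
 *
 *	wq = alloc_workqueue("mydrv_wq", WQ_UNBOUND | WQ_SYSFS, 0);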
6193 */ 6194 if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT)) 6195 return -EINVAL; 6196 6197 wq->wq_dev = wq_dev = kzalloc(sizeof(*wq_dev), GFP_KERNEL); 6198 if (!wq_dev) 6199 return -ENOMEM; 6200 6201 wq_dev->wq = wq; 6202 wq_dev->dev.bus = &wq_subsys; 6203 wq_dev->dev.release = wq_device_release; 6204 dev_set_name(&wq_dev->dev, "%s", wq->name); 6205 6206 /* 6207 * unbound_attrs are created separately. Suppress uevent until 6208 * everything is ready. 6209 */ 6210 dev_set_uevent_suppress(&wq_dev->dev, true); 6211 6212 ret = device_register(&wq_dev->dev); 6213 if (ret) { 6214 put_device(&wq_dev->dev); 6215 wq->wq_dev = NULL; 6216 return ret; 6217 } 6218 6219 if (wq->flags & WQ_UNBOUND) { 6220 struct device_attribute *attr; 6221 6222 for (attr = wq_sysfs_unbound_attrs; attr->attr.name; attr++) { 6223 ret = device_create_file(&wq_dev->dev, attr); 6224 if (ret) { 6225 device_unregister(&wq_dev->dev); 6226 wq->wq_dev = NULL; 6227 return ret; 6228 } 6229 } 6230 } 6231 6232 dev_set_uevent_suppress(&wq_dev->dev, false); 6233 kobject_uevent(&wq_dev->dev.kobj, KOBJ_ADD); 6234 return 0; 6235 } 6236 6237 /** 6238 * workqueue_sysfs_unregister - undo workqueue_sysfs_register() 6239 * @wq: the workqueue to unregister 6240 * 6241 * If @wq is registered to sysfs by workqueue_sysfs_register(), unregister. 6242 */ 6243 static void workqueue_sysfs_unregister(struct workqueue_struct *wq) 6244 { 6245 struct wq_device *wq_dev = wq->wq_dev; 6246 6247 if (!wq->wq_dev) 6248 return; 6249 6250 wq->wq_dev = NULL; 6251 device_unregister(&wq_dev->dev); 6252 } 6253 #else /* CONFIG_SYSFS */ 6254 static void workqueue_sysfs_unregister(struct workqueue_struct *wq) { } 6255 #endif /* CONFIG_SYSFS */ 6256 6257 /* 6258 * Workqueue watchdog. 6259 * 6260 * Stall may be caused by various bugs - missing WQ_MEM_RECLAIM, illegal 6261 * flush dependency, a concurrency managed work item which stays RUNNING 6262 * indefinitely. Workqueue stalls can be very difficult to debug as the 6263 * usual warning mechanisms don't trigger and internal workqueue state is 6264 * largely opaque. 6265 * 6266 * Workqueue watchdog monitors all worker pools periodically and dumps 6267 * state if some pools failed to make forward progress for a while where 6268 * forward progress is defined as the first item on ->worklist changing. 6269 * 6270 * This mechanism is controlled through the kernel parameter 6271 * "workqueue.watchdog_thresh" which can be updated at runtime through the 6272 * corresponding sysfs parameter file. 6273 */ 6274 #ifdef CONFIG_WQ_WATCHDOG 6275 6276 static unsigned long wq_watchdog_thresh = 30; 6277 static struct timer_list wq_watchdog_timer; 6278 6279 static unsigned long wq_watchdog_touched = INITIAL_JIFFIES; 6280 static DEFINE_PER_CPU(unsigned long, wq_watchdog_touched_cpu) = INITIAL_JIFFIES; 6281 6282 /* 6283 * Show workers that might prevent the processing of pending work items. 6284 * The only candidates are CPU-bound workers in the running state. 6285 * Pending work items should be handled by another idle worker 6286 * in all other situations. 6287 */ 6288 static void show_cpu_pool_hog(struct worker_pool *pool) 6289 { 6290 struct worker *worker; 6291 unsigned long flags; 6292 int bkt; 6293 6294 raw_spin_lock_irqsave(&pool->lock, flags); 6295 6296 hash_for_each(pool->busy_hash, bkt, worker, hentry) { 6297 if (task_is_running(worker->task)) { 6298 /* 6299 * Defer printing to avoid deadlocks in console 6300 * drivers that queue work while holding locks 6301 * also taken in their write paths. 
6302 */ 6303 printk_deferred_enter(); 6304 6305 pr_info("pool %d:\n", pool->id); 6306 sched_show_task(worker->task); 6307 6308 printk_deferred_exit(); 6309 } 6310 } 6311 6312 raw_spin_unlock_irqrestore(&pool->lock, flags); 6313 } 6314 6315 static void show_cpu_pools_hogs(void) 6316 { 6317 struct worker_pool *pool; 6318 int pi; 6319 6320 pr_info("Showing backtraces of running workers in stalled CPU-bound worker pools:\n"); 6321 6322 rcu_read_lock(); 6323 6324 for_each_pool(pool, pi) { 6325 if (pool->cpu_stall) 6326 show_cpu_pool_hog(pool); 6327 6328 } 6329 6330 rcu_read_unlock(); 6331 } 6332 6333 static void wq_watchdog_reset_touched(void) 6334 { 6335 int cpu; 6336 6337 wq_watchdog_touched = jiffies; 6338 for_each_possible_cpu(cpu) 6339 per_cpu(wq_watchdog_touched_cpu, cpu) = jiffies; 6340 } 6341 6342 static void wq_watchdog_timer_fn(struct timer_list *unused) 6343 { 6344 unsigned long thresh = READ_ONCE(wq_watchdog_thresh) * HZ; 6345 bool lockup_detected = false; 6346 bool cpu_pool_stall = false; 6347 unsigned long now = jiffies; 6348 struct worker_pool *pool; 6349 int pi; 6350 6351 if (!thresh) 6352 return; 6353 6354 rcu_read_lock(); 6355 6356 for_each_pool(pool, pi) { 6357 unsigned long pool_ts, touched, ts; 6358 6359 pool->cpu_stall = false; 6360 if (list_empty(&pool->worklist)) 6361 continue; 6362 6363 /* 6364 * If a virtual machine is stopped by the host it can look to 6365 * the watchdog like a stall. 6366 */ 6367 kvm_check_and_clear_guest_paused(); 6368 6369 /* get the latest of pool and touched timestamps */ 6370 if (pool->cpu >= 0) 6371 touched = READ_ONCE(per_cpu(wq_watchdog_touched_cpu, pool->cpu)); 6372 else 6373 touched = READ_ONCE(wq_watchdog_touched); 6374 pool_ts = READ_ONCE(pool->watchdog_ts); 6375 6376 if (time_after(pool_ts, touched)) 6377 ts = pool_ts; 6378 else 6379 ts = touched; 6380 6381 /* did we stall? 
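 *
 * Worked example: with the default workqueue.watchdog_thresh of 30
 * and an assumed HZ of 1000, thresh is 30000 jiffies, so a pool is
 * reported once 30 seconds pass without its ->watchdog_ts or the
 * relevant touch timestamp moving forward.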
*/ 6382 if (time_after(now, ts + thresh)) { 6383 lockup_detected = true; 6384 if (pool->cpu >= 0) { 6385 pool->cpu_stall = true; 6386 cpu_pool_stall = true; 6387 } 6388 pr_emerg("BUG: workqueue lockup - pool"); 6389 pr_cont_pool_info(pool); 6390 pr_cont(" stuck for %us!\n", 6391 jiffies_to_msecs(now - pool_ts) / 1000); 6392 } 6393 6394 6395 } 6396 6397 rcu_read_unlock(); 6398 6399 if (lockup_detected) 6400 show_all_workqueues(); 6401 6402 if (cpu_pool_stall) 6403 show_cpu_pools_hogs(); 6404 6405 wq_watchdog_reset_touched(); 6406 mod_timer(&wq_watchdog_timer, jiffies + thresh); 6407 } 6408 6409 notrace void wq_watchdog_touch(int cpu) 6410 { 6411 if (cpu >= 0) 6412 per_cpu(wq_watchdog_touched_cpu, cpu) = jiffies; 6413 6414 wq_watchdog_touched = jiffies; 6415 } 6416 6417 static void wq_watchdog_set_thresh(unsigned long thresh) 6418 { 6419 wq_watchdog_thresh = 0; 6420 del_timer_sync(&wq_watchdog_timer); 6421 6422 if (thresh) { 6423 wq_watchdog_thresh = thresh; 6424 wq_watchdog_reset_touched(); 6425 mod_timer(&wq_watchdog_timer, jiffies + thresh * HZ); 6426 } 6427 } 6428 6429 static int wq_watchdog_param_set_thresh(const char *val, 6430 const struct kernel_param *kp) 6431 { 6432 unsigned long thresh; 6433 int ret; 6434 6435 ret = kstrtoul(val, 0, &thresh); 6436 if (ret) 6437 return ret; 6438 6439 if (system_wq) 6440 wq_watchdog_set_thresh(thresh); 6441 else 6442 wq_watchdog_thresh = thresh; 6443 6444 return 0; 6445 } 6446 6447 static const struct kernel_param_ops wq_watchdog_thresh_ops = { 6448 .set = wq_watchdog_param_set_thresh, 6449 .get = param_get_ulong, 6450 }; 6451 6452 module_param_cb(watchdog_thresh, &wq_watchdog_thresh_ops, &wq_watchdog_thresh, 6453 0644); 6454 6455 static void wq_watchdog_init(void) 6456 { 6457 timer_setup(&wq_watchdog_timer, wq_watchdog_timer_fn, TIMER_DEFERRABLE); 6458 wq_watchdog_set_thresh(wq_watchdog_thresh); 6459 } 6460 6461 #else /* CONFIG_WQ_WATCHDOG */ 6462 6463 static inline void wq_watchdog_init(void) { } 6464 6465 #endif /* CONFIG_WQ_WATCHDOG */ 6466 6467 /** 6468 * workqueue_init_early - early init for workqueue subsystem 6469 * 6470 * This is the first step of three-staged workqueue subsystem initialization and 6471 * invoked as soon as the bare basics - memory allocation, cpumasks and idr are 6472 * up. It sets up all the data structures and system workqueues and allows early 6473 * boot code to create workqueues and queue/cancel work items. Actual work item 6474 * execution starts only after kthreads can be created and scheduled right 6475 * before early initcalls. 
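 *
 * The three stages, in the order the boot code invokes them
 * (simplified sketch):
 *
 *	workqueue_init_early();		- pools and system wqs; can queue works
 *	workqueue_init();		- kworkers created; execution starts
 *	workqueue_init_topology();	- CPU pods for unbound workqueues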
6476 */ 6477 void __init workqueue_init_early(void) 6478 { 6479 struct wq_pod_type *pt = &wq_pod_types[WQ_AFFN_SYSTEM]; 6480 int std_nice[NR_STD_WORKER_POOLS] = { 0, HIGHPRI_NICE_LEVEL }; 6481 int i, cpu; 6482 6483 BUILD_BUG_ON(__alignof__(struct pool_workqueue) < __alignof__(long long)); 6484 6485 BUG_ON(!alloc_cpumask_var(&wq_unbound_cpumask, GFP_KERNEL)); 6486 cpumask_copy(wq_unbound_cpumask, housekeeping_cpumask(HK_TYPE_WQ)); 6487 cpumask_and(wq_unbound_cpumask, wq_unbound_cpumask, housekeeping_cpumask(HK_TYPE_DOMAIN)); 6488 6489 if (!cpumask_empty(&wq_cmdline_cpumask)) 6490 cpumask_and(wq_unbound_cpumask, wq_unbound_cpumask, &wq_cmdline_cpumask); 6491 6492 pwq_cache = KMEM_CACHE(pool_workqueue, SLAB_PANIC); 6493 6494 wq_update_pod_attrs_buf = alloc_workqueue_attrs(); 6495 BUG_ON(!wq_update_pod_attrs_buf); 6496 6497 /* initialize WQ_AFFN_SYSTEM pods */ 6498 pt->pod_cpus = kcalloc(1, sizeof(pt->pod_cpus[0]), GFP_KERNEL); 6499 pt->pod_node = kcalloc(1, sizeof(pt->pod_node[0]), GFP_KERNEL); 6500 pt->cpu_pod = kcalloc(nr_cpu_ids, sizeof(pt->cpu_pod[0]), GFP_KERNEL); 6501 BUG_ON(!pt->pod_cpus || !pt->pod_node || !pt->cpu_pod); 6502 6503 BUG_ON(!zalloc_cpumask_var_node(&pt->pod_cpus[0], GFP_KERNEL, NUMA_NO_NODE)); 6504 6505 wq_update_pod_attrs_buf = alloc_workqueue_attrs(); 6506 BUG_ON(!wq_update_pod_attrs_buf); 6507 6508 pt->nr_pods = 1; 6509 cpumask_copy(pt->pod_cpus[0], cpu_possible_mask); 6510 pt->pod_node[0] = NUMA_NO_NODE; 6511 pt->cpu_pod[0] = 0; 6512 6513 /* initialize CPU pools */ 6514 for_each_possible_cpu(cpu) { 6515 struct worker_pool *pool; 6516 6517 i = 0; 6518 for_each_cpu_worker_pool(pool, cpu) { 6519 BUG_ON(init_worker_pool(pool)); 6520 pool->cpu = cpu; 6521 cpumask_copy(pool->attrs->cpumask, cpumask_of(cpu)); 6522 cpumask_copy(pool->attrs->__pod_cpumask, cpumask_of(cpu)); 6523 pool->attrs->nice = std_nice[i++]; 6524 pool->attrs->affn_strict = true; 6525 pool->node = cpu_to_node(cpu); 6526 6527 /* alloc pool ID */ 6528 mutex_lock(&wq_pool_mutex); 6529 BUG_ON(worker_pool_assign_id(pool)); 6530 mutex_unlock(&wq_pool_mutex); 6531 } 6532 } 6533 6534 /* create default unbound and ordered wq attrs */ 6535 for (i = 0; i < NR_STD_WORKER_POOLS; i++) { 6536 struct workqueue_attrs *attrs; 6537 6538 BUG_ON(!(attrs = alloc_workqueue_attrs())); 6539 attrs->nice = std_nice[i]; 6540 unbound_std_wq_attrs[i] = attrs; 6541 6542 /* 6543 * An ordered wq should have only one pwq as ordering is 6544 * guaranteed by max_active which is enforced by pwqs. 
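 *
 * Such workqueues are typically created with alloc_ordered_workqueue(),
 * e.g. (the name is illustrative):
 *
 *	wq = alloc_ordered_workqueue("mydrv_ordered", WQ_MEM_RECLAIM);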
6545 */ 6546 BUG_ON(!(attrs = alloc_workqueue_attrs())); 6547 attrs->nice = std_nice[i]; 6548 attrs->ordered = true; 6549 ordered_wq_attrs[i] = attrs; 6550 } 6551 6552 system_wq = alloc_workqueue("events", 0, 0); 6553 system_highpri_wq = alloc_workqueue("events_highpri", WQ_HIGHPRI, 0); 6554 system_long_wq = alloc_workqueue("events_long", 0, 0); 6555 system_unbound_wq = alloc_workqueue("events_unbound", WQ_UNBOUND, 6556 WQ_MAX_ACTIVE); 6557 system_freezable_wq = alloc_workqueue("events_freezable", 6558 WQ_FREEZABLE, 0); 6559 system_power_efficient_wq = alloc_workqueue("events_power_efficient", 6560 WQ_POWER_EFFICIENT, 0); 6561 system_freezable_power_efficient_wq = alloc_workqueue("events_freezable_power_efficient", 6562 WQ_FREEZABLE | WQ_POWER_EFFICIENT, 6563 0); 6564 BUG_ON(!system_wq || !system_highpri_wq || !system_long_wq || 6565 !system_unbound_wq || !system_freezable_wq || 6566 !system_power_efficient_wq || 6567 !system_freezable_power_efficient_wq); 6568 } 6569 6570 static void __init wq_cpu_intensive_thresh_init(void) 6571 { 6572 unsigned long thresh; 6573 unsigned long bogo; 6574 6575 /* if the user set it to a specific value, keep it */ 6576 if (wq_cpu_intensive_thresh_us != ULONG_MAX) 6577 return; 6578 6579 pwq_release_worker = kthread_create_worker(0, "pool_workqueue_release"); 6580 BUG_ON(IS_ERR(pwq_release_worker)); 6581 6582 /* 6583 * The default of 10ms is derived from the fact that most modern (as of 6584 * 2023) processors can do a lot in 10ms and that it's just below what 6585 * most consider human-perceivable. However, the kernel also runs on a 6586 * lot slower CPUs including microcontrollers where the threshold is way 6587 * too low. 6588 * 6589 * Let's scale up the threshold upto 1 second if BogoMips is below 4000. 6590 * This is by no means accurate but it doesn't have to be. The mechanism 6591 * is still useful even when the threshold is fully scaled up. Also, as 6592 * the reports would usually be applicable to everyone, some machines 6593 * operating on longer thresholds won't significantly diminish their 6594 * usefulness. 6595 */ 6596 thresh = 10 * USEC_PER_MSEC; 6597 6598 /* see init/calibrate.c for lpj -> BogoMIPS calculation */ 6599 bogo = max_t(unsigned long, loops_per_jiffy / 500000 * HZ, 1); 6600 if (bogo < 4000) 6601 thresh = min_t(unsigned long, thresh * 4000 / bogo, USEC_PER_SEC); 6602 6603 pr_debug("wq_cpu_intensive_thresh: lpj=%lu BogoMIPS=%lu thresh_us=%lu\n", 6604 loops_per_jiffy, bogo, thresh); 6605 6606 wq_cpu_intensive_thresh_us = thresh; 6607 } 6608 6609 /** 6610 * workqueue_init - bring workqueue subsystem fully online 6611 * 6612 * This is the second step of three-staged workqueue subsystem initialization 6613 * and invoked as soon as kthreads can be created and scheduled. Workqueues have 6614 * been created and work items queued on them, but there are no kworkers 6615 * executing the work items yet. Populate the worker pools with the initial 6616 * workers and enable future kworker creations. 6617 */ 6618 void __init workqueue_init(void) 6619 { 6620 struct workqueue_struct *wq; 6621 struct worker_pool *pool; 6622 int cpu, bkt; 6623 6624 wq_cpu_intensive_thresh_init(); 6625 6626 mutex_lock(&wq_pool_mutex); 6627 6628 /* 6629 * Per-cpu pools created earlier could be missing node hint. Fix them 6630 * up. Also, create a rescuer for workqueues that requested it. 
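 *
 * "Requested it" means WQ_MEM_RECLAIM; such workqueues get a dedicated
 * rescuer so they can make forward progress under memory pressure,
 * e.g. (the name is illustrative):
 *
 *	wq = alloc_workqueue("mydrv_reclaim", WQ_MEM_RECLAIM, 0);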
6631 */ 6632 for_each_possible_cpu(cpu) { 6633 for_each_cpu_worker_pool(pool, cpu) { 6634 pool->node = cpu_to_node(cpu); 6635 } 6636 } 6637 6638 list_for_each_entry(wq, &workqueues, list) { 6639 WARN(init_rescuer(wq), 6640 "workqueue: failed to create early rescuer for %s", 6641 wq->name); 6642 } 6643 6644 mutex_unlock(&wq_pool_mutex); 6645 6646 /* create the initial workers */ 6647 for_each_online_cpu(cpu) { 6648 for_each_cpu_worker_pool(pool, cpu) { 6649 pool->flags &= ~POOL_DISASSOCIATED; 6650 BUG_ON(!create_worker(pool)); 6651 } 6652 } 6653 6654 hash_for_each(unbound_pool_hash, bkt, pool, hash_node) 6655 BUG_ON(!create_worker(pool)); 6656 6657 wq_online = true; 6658 wq_watchdog_init(); 6659 } 6660 6661 /* 6662 * Initialize @pt by first initializing @pt->cpu_pod[] with pod IDs according to 6663 * @cpu_shares_pod(). Each subset of CPUs that share a pod is assigned a unique 6664 * and consecutive pod ID. The rest of @pt is initialized accordingly. 6665 */ 6666 static void __init init_pod_type(struct wq_pod_type *pt, 6667 bool (*cpus_share_pod)(int, int)) 6668 { 6669 int cur, pre, cpu, pod; 6670 6671 pt->nr_pods = 0; 6672 6673 /* init @pt->cpu_pod[] according to @cpus_share_pod() */ 6674 pt->cpu_pod = kcalloc(nr_cpu_ids, sizeof(pt->cpu_pod[0]), GFP_KERNEL); 6675 BUG_ON(!pt->cpu_pod); 6676 6677 for_each_possible_cpu(cur) { 6678 for_each_possible_cpu(pre) { 6679 if (pre >= cur) { 6680 pt->cpu_pod[cur] = pt->nr_pods++; 6681 break; 6682 } 6683 if (cpus_share_pod(cur, pre)) { 6684 pt->cpu_pod[cur] = pt->cpu_pod[pre]; 6685 break; 6686 } 6687 } 6688 } 6689 6690 /* init the rest to match @pt->cpu_pod[] */ 6691 pt->pod_cpus = kcalloc(pt->nr_pods, sizeof(pt->pod_cpus[0]), GFP_KERNEL); 6692 pt->pod_node = kcalloc(pt->nr_pods, sizeof(pt->pod_node[0]), GFP_KERNEL); 6693 BUG_ON(!pt->pod_cpus || !pt->pod_node); 6694 6695 for (pod = 0; pod < pt->nr_pods; pod++) 6696 BUG_ON(!zalloc_cpumask_var(&pt->pod_cpus[pod], GFP_KERNEL)); 6697 6698 for_each_possible_cpu(cpu) { 6699 cpumask_set_cpu(cpu, pt->pod_cpus[pt->cpu_pod[cpu]]); 6700 pt->pod_node[pt->cpu_pod[cpu]] = cpu_to_node(cpu); 6701 } 6702 } 6703 6704 static bool __init cpus_dont_share(int cpu0, int cpu1) 6705 { 6706 return false; 6707 } 6708 6709 static bool __init cpus_share_smt(int cpu0, int cpu1) 6710 { 6711 #ifdef CONFIG_SCHED_SMT 6712 return cpumask_test_cpu(cpu0, cpu_smt_mask(cpu1)); 6713 #else 6714 return false; 6715 #endif 6716 } 6717 6718 static bool __init cpus_share_numa(int cpu0, int cpu1) 6719 { 6720 return cpu_to_node(cpu0) == cpu_to_node(cpu1); 6721 } 6722 6723 /** 6724 * workqueue_init_topology - initialize CPU pods for unbound workqueues 6725 * 6726 * This is the third step of there-staged workqueue subsystem initialization and 6727 * invoked after SMP and topology information are fully initialized. It 6728 * initializes the unbound CPU pods accordingly. 6729 */ 6730 void __init workqueue_init_topology(void) 6731 { 6732 struct workqueue_struct *wq; 6733 int cpu; 6734 6735 init_pod_type(&wq_pod_types[WQ_AFFN_CPU], cpus_dont_share); 6736 init_pod_type(&wq_pod_types[WQ_AFFN_SMT], cpus_share_smt); 6737 init_pod_type(&wq_pod_types[WQ_AFFN_CACHE], cpus_share_cache); 6738 init_pod_type(&wq_pod_types[WQ_AFFN_NUMA], cpus_share_numa); 6739 6740 mutex_lock(&wq_pool_mutex); 6741 6742 /* 6743 * Workqueues allocated earlier would have all CPUs sharing the default 6744 * worker pool. Explicitly call wq_update_pod() on all workqueue and CPU 6745 * combinations to apply per-pod sharing. 
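 *
 * Which pod type a workqueue uses is governed by its affinity scope
 * (cpu, smt, cache, numa or system); the system-wide default can be
 * changed through the module parameter defined above, e.g. on the
 * kernel command line:
 *
 *	workqueue.default_affinity_scope=cache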
6746 */ 6747 list_for_each_entry(wq, &workqueues, list) { 6748 for_each_online_cpu(cpu) { 6749 wq_update_pod(wq, cpu, cpu, true); 6750 } 6751 } 6752 6753 mutex_unlock(&wq_pool_mutex); 6754 } 6755 6756 void __warn_flushing_systemwide_wq(void) 6757 { 6758 pr_warn("WARNING: Flushing system-wide workqueues will be prohibited in near future.\n"); 6759 dump_stack(); 6760 } 6761 EXPORT_SYMBOL(__warn_flushing_systemwide_wq); 6762 6763 static int __init workqueue_unbound_cpus_setup(char *str) 6764 { 6765 if (cpulist_parse(str, &wq_cmdline_cpumask) < 0) { 6766 cpumask_clear(&wq_cmdline_cpumask); 6767 pr_warn("workqueue.unbound_cpus: incorrect CPU range, using default\n"); 6768 } 6769 6770 return 1; 6771 } 6772 __setup("workqueue.unbound_cpus=", workqueue_unbound_cpus_setup); 6773
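
/*
 * Usage note for the parameter above: booting with e.g.
 *
 *	workqueue.unbound_cpus=0-7
 *
 * limits wq_unbound_cpumask, and therefore all unbound workqueues, to
 * CPUs 0-7. An invalid range is ignored with a warning as handled in
 * workqueue_unbound_cpus_setup() above.
 */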