1 /* SPDX-License-Identifier: GPL-2.0 */ 2 /* 3 * workqueue.h --- work queue handling for Linux. 4 */ 5 6 #ifndef _LINUX_WORKQUEUE_H 7 #define _LINUX_WORKQUEUE_H 8 9 #include <linux/timer.h> 10 #include <linux/linkage.h> 11 #include <linux/bitops.h> 12 #include <linux/lockdep.h> 13 #include <linux/threads.h> 14 #include <linux/atomic.h> 15 #include <linux/cpumask.h> 16 #include <linux/rcupdate.h> 17 #include <linux/workqueue_types.h> 18 19 /* 20 * The first word is the work queue pointer and the flags rolled into 21 * one 22 */ 23 #define work_data_bits(work) ((unsigned long *)(&(work)->data)) 24 25 enum work_bits { 26 WORK_STRUCT_PENDING_BIT = 0, /* work item is pending execution */ 27 WORK_STRUCT_INACTIVE_BIT, /* work item is inactive */ 28 WORK_STRUCT_PWQ_BIT, /* data points to pwq */ 29 WORK_STRUCT_LINKED_BIT, /* next work is linked to this one */ 30 #ifdef CONFIG_DEBUG_OBJECTS_WORK 31 WORK_STRUCT_STATIC_BIT, /* static initializer (debugobjects) */ 32 #endif 33 WORK_STRUCT_FLAG_BITS, 34 35 /* color for workqueue flushing */ 36 WORK_STRUCT_COLOR_SHIFT = WORK_STRUCT_FLAG_BITS, 37 WORK_STRUCT_COLOR_BITS = 4, 38 39 /* 40 * When WORK_STRUCT_PWQ is set, reserve 8 bits off of pwq pointer w/ 41 * debugobjects turned off. This makes pwqs aligned to 256 bytes (512 42 * bytes w/ DEBUG_OBJECTS_WORK) and allows 16 workqueue flush colors. 43 * 44 * MSB 45 * [ pwq pointer ] [ flush color ] [ STRUCT flags ] 46 * 4 bits 4 or 5 bits 47 */ 48 WORK_STRUCT_PWQ_SHIFT = WORK_STRUCT_COLOR_SHIFT + WORK_STRUCT_COLOR_BITS, 49 50 /* 51 * data contains off-queue information when !WORK_STRUCT_PWQ. 52 * 53 * MSB 54 * [ pool ID ] [ OFFQ flags ] [ STRUCT flags ] 55 * 1 bit 4 or 5 bits 56 */ 57 WORK_OFFQ_FLAG_SHIFT = WORK_STRUCT_FLAG_BITS, 58 WORK_OFFQ_CANCELING_BIT = WORK_OFFQ_FLAG_SHIFT, 59 WORK_OFFQ_FLAG_END, 60 WORK_OFFQ_FLAG_BITS = WORK_OFFQ_FLAG_END - WORK_OFFQ_FLAG_SHIFT, 61 62 /* 63 * When a work item is off queue, the high bits encode off-queue flags 64 * and the last pool it was on. Cap pool ID to 31 bits and use the 65 * highest number to indicate that no pool is associated. 66 */ 67 WORK_OFFQ_POOL_SHIFT = WORK_OFFQ_FLAG_SHIFT + WORK_OFFQ_FLAG_BITS, 68 WORK_OFFQ_LEFT = BITS_PER_LONG - WORK_OFFQ_POOL_SHIFT, 69 WORK_OFFQ_POOL_BITS = WORK_OFFQ_LEFT <= 31 ? WORK_OFFQ_LEFT : 31, 70 }; 71 72 enum work_flags { 73 WORK_STRUCT_PENDING = 1 << WORK_STRUCT_PENDING_BIT, 74 WORK_STRUCT_INACTIVE = 1 << WORK_STRUCT_INACTIVE_BIT, 75 WORK_STRUCT_PWQ = 1 << WORK_STRUCT_PWQ_BIT, 76 WORK_STRUCT_LINKED = 1 << WORK_STRUCT_LINKED_BIT, 77 #ifdef CONFIG_DEBUG_OBJECTS_WORK 78 WORK_STRUCT_STATIC = 1 << WORK_STRUCT_STATIC_BIT, 79 #else 80 WORK_STRUCT_STATIC = 0, 81 #endif 82 }; 83 84 enum wq_misc_consts { 85 WORK_NR_COLORS = (1 << WORK_STRUCT_COLOR_BITS), 86 87 /* not bound to any CPU, prefer the local CPU */ 88 WORK_CPU_UNBOUND = NR_CPUS, 89 90 /* bit mask for work_busy() return values */ 91 WORK_BUSY_PENDING = 1 << 0, 92 WORK_BUSY_RUNNING = 1 << 1, 93 94 /* maximum string length for set_worker_desc() */ 95 WORKER_DESC_LEN = 24, 96 }; 97 98 /* Convenience constants - of type 'unsigned long', not 'enum'! */ 99 #define WORK_OFFQ_CANCELING (1ul << WORK_OFFQ_CANCELING_BIT) 100 #define WORK_OFFQ_FLAG_MASK (((1ul << WORK_OFFQ_FLAG_BITS) - 1) << WORK_OFFQ_FLAG_SHIFT) 101 #define WORK_OFFQ_POOL_NONE ((1ul << WORK_OFFQ_POOL_BITS) - 1) 102 #define WORK_STRUCT_NO_POOL (WORK_OFFQ_POOL_NONE << WORK_OFFQ_POOL_SHIFT) 103 #define WORK_STRUCT_PWQ_MASK (~((1ul << WORK_STRUCT_PWQ_SHIFT) - 1)) 104 105 #define WORK_DATA_INIT() ATOMIC_LONG_INIT((unsigned long)WORK_STRUCT_NO_POOL) 106 #define WORK_DATA_STATIC_INIT() \ 107 ATOMIC_LONG_INIT((unsigned long)(WORK_STRUCT_NO_POOL | WORK_STRUCT_STATIC)) 108 109 struct delayed_work { 110 struct work_struct work; 111 struct timer_list timer; 112 113 /* target workqueue and CPU ->timer uses to queue ->work */ 114 struct workqueue_struct *wq; 115 int cpu; 116 }; 117 118 struct rcu_work { 119 struct work_struct work; 120 struct rcu_head rcu; 121 122 /* target workqueue ->rcu uses to queue ->work */ 123 struct workqueue_struct *wq; 124 }; 125 126 enum wq_affn_scope { 127 WQ_AFFN_DFL, /* use system default */ 128 WQ_AFFN_CPU, /* one pod per CPU */ 129 WQ_AFFN_SMT, /* one pod poer SMT */ 130 WQ_AFFN_CACHE, /* one pod per LLC */ 131 WQ_AFFN_NUMA, /* one pod per NUMA node */ 132 WQ_AFFN_SYSTEM, /* one pod across the whole system */ 133 134 WQ_AFFN_NR_TYPES, 135 }; 136 137 /** 138 * struct workqueue_attrs - A struct for workqueue attributes. 139 * 140 * This can be used to change attributes of an unbound workqueue. 141 */ 142 struct workqueue_attrs { 143 /** 144 * @nice: nice level 145 */ 146 int nice; 147 148 /** 149 * @cpumask: allowed CPUs 150 * 151 * Work items in this workqueue are affine to these CPUs and not allowed 152 * to execute on other CPUs. A pool serving a workqueue must have the 153 * same @cpumask. 154 */ 155 cpumask_var_t cpumask; 156 157 /** 158 * @__pod_cpumask: internal attribute used to create per-pod pools 159 * 160 * Internal use only. 161 * 162 * Per-pod unbound worker pools are used to improve locality. Always a 163 * subset of ->cpumask. A workqueue can be associated with multiple 164 * worker pools with disjoint @__pod_cpumask's. Whether the enforcement 165 * of a pool's @__pod_cpumask is strict depends on @affn_strict. 166 */ 167 cpumask_var_t __pod_cpumask; 168 169 /** 170 * @affn_strict: affinity scope is strict 171 * 172 * If clear, workqueue will make a best-effort attempt at starting the 173 * worker inside @__pod_cpumask but the scheduler is free to migrate it 174 * outside. 175 * 176 * If set, workers are only allowed to run inside @__pod_cpumask. 177 */ 178 bool affn_strict; 179 180 /* 181 * Below fields aren't properties of a worker_pool. They only modify how 182 * :c:func:`apply_workqueue_attrs` select pools and thus don't 183 * participate in pool hash calculations or equality comparisons. 184 */ 185 186 /** 187 * @affn_scope: unbound CPU affinity scope 188 * 189 * CPU pods are used to improve execution locality of unbound work 190 * items. There are multiple pod types, one for each wq_affn_scope, and 191 * every CPU in the system belongs to one pod in every pod type. CPUs 192 * that belong to the same pod share the worker pool. For example, 193 * selecting %WQ_AFFN_NUMA makes the workqueue use a separate worker 194 * pool for each NUMA node. 195 */ 196 enum wq_affn_scope affn_scope; 197 198 /** 199 * @ordered: work items must be executed one by one in queueing order 200 */ 201 bool ordered; 202 }; 203 204 static inline struct delayed_work *to_delayed_work(struct work_struct *work) 205 { 206 return container_of(work, struct delayed_work, work); 207 } 208 209 static inline struct rcu_work *to_rcu_work(struct work_struct *work) 210 { 211 return container_of(work, struct rcu_work, work); 212 } 213 214 struct execute_work { 215 struct work_struct work; 216 }; 217 218 #ifdef CONFIG_LOCKDEP 219 /* 220 * NB: because we have to copy the lockdep_map, setting _key 221 * here is required, otherwise it could get initialised to the 222 * copy of the lockdep_map! 223 */ 224 #define __WORK_INIT_LOCKDEP_MAP(n, k) \ 225 .lockdep_map = STATIC_LOCKDEP_MAP_INIT(n, k), 226 #else 227 #define __WORK_INIT_LOCKDEP_MAP(n, k) 228 #endif 229 230 #define __WORK_INITIALIZER(n, f) { \ 231 .data = WORK_DATA_STATIC_INIT(), \ 232 .entry = { &(n).entry, &(n).entry }, \ 233 .func = (f), \ 234 __WORK_INIT_LOCKDEP_MAP(#n, &(n)) \ 235 } 236 237 #define __DELAYED_WORK_INITIALIZER(n, f, tflags) { \ 238 .work = __WORK_INITIALIZER((n).work, (f)), \ 239 .timer = __TIMER_INITIALIZER(delayed_work_timer_fn,\ 240 (tflags) | TIMER_IRQSAFE), \ 241 } 242 243 #define DECLARE_WORK(n, f) \ 244 struct work_struct n = __WORK_INITIALIZER(n, f) 245 246 #define DECLARE_DELAYED_WORK(n, f) \ 247 struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, 0) 248 249 #define DECLARE_DEFERRABLE_WORK(n, f) \ 250 struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, TIMER_DEFERRABLE) 251 252 #ifdef CONFIG_DEBUG_OBJECTS_WORK 253 extern void __init_work(struct work_struct *work, int onstack); 254 extern void destroy_work_on_stack(struct work_struct *work); 255 extern void destroy_delayed_work_on_stack(struct delayed_work *work); 256 static inline unsigned int work_static(struct work_struct *work) 257 { 258 return *work_data_bits(work) & WORK_STRUCT_STATIC; 259 } 260 #else 261 static inline void __init_work(struct work_struct *work, int onstack) { } 262 static inline void destroy_work_on_stack(struct work_struct *work) { } 263 static inline void destroy_delayed_work_on_stack(struct delayed_work *work) { } 264 static inline unsigned int work_static(struct work_struct *work) { return 0; } 265 #endif 266 267 /* 268 * initialize all of a work item in one go 269 * 270 * NOTE! No point in using "atomic_long_set()": using a direct 271 * assignment of the work data initializer allows the compiler 272 * to generate better code. 273 */ 274 #ifdef CONFIG_LOCKDEP 275 #define __INIT_WORK_KEY(_work, _func, _onstack, _key) \ 276 do { \ 277 __init_work((_work), _onstack); \ 278 (_work)->data = (atomic_long_t) WORK_DATA_INIT(); \ 279 lockdep_init_map(&(_work)->lockdep_map, "(work_completion)"#_work, (_key), 0); \ 280 INIT_LIST_HEAD(&(_work)->entry); \ 281 (_work)->func = (_func); \ 282 } while (0) 283 #else 284 #define __INIT_WORK_KEY(_work, _func, _onstack, _key) \ 285 do { \ 286 __init_work((_work), _onstack); \ 287 (_work)->data = (atomic_long_t) WORK_DATA_INIT(); \ 288 INIT_LIST_HEAD(&(_work)->entry); \ 289 (_work)->func = (_func); \ 290 } while (0) 291 #endif 292 293 #define __INIT_WORK(_work, _func, _onstack) \ 294 do { \ 295 static __maybe_unused struct lock_class_key __key; \ 296 \ 297 __INIT_WORK_KEY(_work, _func, _onstack, &__key); \ 298 } while (0) 299 300 #define INIT_WORK(_work, _func) \ 301 __INIT_WORK((_work), (_func), 0) 302 303 #define INIT_WORK_ONSTACK(_work, _func) \ 304 __INIT_WORK((_work), (_func), 1) 305 306 #define INIT_WORK_ONSTACK_KEY(_work, _func, _key) \ 307 __INIT_WORK_KEY((_work), (_func), 1, _key) 308 309 #define __INIT_DELAYED_WORK(_work, _func, _tflags) \ 310 do { \ 311 INIT_WORK(&(_work)->work, (_func)); \ 312 __init_timer(&(_work)->timer, \ 313 delayed_work_timer_fn, \ 314 (_tflags) | TIMER_IRQSAFE); \ 315 } while (0) 316 317 #define __INIT_DELAYED_WORK_ONSTACK(_work, _func, _tflags) \ 318 do { \ 319 INIT_WORK_ONSTACK(&(_work)->work, (_func)); \ 320 __init_timer_on_stack(&(_work)->timer, \ 321 delayed_work_timer_fn, \ 322 (_tflags) | TIMER_IRQSAFE); \ 323 } while (0) 324 325 #define INIT_DELAYED_WORK(_work, _func) \ 326 __INIT_DELAYED_WORK(_work, _func, 0) 327 328 #define INIT_DELAYED_WORK_ONSTACK(_work, _func) \ 329 __INIT_DELAYED_WORK_ONSTACK(_work, _func, 0) 330 331 #define INIT_DEFERRABLE_WORK(_work, _func) \ 332 __INIT_DELAYED_WORK(_work, _func, TIMER_DEFERRABLE) 333 334 #define INIT_DEFERRABLE_WORK_ONSTACK(_work, _func) \ 335 __INIT_DELAYED_WORK_ONSTACK(_work, _func, TIMER_DEFERRABLE) 336 337 #define INIT_RCU_WORK(_work, _func) \ 338 INIT_WORK(&(_work)->work, (_func)) 339 340 #define INIT_RCU_WORK_ONSTACK(_work, _func) \ 341 INIT_WORK_ONSTACK(&(_work)->work, (_func)) 342 343 /** 344 * work_pending - Find out whether a work item is currently pending 345 * @work: The work item in question 346 */ 347 #define work_pending(work) \ 348 test_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)) 349 350 /** 351 * delayed_work_pending - Find out whether a delayable work item is currently 352 * pending 353 * @w: The work item in question 354 */ 355 #define delayed_work_pending(w) \ 356 work_pending(&(w)->work) 357 358 /* 359 * Workqueue flags and constants. For details, please refer to 360 * Documentation/core-api/workqueue.rst. 361 */ 362 enum wq_flags { 363 WQ_BH = 1 << 0, /* execute in bottom half (softirq) context */ 364 WQ_UNBOUND = 1 << 1, /* not bound to any cpu */ 365 WQ_FREEZABLE = 1 << 2, /* freeze during suspend */ 366 WQ_MEM_RECLAIM = 1 << 3, /* may be used for memory reclaim */ 367 WQ_HIGHPRI = 1 << 4, /* high priority */ 368 WQ_CPU_INTENSIVE = 1 << 5, /* cpu intensive workqueue */ 369 WQ_SYSFS = 1 << 6, /* visible in sysfs, see workqueue_sysfs_register() */ 370 371 /* 372 * Per-cpu workqueues are generally preferred because they tend to 373 * show better performance thanks to cache locality. Per-cpu 374 * workqueues exclude the scheduler from choosing the CPU to 375 * execute the worker threads, which has an unfortunate side effect 376 * of increasing power consumption. 377 * 378 * The scheduler considers a CPU idle if it doesn't have any task 379 * to execute and tries to keep idle cores idle to conserve power; 380 * however, for example, a per-cpu work item scheduled from an 381 * interrupt handler on an idle CPU will force the scheduler to 382 * execute the work item on that CPU breaking the idleness, which in 383 * turn may lead to more scheduling choices which are sub-optimal 384 * in terms of power consumption. 385 * 386 * Workqueues marked with WQ_POWER_EFFICIENT are per-cpu by default 387 * but become unbound if workqueue.power_efficient kernel param is 388 * specified. Per-cpu workqueues which are identified to 389 * contribute significantly to power-consumption are identified and 390 * marked with this flag and enabling the power_efficient mode 391 * leads to noticeable power saving at the cost of small 392 * performance disadvantage. 393 * 394 * http://thread.gmane.org/gmane.linux.kernel/1480396 395 */ 396 WQ_POWER_EFFICIENT = 1 << 7, 397 398 __WQ_DESTROYING = 1 << 15, /* internal: workqueue is destroying */ 399 __WQ_DRAINING = 1 << 16, /* internal: workqueue is draining */ 400 __WQ_ORDERED = 1 << 17, /* internal: workqueue is ordered */ 401 __WQ_LEGACY = 1 << 18, /* internal: create*_workqueue() */ 402 403 /* BH wq only allows the following flags */ 404 __WQ_BH_ALLOWS = WQ_BH | WQ_HIGHPRI, 405 }; 406 407 enum wq_consts { 408 WQ_MAX_ACTIVE = 512, /* I like 512, better ideas? */ 409 WQ_UNBOUND_MAX_ACTIVE = WQ_MAX_ACTIVE, 410 WQ_DFL_ACTIVE = WQ_MAX_ACTIVE / 2, 411 412 /* 413 * Per-node default cap on min_active. Unless explicitly set, min_active 414 * is set to min(max_active, WQ_DFL_MIN_ACTIVE). For more details, see 415 * workqueue_struct->min_active definition. 416 */ 417 WQ_DFL_MIN_ACTIVE = 8, 418 }; 419 420 /* 421 * System-wide workqueues which are always present. 422 * 423 * system_wq is the one used by schedule[_delayed]_work[_on](). 424 * Multi-CPU multi-threaded. There are users which expect relatively 425 * short queue flush time. Don't queue works which can run for too 426 * long. 427 * 428 * system_highpri_wq is similar to system_wq but for work items which 429 * require WQ_HIGHPRI. 430 * 431 * system_long_wq is similar to system_wq but may host long running 432 * works. Queue flushing might take relatively long. 433 * 434 * system_unbound_wq is unbound workqueue. Workers are not bound to 435 * any specific CPU, not concurrency managed, and all queued works are 436 * executed immediately as long as max_active limit is not reached and 437 * resources are available. 438 * 439 * system_freezable_wq is equivalent to system_wq except that it's 440 * freezable. 441 * 442 * *_power_efficient_wq are inclined towards saving power and converted 443 * into WQ_UNBOUND variants if 'wq_power_efficient' is enabled; otherwise, 444 * they are same as their non-power-efficient counterparts - e.g. 445 * system_power_efficient_wq is identical to system_wq if 446 * 'wq_power_efficient' is disabled. See WQ_POWER_EFFICIENT for more info. 447 * 448 * system_bh[_highpri]_wq are convenience interface to softirq. BH work items 449 * are executed in the queueing CPU's BH context in the queueing order. 450 */ 451 extern struct workqueue_struct *system_wq; 452 extern struct workqueue_struct *system_highpri_wq; 453 extern struct workqueue_struct *system_long_wq; 454 extern struct workqueue_struct *system_unbound_wq; 455 extern struct workqueue_struct *system_freezable_wq; 456 extern struct workqueue_struct *system_power_efficient_wq; 457 extern struct workqueue_struct *system_freezable_power_efficient_wq; 458 extern struct workqueue_struct *system_bh_wq; 459 extern struct workqueue_struct *system_bh_highpri_wq; 460 461 void workqueue_softirq_action(bool highpri); 462 void workqueue_softirq_dead(unsigned int cpu); 463 464 /** 465 * alloc_workqueue - allocate a workqueue 466 * @fmt: printf format for the name of the workqueue 467 * @flags: WQ_* flags 468 * @max_active: max in-flight work items, 0 for default 469 * remaining args: args for @fmt 470 * 471 * For a per-cpu workqueue, @max_active limits the number of in-flight work 472 * items for each CPU. e.g. @max_active of 1 indicates that each CPU can be 473 * executing at most one work item for the workqueue. 474 * 475 * For unbound workqueues, @max_active limits the number of in-flight work items 476 * for the whole system. e.g. @max_active of 16 indicates that that there can be 477 * at most 16 work items executing for the workqueue in the whole system. 478 * 479 * As sharing the same active counter for an unbound workqueue across multiple 480 * NUMA nodes can be expensive, @max_active is distributed to each NUMA node 481 * according to the proportion of the number of online CPUs and enforced 482 * independently. 483 * 484 * Depending on online CPU distribution, a node may end up with per-node 485 * max_active which is significantly lower than @max_active, which can lead to 486 * deadlocks if the per-node concurrency limit is lower than the maximum number 487 * of interdependent work items for the workqueue. 488 * 489 * To guarantee forward progress regardless of online CPU distribution, the 490 * concurrency limit on every node is guaranteed to be equal to or greater than 491 * min_active which is set to min(@max_active, %WQ_DFL_MIN_ACTIVE). This means 492 * that the sum of per-node max_active's may be larger than @max_active. 493 * 494 * For detailed information on %WQ_* flags, please refer to 495 * Documentation/core-api/workqueue.rst. 496 * 497 * RETURNS: 498 * Pointer to the allocated workqueue on success, %NULL on failure. 499 */ 500 __printf(1, 4) struct workqueue_struct * 501 alloc_workqueue(const char *fmt, unsigned int flags, int max_active, ...); 502 503 /** 504 * alloc_ordered_workqueue - allocate an ordered workqueue 505 * @fmt: printf format for the name of the workqueue 506 * @flags: WQ_* flags (only WQ_FREEZABLE and WQ_MEM_RECLAIM are meaningful) 507 * @args: args for @fmt 508 * 509 * Allocate an ordered workqueue. An ordered workqueue executes at 510 * most one work item at any given time in the queued order. They are 511 * implemented as unbound workqueues with @max_active of one. 512 * 513 * RETURNS: 514 * Pointer to the allocated workqueue on success, %NULL on failure. 515 */ 516 #define alloc_ordered_workqueue(fmt, flags, args...) \ 517 alloc_workqueue(fmt, WQ_UNBOUND | __WQ_ORDERED | (flags), 1, ##args) 518 519 #define create_workqueue(name) \ 520 alloc_workqueue("%s", __WQ_LEGACY | WQ_MEM_RECLAIM, 1, (name)) 521 #define create_freezable_workqueue(name) \ 522 alloc_workqueue("%s", __WQ_LEGACY | WQ_FREEZABLE | WQ_UNBOUND | \ 523 WQ_MEM_RECLAIM, 1, (name)) 524 #define create_singlethread_workqueue(name) \ 525 alloc_ordered_workqueue("%s", __WQ_LEGACY | WQ_MEM_RECLAIM, name) 526 527 #define from_work(var, callback_work, work_fieldname) \ 528 container_of(callback_work, typeof(*var), work_fieldname) 529 530 extern void destroy_workqueue(struct workqueue_struct *wq); 531 532 struct workqueue_attrs *alloc_workqueue_attrs(void); 533 void free_workqueue_attrs(struct workqueue_attrs *attrs); 534 int apply_workqueue_attrs(struct workqueue_struct *wq, 535 const struct workqueue_attrs *attrs); 536 extern int workqueue_unbound_exclude_cpumask(cpumask_var_t cpumask); 537 538 extern bool queue_work_on(int cpu, struct workqueue_struct *wq, 539 struct work_struct *work); 540 extern bool queue_work_node(int node, struct workqueue_struct *wq, 541 struct work_struct *work); 542 extern bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq, 543 struct delayed_work *work, unsigned long delay); 544 extern bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq, 545 struct delayed_work *dwork, unsigned long delay); 546 extern bool queue_rcu_work(struct workqueue_struct *wq, struct rcu_work *rwork); 547 548 extern void __flush_workqueue(struct workqueue_struct *wq); 549 extern void drain_workqueue(struct workqueue_struct *wq); 550 551 extern int schedule_on_each_cpu(work_func_t func); 552 553 int execute_in_process_context(work_func_t fn, struct execute_work *); 554 555 extern bool flush_work(struct work_struct *work); 556 extern bool cancel_work(struct work_struct *work); 557 extern bool cancel_work_sync(struct work_struct *work); 558 559 extern bool flush_delayed_work(struct delayed_work *dwork); 560 extern bool cancel_delayed_work(struct delayed_work *dwork); 561 extern bool cancel_delayed_work_sync(struct delayed_work *dwork); 562 563 extern bool flush_rcu_work(struct rcu_work *rwork); 564 565 extern void workqueue_set_max_active(struct workqueue_struct *wq, 566 int max_active); 567 extern void workqueue_set_min_active(struct workqueue_struct *wq, 568 int min_active); 569 extern struct work_struct *current_work(void); 570 extern bool current_is_workqueue_rescuer(void); 571 extern bool workqueue_congested(int cpu, struct workqueue_struct *wq); 572 extern unsigned int work_busy(struct work_struct *work); 573 extern __printf(1, 2) void set_worker_desc(const char *fmt, ...); 574 extern void print_worker_info(const char *log_lvl, struct task_struct *task); 575 extern void show_all_workqueues(void); 576 extern void show_freezable_workqueues(void); 577 extern void show_one_workqueue(struct workqueue_struct *wq); 578 extern void wq_worker_comm(char *buf, size_t size, struct task_struct *task); 579 580 /** 581 * queue_work - queue work on a workqueue 582 * @wq: workqueue to use 583 * @work: work to queue 584 * 585 * Returns %false if @work was already on a queue, %true otherwise. 586 * 587 * We queue the work to the CPU on which it was submitted, but if the CPU dies 588 * it can be processed by another CPU. 589 * 590 * Memory-ordering properties: If it returns %true, guarantees that all stores 591 * preceding the call to queue_work() in the program order will be visible from 592 * the CPU which will execute @work by the time such work executes, e.g., 593 * 594 * { x is initially 0 } 595 * 596 * CPU0 CPU1 597 * 598 * WRITE_ONCE(x, 1); [ @work is being executed ] 599 * r0 = queue_work(wq, work); r1 = READ_ONCE(x); 600 * 601 * Forbids: r0 == true && r1 == 0 602 */ 603 static inline bool queue_work(struct workqueue_struct *wq, 604 struct work_struct *work) 605 { 606 return queue_work_on(WORK_CPU_UNBOUND, wq, work); 607 } 608 609 /** 610 * queue_delayed_work - queue work on a workqueue after delay 611 * @wq: workqueue to use 612 * @dwork: delayable work to queue 613 * @delay: number of jiffies to wait before queueing 614 * 615 * Equivalent to queue_delayed_work_on() but tries to use the local CPU. 616 */ 617 static inline bool queue_delayed_work(struct workqueue_struct *wq, 618 struct delayed_work *dwork, 619 unsigned long delay) 620 { 621 return queue_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay); 622 } 623 624 /** 625 * mod_delayed_work - modify delay of or queue a delayed work 626 * @wq: workqueue to use 627 * @dwork: work to queue 628 * @delay: number of jiffies to wait before queueing 629 * 630 * mod_delayed_work_on() on local CPU. 631 */ 632 static inline bool mod_delayed_work(struct workqueue_struct *wq, 633 struct delayed_work *dwork, 634 unsigned long delay) 635 { 636 return mod_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay); 637 } 638 639 /** 640 * schedule_work_on - put work task on a specific cpu 641 * @cpu: cpu to put the work task on 642 * @work: job to be done 643 * 644 * This puts a job on a specific cpu 645 */ 646 static inline bool schedule_work_on(int cpu, struct work_struct *work) 647 { 648 return queue_work_on(cpu, system_wq, work); 649 } 650 651 /** 652 * schedule_work - put work task in global workqueue 653 * @work: job to be done 654 * 655 * Returns %false if @work was already on the kernel-global workqueue and 656 * %true otherwise. 657 * 658 * This puts a job in the kernel-global workqueue if it was not already 659 * queued and leaves it in the same position on the kernel-global 660 * workqueue otherwise. 661 * 662 * Shares the same memory-ordering properties of queue_work(), cf. the 663 * DocBook header of queue_work(). 664 */ 665 static inline bool schedule_work(struct work_struct *work) 666 { 667 return queue_work(system_wq, work); 668 } 669 670 /* 671 * Detect attempt to flush system-wide workqueues at compile time when possible. 672 * Warn attempt to flush system-wide workqueues at runtime. 673 * 674 * See https://lkml.kernel.org/r/[email protected] 675 * for reasons and steps for converting system-wide workqueues into local workqueues. 676 */ 677 extern void __warn_flushing_systemwide_wq(void) 678 __compiletime_warning("Please avoid flushing system-wide workqueues."); 679 680 /* Please stop using this function, for this function will be removed in near future. */ 681 #define flush_scheduled_work() \ 682 ({ \ 683 __warn_flushing_systemwide_wq(); \ 684 __flush_workqueue(system_wq); \ 685 }) 686 687 #define flush_workqueue(wq) \ 688 ({ \ 689 struct workqueue_struct *_wq = (wq); \ 690 \ 691 if ((__builtin_constant_p(_wq == system_wq) && \ 692 _wq == system_wq) || \ 693 (__builtin_constant_p(_wq == system_highpri_wq) && \ 694 _wq == system_highpri_wq) || \ 695 (__builtin_constant_p(_wq == system_long_wq) && \ 696 _wq == system_long_wq) || \ 697 (__builtin_constant_p(_wq == system_unbound_wq) && \ 698 _wq == system_unbound_wq) || \ 699 (__builtin_constant_p(_wq == system_freezable_wq) && \ 700 _wq == system_freezable_wq) || \ 701 (__builtin_constant_p(_wq == system_power_efficient_wq) && \ 702 _wq == system_power_efficient_wq) || \ 703 (__builtin_constant_p(_wq == system_freezable_power_efficient_wq) && \ 704 _wq == system_freezable_power_efficient_wq)) \ 705 __warn_flushing_systemwide_wq(); \ 706 __flush_workqueue(_wq); \ 707 }) 708 709 /** 710 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay 711 * @cpu: cpu to use 712 * @dwork: job to be done 713 * @delay: number of jiffies to wait 714 * 715 * After waiting for a given time this puts a job in the kernel-global 716 * workqueue on the specified CPU. 717 */ 718 static inline bool schedule_delayed_work_on(int cpu, struct delayed_work *dwork, 719 unsigned long delay) 720 { 721 return queue_delayed_work_on(cpu, system_wq, dwork, delay); 722 } 723 724 /** 725 * schedule_delayed_work - put work task in global workqueue after delay 726 * @dwork: job to be done 727 * @delay: number of jiffies to wait or 0 for immediate execution 728 * 729 * After waiting for a given time this puts a job in the kernel-global 730 * workqueue. 731 */ 732 static inline bool schedule_delayed_work(struct delayed_work *dwork, 733 unsigned long delay) 734 { 735 return queue_delayed_work(system_wq, dwork, delay); 736 } 737 738 #ifndef CONFIG_SMP 739 static inline long work_on_cpu(int cpu, long (*fn)(void *), void *arg) 740 { 741 return fn(arg); 742 } 743 static inline long work_on_cpu_safe(int cpu, long (*fn)(void *), void *arg) 744 { 745 return fn(arg); 746 } 747 #else 748 long work_on_cpu_key(int cpu, long (*fn)(void *), 749 void *arg, struct lock_class_key *key); 750 /* 751 * A new key is defined for each caller to make sure the work 752 * associated with the function doesn't share its locking class. 753 */ 754 #define work_on_cpu(_cpu, _fn, _arg) \ 755 ({ \ 756 static struct lock_class_key __key; \ 757 \ 758 work_on_cpu_key(_cpu, _fn, _arg, &__key); \ 759 }) 760 761 long work_on_cpu_safe_key(int cpu, long (*fn)(void *), 762 void *arg, struct lock_class_key *key); 763 764 /* 765 * A new key is defined for each caller to make sure the work 766 * associated with the function doesn't share its locking class. 767 */ 768 #define work_on_cpu_safe(_cpu, _fn, _arg) \ 769 ({ \ 770 static struct lock_class_key __key; \ 771 \ 772 work_on_cpu_safe_key(_cpu, _fn, _arg, &__key); \ 773 }) 774 #endif /* CONFIG_SMP */ 775 776 #ifdef CONFIG_FREEZER 777 extern void freeze_workqueues_begin(void); 778 extern bool freeze_workqueues_busy(void); 779 extern void thaw_workqueues(void); 780 #endif /* CONFIG_FREEZER */ 781 782 #ifdef CONFIG_SYSFS 783 int workqueue_sysfs_register(struct workqueue_struct *wq); 784 #else /* CONFIG_SYSFS */ 785 static inline int workqueue_sysfs_register(struct workqueue_struct *wq) 786 { return 0; } 787 #endif /* CONFIG_SYSFS */ 788 789 #ifdef CONFIG_WQ_WATCHDOG 790 void wq_watchdog_touch(int cpu); 791 #else /* CONFIG_WQ_WATCHDOG */ 792 static inline void wq_watchdog_touch(int cpu) { } 793 #endif /* CONFIG_WQ_WATCHDOG */ 794 795 #ifdef CONFIG_SMP 796 int workqueue_prepare_cpu(unsigned int cpu); 797 int workqueue_online_cpu(unsigned int cpu); 798 int workqueue_offline_cpu(unsigned int cpu); 799 #endif 800 801 void __init workqueue_init_early(void); 802 void __init workqueue_init(void); 803 void __init workqueue_init_topology(void); 804 805 #endif 806