1 /* SPDX-License-Identifier: GPL-2.0 */ 2 /* 3 * workqueue.h --- work queue handling for Linux. 4 */ 5 6 #ifndef _LINUX_WORKQUEUE_H 7 #define _LINUX_WORKQUEUE_H 8 9 #include <linux/timer.h> 10 #include <linux/linkage.h> 11 #include <linux/bitops.h> 12 #include <linux/lockdep.h> 13 #include <linux/threads.h> 14 #include <linux/atomic.h> 15 #include <linux/cpumask.h> 16 #include <linux/rcupdate.h> 17 #include <linux/workqueue_types.h> 18 19 /* 20 * The first word is the work queue pointer and the flags rolled into 21 * one 22 */ 23 #define work_data_bits(work) ((unsigned long *)(&(work)->data)) 24 25 enum work_bits { 26 WORK_STRUCT_PENDING_BIT = 0, /* work item is pending execution */ 27 WORK_STRUCT_INACTIVE_BIT= 1, /* work item is inactive */ 28 WORK_STRUCT_PWQ_BIT = 2, /* data points to pwq */ 29 WORK_STRUCT_LINKED_BIT = 3, /* next work is linked to this one */ 30 #ifdef CONFIG_DEBUG_OBJECTS_WORK 31 WORK_STRUCT_STATIC_BIT = 4, /* static initializer (debugobjects) */ 32 WORK_STRUCT_COLOR_SHIFT = 5, /* color for workqueue flushing */ 33 #else 34 WORK_STRUCT_COLOR_SHIFT = 4, /* color for workqueue flushing */ 35 #endif 36 37 WORK_STRUCT_COLOR_BITS = 4, 38 39 /* 40 * Reserve 8 bits off of pwq pointer w/ debugobjects turned off. 41 * This makes pwqs aligned to 256 bytes and allows 16 workqueue 42 * flush colors. 43 */ 44 WORK_STRUCT_FLAG_BITS = WORK_STRUCT_COLOR_SHIFT + 45 WORK_STRUCT_COLOR_BITS, 46 47 /* data contains off-queue information when !WORK_STRUCT_PWQ */ 48 WORK_OFFQ_FLAG_BASE = WORK_STRUCT_COLOR_SHIFT, 49 50 __WORK_OFFQ_CANCELING = WORK_OFFQ_FLAG_BASE, 51 52 /* 53 * When a work item is off queue, its high bits point to the last 54 * pool it was on. Cap at 31 bits and use the highest number to 55 * indicate that no pool is associated. 56 */ 57 WORK_OFFQ_FLAG_BITS = 1, 58 WORK_OFFQ_POOL_SHIFT = WORK_OFFQ_FLAG_BASE + WORK_OFFQ_FLAG_BITS, 59 WORK_OFFQ_LEFT = BITS_PER_LONG - WORK_OFFQ_POOL_SHIFT, 60 WORK_OFFQ_POOL_BITS = WORK_OFFQ_LEFT <= 31 ? WORK_OFFQ_LEFT : 31, 61 62 }; 63 64 enum work_flags { 65 WORK_STRUCT_PENDING = 1 << WORK_STRUCT_PENDING_BIT, 66 WORK_STRUCT_INACTIVE = 1 << WORK_STRUCT_INACTIVE_BIT, 67 WORK_STRUCT_PWQ = 1 << WORK_STRUCT_PWQ_BIT, 68 WORK_STRUCT_LINKED = 1 << WORK_STRUCT_LINKED_BIT, 69 #ifdef CONFIG_DEBUG_OBJECTS_WORK 70 WORK_STRUCT_STATIC = 1 << WORK_STRUCT_STATIC_BIT, 71 #else 72 WORK_STRUCT_STATIC = 0, 73 #endif 74 }; 75 76 enum wq_misc_consts { 77 WORK_NR_COLORS = (1 << WORK_STRUCT_COLOR_BITS), 78 79 /* not bound to any CPU, prefer the local CPU */ 80 WORK_CPU_UNBOUND = NR_CPUS, 81 82 /* bit mask for work_busy() return values */ 83 WORK_BUSY_PENDING = 1 << 0, 84 WORK_BUSY_RUNNING = 1 << 1, 85 86 /* maximum string length for set_worker_desc() */ 87 WORKER_DESC_LEN = 24, 88 }; 89 90 /* Convenience constants - of type 'unsigned long', not 'enum'! */ 91 #define WORK_OFFQ_CANCELING (1ul << __WORK_OFFQ_CANCELING) 92 #define WORK_OFFQ_POOL_NONE ((1ul << WORK_OFFQ_POOL_BITS) - 1) 93 #define WORK_STRUCT_NO_POOL (WORK_OFFQ_POOL_NONE << WORK_OFFQ_POOL_SHIFT) 94 95 #define WORK_STRUCT_FLAG_MASK ((1ul << WORK_STRUCT_FLAG_BITS) - 1) 96 #define WORK_STRUCT_WQ_DATA_MASK (~WORK_STRUCT_FLAG_MASK) 97 98 #define WORK_DATA_INIT() ATOMIC_LONG_INIT((unsigned long)WORK_STRUCT_NO_POOL) 99 #define WORK_DATA_STATIC_INIT() \ 100 ATOMIC_LONG_INIT((unsigned long)(WORK_STRUCT_NO_POOL | WORK_STRUCT_STATIC)) 101 102 struct delayed_work { 103 struct work_struct work; 104 struct timer_list timer; 105 106 /* target workqueue and CPU ->timer uses to queue ->work */ 107 struct workqueue_struct *wq; 108 int cpu; 109 }; 110 111 struct rcu_work { 112 struct work_struct work; 113 struct rcu_head rcu; 114 115 /* target workqueue ->rcu uses to queue ->work */ 116 struct workqueue_struct *wq; 117 }; 118 119 enum wq_affn_scope { 120 WQ_AFFN_DFL, /* use system default */ 121 WQ_AFFN_CPU, /* one pod per CPU */ 122 WQ_AFFN_SMT, /* one pod poer SMT */ 123 WQ_AFFN_CACHE, /* one pod per LLC */ 124 WQ_AFFN_NUMA, /* one pod per NUMA node */ 125 WQ_AFFN_SYSTEM, /* one pod across the whole system */ 126 127 WQ_AFFN_NR_TYPES, 128 }; 129 130 /** 131 * struct workqueue_attrs - A struct for workqueue attributes. 132 * 133 * This can be used to change attributes of an unbound workqueue. 134 */ 135 struct workqueue_attrs { 136 /** 137 * @nice: nice level 138 */ 139 int nice; 140 141 /** 142 * @cpumask: allowed CPUs 143 * 144 * Work items in this workqueue are affine to these CPUs and not allowed 145 * to execute on other CPUs. A pool serving a workqueue must have the 146 * same @cpumask. 147 */ 148 cpumask_var_t cpumask; 149 150 /** 151 * @__pod_cpumask: internal attribute used to create per-pod pools 152 * 153 * Internal use only. 154 * 155 * Per-pod unbound worker pools are used to improve locality. Always a 156 * subset of ->cpumask. A workqueue can be associated with multiple 157 * worker pools with disjoint @__pod_cpumask's. Whether the enforcement 158 * of a pool's @__pod_cpumask is strict depends on @affn_strict. 159 */ 160 cpumask_var_t __pod_cpumask; 161 162 /** 163 * @affn_strict: affinity scope is strict 164 * 165 * If clear, workqueue will make a best-effort attempt at starting the 166 * worker inside @__pod_cpumask but the scheduler is free to migrate it 167 * outside. 168 * 169 * If set, workers are only allowed to run inside @__pod_cpumask. 170 */ 171 bool affn_strict; 172 173 /* 174 * Below fields aren't properties of a worker_pool. They only modify how 175 * :c:func:`apply_workqueue_attrs` select pools and thus don't 176 * participate in pool hash calculations or equality comparisons. 177 */ 178 179 /** 180 * @affn_scope: unbound CPU affinity scope 181 * 182 * CPU pods are used to improve execution locality of unbound work 183 * items. There are multiple pod types, one for each wq_affn_scope, and 184 * every CPU in the system belongs to one pod in every pod type. CPUs 185 * that belong to the same pod share the worker pool. For example, 186 * selecting %WQ_AFFN_NUMA makes the workqueue use a separate worker 187 * pool for each NUMA node. 188 */ 189 enum wq_affn_scope affn_scope; 190 191 /** 192 * @ordered: work items must be executed one by one in queueing order 193 */ 194 bool ordered; 195 }; 196 197 static inline struct delayed_work *to_delayed_work(struct work_struct *work) 198 { 199 return container_of(work, struct delayed_work, work); 200 } 201 202 static inline struct rcu_work *to_rcu_work(struct work_struct *work) 203 { 204 return container_of(work, struct rcu_work, work); 205 } 206 207 struct execute_work { 208 struct work_struct work; 209 }; 210 211 #ifdef CONFIG_LOCKDEP 212 /* 213 * NB: because we have to copy the lockdep_map, setting _key 214 * here is required, otherwise it could get initialised to the 215 * copy of the lockdep_map! 216 */ 217 #define __WORK_INIT_LOCKDEP_MAP(n, k) \ 218 .lockdep_map = STATIC_LOCKDEP_MAP_INIT(n, k), 219 #else 220 #define __WORK_INIT_LOCKDEP_MAP(n, k) 221 #endif 222 223 #define __WORK_INITIALIZER(n, f) { \ 224 .data = WORK_DATA_STATIC_INIT(), \ 225 .entry = { &(n).entry, &(n).entry }, \ 226 .func = (f), \ 227 __WORK_INIT_LOCKDEP_MAP(#n, &(n)) \ 228 } 229 230 #define __DELAYED_WORK_INITIALIZER(n, f, tflags) { \ 231 .work = __WORK_INITIALIZER((n).work, (f)), \ 232 .timer = __TIMER_INITIALIZER(delayed_work_timer_fn,\ 233 (tflags) | TIMER_IRQSAFE), \ 234 } 235 236 #define DECLARE_WORK(n, f) \ 237 struct work_struct n = __WORK_INITIALIZER(n, f) 238 239 #define DECLARE_DELAYED_WORK(n, f) \ 240 struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, 0) 241 242 #define DECLARE_DEFERRABLE_WORK(n, f) \ 243 struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, TIMER_DEFERRABLE) 244 245 #ifdef CONFIG_DEBUG_OBJECTS_WORK 246 extern void __init_work(struct work_struct *work, int onstack); 247 extern void destroy_work_on_stack(struct work_struct *work); 248 extern void destroy_delayed_work_on_stack(struct delayed_work *work); 249 static inline unsigned int work_static(struct work_struct *work) 250 { 251 return *work_data_bits(work) & WORK_STRUCT_STATIC; 252 } 253 #else 254 static inline void __init_work(struct work_struct *work, int onstack) { } 255 static inline void destroy_work_on_stack(struct work_struct *work) { } 256 static inline void destroy_delayed_work_on_stack(struct delayed_work *work) { } 257 static inline unsigned int work_static(struct work_struct *work) { return 0; } 258 #endif 259 260 /* 261 * initialize all of a work item in one go 262 * 263 * NOTE! No point in using "atomic_long_set()": using a direct 264 * assignment of the work data initializer allows the compiler 265 * to generate better code. 266 */ 267 #ifdef CONFIG_LOCKDEP 268 #define __INIT_WORK_KEY(_work, _func, _onstack, _key) \ 269 do { \ 270 __init_work((_work), _onstack); \ 271 (_work)->data = (atomic_long_t) WORK_DATA_INIT(); \ 272 lockdep_init_map(&(_work)->lockdep_map, "(work_completion)"#_work, (_key), 0); \ 273 INIT_LIST_HEAD(&(_work)->entry); \ 274 (_work)->func = (_func); \ 275 } while (0) 276 #else 277 #define __INIT_WORK_KEY(_work, _func, _onstack, _key) \ 278 do { \ 279 __init_work((_work), _onstack); \ 280 (_work)->data = (atomic_long_t) WORK_DATA_INIT(); \ 281 INIT_LIST_HEAD(&(_work)->entry); \ 282 (_work)->func = (_func); \ 283 } while (0) 284 #endif 285 286 #define __INIT_WORK(_work, _func, _onstack) \ 287 do { \ 288 static __maybe_unused struct lock_class_key __key; \ 289 \ 290 __INIT_WORK_KEY(_work, _func, _onstack, &__key); \ 291 } while (0) 292 293 #define INIT_WORK(_work, _func) \ 294 __INIT_WORK((_work), (_func), 0) 295 296 #define INIT_WORK_ONSTACK(_work, _func) \ 297 __INIT_WORK((_work), (_func), 1) 298 299 #define INIT_WORK_ONSTACK_KEY(_work, _func, _key) \ 300 __INIT_WORK_KEY((_work), (_func), 1, _key) 301 302 #define __INIT_DELAYED_WORK(_work, _func, _tflags) \ 303 do { \ 304 INIT_WORK(&(_work)->work, (_func)); \ 305 __init_timer(&(_work)->timer, \ 306 delayed_work_timer_fn, \ 307 (_tflags) | TIMER_IRQSAFE); \ 308 } while (0) 309 310 #define __INIT_DELAYED_WORK_ONSTACK(_work, _func, _tflags) \ 311 do { \ 312 INIT_WORK_ONSTACK(&(_work)->work, (_func)); \ 313 __init_timer_on_stack(&(_work)->timer, \ 314 delayed_work_timer_fn, \ 315 (_tflags) | TIMER_IRQSAFE); \ 316 } while (0) 317 318 #define INIT_DELAYED_WORK(_work, _func) \ 319 __INIT_DELAYED_WORK(_work, _func, 0) 320 321 #define INIT_DELAYED_WORK_ONSTACK(_work, _func) \ 322 __INIT_DELAYED_WORK_ONSTACK(_work, _func, 0) 323 324 #define INIT_DEFERRABLE_WORK(_work, _func) \ 325 __INIT_DELAYED_WORK(_work, _func, TIMER_DEFERRABLE) 326 327 #define INIT_DEFERRABLE_WORK_ONSTACK(_work, _func) \ 328 __INIT_DELAYED_WORK_ONSTACK(_work, _func, TIMER_DEFERRABLE) 329 330 #define INIT_RCU_WORK(_work, _func) \ 331 INIT_WORK(&(_work)->work, (_func)) 332 333 #define INIT_RCU_WORK_ONSTACK(_work, _func) \ 334 INIT_WORK_ONSTACK(&(_work)->work, (_func)) 335 336 /** 337 * work_pending - Find out whether a work item is currently pending 338 * @work: The work item in question 339 */ 340 #define work_pending(work) \ 341 test_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)) 342 343 /** 344 * delayed_work_pending - Find out whether a delayable work item is currently 345 * pending 346 * @w: The work item in question 347 */ 348 #define delayed_work_pending(w) \ 349 work_pending(&(w)->work) 350 351 /* 352 * Workqueue flags and constants. For details, please refer to 353 * Documentation/core-api/workqueue.rst. 354 */ 355 enum wq_flags { 356 WQ_UNBOUND = 1 << 1, /* not bound to any cpu */ 357 WQ_FREEZABLE = 1 << 2, /* freeze during suspend */ 358 WQ_MEM_RECLAIM = 1 << 3, /* may be used for memory reclaim */ 359 WQ_HIGHPRI = 1 << 4, /* high priority */ 360 WQ_CPU_INTENSIVE = 1 << 5, /* cpu intensive workqueue */ 361 WQ_SYSFS = 1 << 6, /* visible in sysfs, see workqueue_sysfs_register() */ 362 363 /* 364 * Per-cpu workqueues are generally preferred because they tend to 365 * show better performance thanks to cache locality. Per-cpu 366 * workqueues exclude the scheduler from choosing the CPU to 367 * execute the worker threads, which has an unfortunate side effect 368 * of increasing power consumption. 369 * 370 * The scheduler considers a CPU idle if it doesn't have any task 371 * to execute and tries to keep idle cores idle to conserve power; 372 * however, for example, a per-cpu work item scheduled from an 373 * interrupt handler on an idle CPU will force the scheduler to 374 * execute the work item on that CPU breaking the idleness, which in 375 * turn may lead to more scheduling choices which are sub-optimal 376 * in terms of power consumption. 377 * 378 * Workqueues marked with WQ_POWER_EFFICIENT are per-cpu by default 379 * but become unbound if workqueue.power_efficient kernel param is 380 * specified. Per-cpu workqueues which are identified to 381 * contribute significantly to power-consumption are identified and 382 * marked with this flag and enabling the power_efficient mode 383 * leads to noticeable power saving at the cost of small 384 * performance disadvantage. 385 * 386 * http://thread.gmane.org/gmane.linux.kernel/1480396 387 */ 388 WQ_POWER_EFFICIENT = 1 << 7, 389 390 __WQ_DESTROYING = 1 << 15, /* internal: workqueue is destroying */ 391 __WQ_DRAINING = 1 << 16, /* internal: workqueue is draining */ 392 __WQ_ORDERED = 1 << 17, /* internal: workqueue is ordered */ 393 __WQ_LEGACY = 1 << 18, /* internal: create*_workqueue() */ 394 __WQ_ORDERED_EXPLICIT = 1 << 19, /* internal: alloc_ordered_workqueue() */ 395 }; 396 397 enum wq_consts { 398 WQ_MAX_ACTIVE = 512, /* I like 512, better ideas? */ 399 WQ_UNBOUND_MAX_ACTIVE = WQ_MAX_ACTIVE, 400 WQ_DFL_ACTIVE = WQ_MAX_ACTIVE / 2, 401 }; 402 403 /* 404 * System-wide workqueues which are always present. 405 * 406 * system_wq is the one used by schedule[_delayed]_work[_on](). 407 * Multi-CPU multi-threaded. There are users which expect relatively 408 * short queue flush time. Don't queue works which can run for too 409 * long. 410 * 411 * system_highpri_wq is similar to system_wq but for work items which 412 * require WQ_HIGHPRI. 413 * 414 * system_long_wq is similar to system_wq but may host long running 415 * works. Queue flushing might take relatively long. 416 * 417 * system_unbound_wq is unbound workqueue. Workers are not bound to 418 * any specific CPU, not concurrency managed, and all queued works are 419 * executed immediately as long as max_active limit is not reached and 420 * resources are available. 421 * 422 * system_freezable_wq is equivalent to system_wq except that it's 423 * freezable. 424 * 425 * *_power_efficient_wq are inclined towards saving power and converted 426 * into WQ_UNBOUND variants if 'wq_power_efficient' is enabled; otherwise, 427 * they are same as their non-power-efficient counterparts - e.g. 428 * system_power_efficient_wq is identical to system_wq if 429 * 'wq_power_efficient' is disabled. See WQ_POWER_EFFICIENT for more info. 430 */ 431 extern struct workqueue_struct *system_wq; 432 extern struct workqueue_struct *system_highpri_wq; 433 extern struct workqueue_struct *system_long_wq; 434 extern struct workqueue_struct *system_unbound_wq; 435 extern struct workqueue_struct *system_freezable_wq; 436 extern struct workqueue_struct *system_power_efficient_wq; 437 extern struct workqueue_struct *system_freezable_power_efficient_wq; 438 439 /** 440 * alloc_workqueue - allocate a workqueue 441 * @fmt: printf format for the name of the workqueue 442 * @flags: WQ_* flags 443 * @max_active: max in-flight work items per CPU, 0 for default 444 * remaining args: args for @fmt 445 * 446 * Allocate a workqueue with the specified parameters. For detailed 447 * information on WQ_* flags, please refer to 448 * Documentation/core-api/workqueue.rst. 449 * 450 * RETURNS: 451 * Pointer to the allocated workqueue on success, %NULL on failure. 452 */ 453 __printf(1, 4) struct workqueue_struct * 454 alloc_workqueue(const char *fmt, unsigned int flags, int max_active, ...); 455 456 /** 457 * alloc_ordered_workqueue - allocate an ordered workqueue 458 * @fmt: printf format for the name of the workqueue 459 * @flags: WQ_* flags (only WQ_FREEZABLE and WQ_MEM_RECLAIM are meaningful) 460 * @args: args for @fmt 461 * 462 * Allocate an ordered workqueue. An ordered workqueue executes at 463 * most one work item at any given time in the queued order. They are 464 * implemented as unbound workqueues with @max_active of one. 465 * 466 * RETURNS: 467 * Pointer to the allocated workqueue on success, %NULL on failure. 468 */ 469 #define alloc_ordered_workqueue(fmt, flags, args...) \ 470 alloc_workqueue(fmt, WQ_UNBOUND | __WQ_ORDERED | \ 471 __WQ_ORDERED_EXPLICIT | (flags), 1, ##args) 472 473 #define create_workqueue(name) \ 474 alloc_workqueue("%s", __WQ_LEGACY | WQ_MEM_RECLAIM, 1, (name)) 475 #define create_freezable_workqueue(name) \ 476 alloc_workqueue("%s", __WQ_LEGACY | WQ_FREEZABLE | WQ_UNBOUND | \ 477 WQ_MEM_RECLAIM, 1, (name)) 478 #define create_singlethread_workqueue(name) \ 479 alloc_ordered_workqueue("%s", __WQ_LEGACY | WQ_MEM_RECLAIM, name) 480 481 extern void destroy_workqueue(struct workqueue_struct *wq); 482 483 struct workqueue_attrs *alloc_workqueue_attrs(void); 484 void free_workqueue_attrs(struct workqueue_attrs *attrs); 485 int apply_workqueue_attrs(struct workqueue_struct *wq, 486 const struct workqueue_attrs *attrs); 487 extern int workqueue_unbound_exclude_cpumask(cpumask_var_t cpumask); 488 489 extern bool queue_work_on(int cpu, struct workqueue_struct *wq, 490 struct work_struct *work); 491 extern bool queue_work_node(int node, struct workqueue_struct *wq, 492 struct work_struct *work); 493 extern bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq, 494 struct delayed_work *work, unsigned long delay); 495 extern bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq, 496 struct delayed_work *dwork, unsigned long delay); 497 extern bool queue_rcu_work(struct workqueue_struct *wq, struct rcu_work *rwork); 498 499 extern void __flush_workqueue(struct workqueue_struct *wq); 500 extern void drain_workqueue(struct workqueue_struct *wq); 501 502 extern int schedule_on_each_cpu(work_func_t func); 503 504 int execute_in_process_context(work_func_t fn, struct execute_work *); 505 506 extern bool flush_work(struct work_struct *work); 507 extern bool cancel_work(struct work_struct *work); 508 extern bool cancel_work_sync(struct work_struct *work); 509 510 extern bool flush_delayed_work(struct delayed_work *dwork); 511 extern bool cancel_delayed_work(struct delayed_work *dwork); 512 extern bool cancel_delayed_work_sync(struct delayed_work *dwork); 513 514 extern bool flush_rcu_work(struct rcu_work *rwork); 515 516 extern void workqueue_set_max_active(struct workqueue_struct *wq, 517 int max_active); 518 extern struct work_struct *current_work(void); 519 extern bool current_is_workqueue_rescuer(void); 520 extern bool workqueue_congested(int cpu, struct workqueue_struct *wq); 521 extern unsigned int work_busy(struct work_struct *work); 522 extern __printf(1, 2) void set_worker_desc(const char *fmt, ...); 523 extern void print_worker_info(const char *log_lvl, struct task_struct *task); 524 extern void show_all_workqueues(void); 525 extern void show_freezable_workqueues(void); 526 extern void show_one_workqueue(struct workqueue_struct *wq); 527 extern void wq_worker_comm(char *buf, size_t size, struct task_struct *task); 528 529 /** 530 * queue_work - queue work on a workqueue 531 * @wq: workqueue to use 532 * @work: work to queue 533 * 534 * Returns %false if @work was already on a queue, %true otherwise. 535 * 536 * We queue the work to the CPU on which it was submitted, but if the CPU dies 537 * it can be processed by another CPU. 538 * 539 * Memory-ordering properties: If it returns %true, guarantees that all stores 540 * preceding the call to queue_work() in the program order will be visible from 541 * the CPU which will execute @work by the time such work executes, e.g., 542 * 543 * { x is initially 0 } 544 * 545 * CPU0 CPU1 546 * 547 * WRITE_ONCE(x, 1); [ @work is being executed ] 548 * r0 = queue_work(wq, work); r1 = READ_ONCE(x); 549 * 550 * Forbids: r0 == true && r1 == 0 551 */ 552 static inline bool queue_work(struct workqueue_struct *wq, 553 struct work_struct *work) 554 { 555 return queue_work_on(WORK_CPU_UNBOUND, wq, work); 556 } 557 558 /** 559 * queue_delayed_work - queue work on a workqueue after delay 560 * @wq: workqueue to use 561 * @dwork: delayable work to queue 562 * @delay: number of jiffies to wait before queueing 563 * 564 * Equivalent to queue_delayed_work_on() but tries to use the local CPU. 565 */ 566 static inline bool queue_delayed_work(struct workqueue_struct *wq, 567 struct delayed_work *dwork, 568 unsigned long delay) 569 { 570 return queue_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay); 571 } 572 573 /** 574 * mod_delayed_work - modify delay of or queue a delayed work 575 * @wq: workqueue to use 576 * @dwork: work to queue 577 * @delay: number of jiffies to wait before queueing 578 * 579 * mod_delayed_work_on() on local CPU. 580 */ 581 static inline bool mod_delayed_work(struct workqueue_struct *wq, 582 struct delayed_work *dwork, 583 unsigned long delay) 584 { 585 return mod_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay); 586 } 587 588 /** 589 * schedule_work_on - put work task on a specific cpu 590 * @cpu: cpu to put the work task on 591 * @work: job to be done 592 * 593 * This puts a job on a specific cpu 594 */ 595 static inline bool schedule_work_on(int cpu, struct work_struct *work) 596 { 597 return queue_work_on(cpu, system_wq, work); 598 } 599 600 /** 601 * schedule_work - put work task in global workqueue 602 * @work: job to be done 603 * 604 * Returns %false if @work was already on the kernel-global workqueue and 605 * %true otherwise. 606 * 607 * This puts a job in the kernel-global workqueue if it was not already 608 * queued and leaves it in the same position on the kernel-global 609 * workqueue otherwise. 610 * 611 * Shares the same memory-ordering properties of queue_work(), cf. the 612 * DocBook header of queue_work(). 613 */ 614 static inline bool schedule_work(struct work_struct *work) 615 { 616 return queue_work(system_wq, work); 617 } 618 619 /* 620 * Detect attempt to flush system-wide workqueues at compile time when possible. 621 * Warn attempt to flush system-wide workqueues at runtime. 622 * 623 * See https://lkml.kernel.org/r/[email protected] 624 * for reasons and steps for converting system-wide workqueues into local workqueues. 625 */ 626 extern void __warn_flushing_systemwide_wq(void) 627 __compiletime_warning("Please avoid flushing system-wide workqueues."); 628 629 /* Please stop using this function, for this function will be removed in near future. */ 630 #define flush_scheduled_work() \ 631 ({ \ 632 __warn_flushing_systemwide_wq(); \ 633 __flush_workqueue(system_wq); \ 634 }) 635 636 #define flush_workqueue(wq) \ 637 ({ \ 638 struct workqueue_struct *_wq = (wq); \ 639 \ 640 if ((__builtin_constant_p(_wq == system_wq) && \ 641 _wq == system_wq) || \ 642 (__builtin_constant_p(_wq == system_highpri_wq) && \ 643 _wq == system_highpri_wq) || \ 644 (__builtin_constant_p(_wq == system_long_wq) && \ 645 _wq == system_long_wq) || \ 646 (__builtin_constant_p(_wq == system_unbound_wq) && \ 647 _wq == system_unbound_wq) || \ 648 (__builtin_constant_p(_wq == system_freezable_wq) && \ 649 _wq == system_freezable_wq) || \ 650 (__builtin_constant_p(_wq == system_power_efficient_wq) && \ 651 _wq == system_power_efficient_wq) || \ 652 (__builtin_constant_p(_wq == system_freezable_power_efficient_wq) && \ 653 _wq == system_freezable_power_efficient_wq)) \ 654 __warn_flushing_systemwide_wq(); \ 655 __flush_workqueue(_wq); \ 656 }) 657 658 /** 659 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay 660 * @cpu: cpu to use 661 * @dwork: job to be done 662 * @delay: number of jiffies to wait 663 * 664 * After waiting for a given time this puts a job in the kernel-global 665 * workqueue on the specified CPU. 666 */ 667 static inline bool schedule_delayed_work_on(int cpu, struct delayed_work *dwork, 668 unsigned long delay) 669 { 670 return queue_delayed_work_on(cpu, system_wq, dwork, delay); 671 } 672 673 /** 674 * schedule_delayed_work - put work task in global workqueue after delay 675 * @dwork: job to be done 676 * @delay: number of jiffies to wait or 0 for immediate execution 677 * 678 * After waiting for a given time this puts a job in the kernel-global 679 * workqueue. 680 */ 681 static inline bool schedule_delayed_work(struct delayed_work *dwork, 682 unsigned long delay) 683 { 684 return queue_delayed_work(system_wq, dwork, delay); 685 } 686 687 #ifndef CONFIG_SMP 688 static inline long work_on_cpu(int cpu, long (*fn)(void *), void *arg) 689 { 690 return fn(arg); 691 } 692 static inline long work_on_cpu_safe(int cpu, long (*fn)(void *), void *arg) 693 { 694 return fn(arg); 695 } 696 #else 697 long work_on_cpu_key(int cpu, long (*fn)(void *), 698 void *arg, struct lock_class_key *key); 699 /* 700 * A new key is defined for each caller to make sure the work 701 * associated with the function doesn't share its locking class. 702 */ 703 #define work_on_cpu(_cpu, _fn, _arg) \ 704 ({ \ 705 static struct lock_class_key __key; \ 706 \ 707 work_on_cpu_key(_cpu, _fn, _arg, &__key); \ 708 }) 709 710 long work_on_cpu_safe_key(int cpu, long (*fn)(void *), 711 void *arg, struct lock_class_key *key); 712 713 /* 714 * A new key is defined for each caller to make sure the work 715 * associated with the function doesn't share its locking class. 716 */ 717 #define work_on_cpu_safe(_cpu, _fn, _arg) \ 718 ({ \ 719 static struct lock_class_key __key; \ 720 \ 721 work_on_cpu_safe_key(_cpu, _fn, _arg, &__key); \ 722 }) 723 #endif /* CONFIG_SMP */ 724 725 #ifdef CONFIG_FREEZER 726 extern void freeze_workqueues_begin(void); 727 extern bool freeze_workqueues_busy(void); 728 extern void thaw_workqueues(void); 729 #endif /* CONFIG_FREEZER */ 730 731 #ifdef CONFIG_SYSFS 732 int workqueue_sysfs_register(struct workqueue_struct *wq); 733 #else /* CONFIG_SYSFS */ 734 static inline int workqueue_sysfs_register(struct workqueue_struct *wq) 735 { return 0; } 736 #endif /* CONFIG_SYSFS */ 737 738 #ifdef CONFIG_WQ_WATCHDOG 739 void wq_watchdog_touch(int cpu); 740 #else /* CONFIG_WQ_WATCHDOG */ 741 static inline void wq_watchdog_touch(int cpu) { } 742 #endif /* CONFIG_WQ_WATCHDOG */ 743 744 #ifdef CONFIG_SMP 745 int workqueue_prepare_cpu(unsigned int cpu); 746 int workqueue_online_cpu(unsigned int cpu); 747 int workqueue_offline_cpu(unsigned int cpu); 748 #endif 749 750 void __init workqueue_init_early(void); 751 void __init workqueue_init(void); 752 void __init workqueue_init_topology(void); 753 754 #endif 755