/* SPDX-License-Identifier: GPL-2.0 */
/*
 * workqueue.h --- work queue handling for Linux.
 */

#ifndef _LINUX_WORKQUEUE_H
#define _LINUX_WORKQUEUE_H

#include <linux/timer.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/lockdep.h>
#include <linux/threads.h>
#include <linux/atomic.h>
#include <linux/cpumask.h>
#include <linux/rcupdate.h>

struct workqueue_struct;

struct work_struct;
typedef void (*work_func_t)(struct work_struct *work);
void delayed_work_timer_fn(struct timer_list *t);

/*
 * The first word is the work queue pointer and the flags rolled into
 * one
 */
#define work_data_bits(work) ((unsigned long *)(&(work)->data))

enum {
	WORK_STRUCT_PENDING_BIT	= 0,	/* work item is pending execution */
	WORK_STRUCT_INACTIVE_BIT= 1,	/* work item is inactive */
	WORK_STRUCT_PWQ_BIT	= 2,	/* data points to pwq */
	WORK_STRUCT_LINKED_BIT	= 3,	/* next work is linked to this one */
#ifdef CONFIG_DEBUG_OBJECTS_WORK
	WORK_STRUCT_STATIC_BIT	= 4,	/* static initializer (debugobjects) */
	WORK_STRUCT_COLOR_SHIFT	= 5,	/* color for workqueue flushing */
#else
	WORK_STRUCT_COLOR_SHIFT	= 4,	/* color for workqueue flushing */
#endif

	WORK_STRUCT_COLOR_BITS	= 4,

	WORK_STRUCT_PENDING	= 1 << WORK_STRUCT_PENDING_BIT,
	WORK_STRUCT_INACTIVE	= 1 << WORK_STRUCT_INACTIVE_BIT,
	WORK_STRUCT_PWQ		= 1 << WORK_STRUCT_PWQ_BIT,
	WORK_STRUCT_LINKED	= 1 << WORK_STRUCT_LINKED_BIT,
#ifdef CONFIG_DEBUG_OBJECTS_WORK
	WORK_STRUCT_STATIC	= 1 << WORK_STRUCT_STATIC_BIT,
#else
	WORK_STRUCT_STATIC	= 0,
#endif

	WORK_NR_COLORS		= (1 << WORK_STRUCT_COLOR_BITS),

	/* not bound to any CPU, prefer the local CPU */
	WORK_CPU_UNBOUND	= NR_CPUS,

	/*
	 * Reserve 8 bits off of pwq pointer w/ debugobjects turned off.
	 * This makes pwqs aligned to 256 bytes and allows 16 workqueue
	 * flush colors.
	 */
	WORK_STRUCT_FLAG_BITS	= WORK_STRUCT_COLOR_SHIFT +
				  WORK_STRUCT_COLOR_BITS,

	/* data contains off-queue information when !WORK_STRUCT_PWQ */
	WORK_OFFQ_FLAG_BASE	= WORK_STRUCT_COLOR_SHIFT,

	__WORK_OFFQ_CANCELING	= WORK_OFFQ_FLAG_BASE,

	/*
	 * When a work item is off queue, its high bits point to the last
	 * pool it was on. Cap at 31 bits and use the highest number to
	 * indicate that no pool is associated.
	 */
	WORK_OFFQ_FLAG_BITS	= 1,
	WORK_OFFQ_POOL_SHIFT	= WORK_OFFQ_FLAG_BASE + WORK_OFFQ_FLAG_BITS,
	WORK_OFFQ_LEFT		= BITS_PER_LONG - WORK_OFFQ_POOL_SHIFT,
	WORK_OFFQ_POOL_BITS	= WORK_OFFQ_LEFT <= 31 ? WORK_OFFQ_LEFT : 31,

	/* bit mask for work_busy() return values */
	WORK_BUSY_PENDING	= 1 << 0,
	WORK_BUSY_RUNNING	= 1 << 1,

	/* maximum string length for set_worker_desc() */
	WORKER_DESC_LEN		= 24,
};

/* Convenience constants - of type 'unsigned long', not 'enum'! */
#define WORK_OFFQ_CANCELING	(1ul << __WORK_OFFQ_CANCELING)
#define WORK_OFFQ_POOL_NONE	((1ul << WORK_OFFQ_POOL_BITS) - 1)
#define WORK_STRUCT_NO_POOL	(WORK_OFFQ_POOL_NONE << WORK_OFFQ_POOL_SHIFT)

#define WORK_STRUCT_FLAG_MASK	((1ul << WORK_STRUCT_FLAG_BITS) - 1)
#define WORK_STRUCT_WQ_DATA_MASK	(~WORK_STRUCT_FLAG_MASK)
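
/*
 * Illustrative sketch (not part of this header's API): how the data word
 * described above is typically decoded.  With WORK_STRUCT_PWQ set, the word
 * carries a pool_workqueue pointer with the flag bits folded into its low,
 * alignment-guaranteed bits; with it clear, the high bits carry the ID of
 * the last worker pool the item ran on.  "pwq" and "pool_id" below are just
 * local names for the sketch.
 *
 *	unsigned long data = *work_data_bits(work);
 *	void *pwq;
 *	unsigned long pool_id;
 *
 *	if (data & WORK_STRUCT_PWQ)
 *		pwq = (void *)(data & WORK_STRUCT_WQ_DATA_MASK);
 *	else
 *		pool_id = data >> WORK_OFFQ_POOL_SHIFT;
 *
 * A pool_id equal to WORK_OFFQ_POOL_NONE means no pool is associated.
 */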

struct work_struct {
	atomic_long_t data;
	struct list_head entry;
	work_func_t func;
#ifdef CONFIG_LOCKDEP
	struct lockdep_map lockdep_map;
#endif
};

#define WORK_DATA_INIT()	ATOMIC_LONG_INIT((unsigned long)WORK_STRUCT_NO_POOL)
#define WORK_DATA_STATIC_INIT()	\
	ATOMIC_LONG_INIT((unsigned long)(WORK_STRUCT_NO_POOL | WORK_STRUCT_STATIC))

struct delayed_work {
	struct work_struct work;
	struct timer_list timer;

	/* target workqueue and CPU ->timer uses to queue ->work */
	struct workqueue_struct *wq;
	int cpu;
};

struct rcu_work {
	struct work_struct work;
	struct rcu_head rcu;

	/* target workqueue ->rcu uses to queue ->work */
	struct workqueue_struct *wq;
};

enum wq_affn_scope {
	WQ_AFFN_NUMA,			/* one pod per NUMA node */
	WQ_AFFN_SYSTEM,			/* one pod across the whole system */

	WQ_AFFN_NR_TYPES,

	WQ_AFFN_DFL		= WQ_AFFN_NUMA,
};

/**
 * struct workqueue_attrs - A struct for workqueue attributes.
 *
 * This can be used to change attributes of an unbound workqueue.
 */
struct workqueue_attrs {
	/**
	 * @nice: nice level
	 */
	int nice;

	/**
	 * @cpumask: allowed CPUs
	 */
	cpumask_var_t cpumask;

	/*
	 * Below fields aren't properties of a worker_pool. They only modify how
	 * :c:func:`apply_workqueue_attrs` selects pools and thus don't
	 * participate in pool hash calculations or equality comparisons.
	 */

	/**
	 * @affn_scope: unbound CPU affinity scope
	 *
	 * CPU pods are used to improve execution locality of unbound work
	 * items. There are multiple pod types, one for each wq_affn_scope, and
	 * every CPU in the system belongs to one pod in every pod type. CPUs
	 * that belong to the same pod share the worker pool. For example,
	 * selecting %WQ_AFFN_NUMA makes the workqueue use a separate worker
	 * pool for each NUMA node.
	 */
	enum wq_affn_scope affn_scope;

	/**
	 * @ordered: work items must be executed one by one in queueing order
	 */
	bool ordered;
};

static inline struct delayed_work *to_delayed_work(struct work_struct *work)
{
	return container_of(work, struct delayed_work, work);
}

static inline struct rcu_work *to_rcu_work(struct work_struct *work)
{
	return container_of(work, struct rcu_work, work);
}

struct execute_work {
	struct work_struct work;
};
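
/*
 * Illustrative sketch (hypothetical driver code, not part of this header):
 * work items are usually embedded in a larger private structure and
 * recovered in the handler with container_of() or, for a delayed_work,
 * with to_delayed_work().  "struct foo_device" and "foo_poll_fn" are
 * made-up names.
 *
 *	struct foo_device {
 *		struct work_struct irq_work;
 *		struct delayed_work poll_work;
 *		unsigned int poll_count;
 *	};
 *
 *	static void foo_poll_fn(struct work_struct *work)
 *	{
 *		struct delayed_work *dwork = to_delayed_work(work);
 *		struct foo_device *foo = container_of(dwork, struct foo_device,
 *						      poll_work);
 *
 *		foo->poll_count++;
 *	}
 */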

#ifdef CONFIG_LOCKDEP
/*
 * NB: because we have to copy the lockdep_map, setting _key
 * here is required, otherwise it could get initialised to the
 * copy of the lockdep_map!
 */
#define __WORK_INIT_LOCKDEP_MAP(n, k) \
	.lockdep_map = STATIC_LOCKDEP_MAP_INIT(n, k),
#else
#define __WORK_INIT_LOCKDEP_MAP(n, k)
#endif

#define __WORK_INITIALIZER(n, f) {					\
	.data = WORK_DATA_STATIC_INIT(),				\
	.entry	= { &(n).entry, &(n).entry },				\
	.func = (f),							\
	__WORK_INIT_LOCKDEP_MAP(#n, &(n))				\
	}

#define __DELAYED_WORK_INITIALIZER(n, f, tflags) {			\
	.work = __WORK_INITIALIZER((n).work, (f)),			\
	.timer = __TIMER_INITIALIZER(delayed_work_timer_fn,		\
				     (tflags) | TIMER_IRQSAFE),		\
	}

#define DECLARE_WORK(n, f)						\
	struct work_struct n = __WORK_INITIALIZER(n, f)

#define DECLARE_DELAYED_WORK(n, f)					\
	struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, 0)

#define DECLARE_DEFERRABLE_WORK(n, f)					\
	struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, TIMER_DEFERRABLE)

#ifdef CONFIG_DEBUG_OBJECTS_WORK
extern void __init_work(struct work_struct *work, int onstack);
extern void destroy_work_on_stack(struct work_struct *work);
extern void destroy_delayed_work_on_stack(struct delayed_work *work);
static inline unsigned int work_static(struct work_struct *work)
{
	return *work_data_bits(work) & WORK_STRUCT_STATIC;
}
#else
static inline void __init_work(struct work_struct *work, int onstack) { }
static inline void destroy_work_on_stack(struct work_struct *work) { }
static inline void destroy_delayed_work_on_stack(struct delayed_work *work) { }
static inline unsigned int work_static(struct work_struct *work) { return 0; }
#endif
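
/*
 * Illustrative sketch (hypothetical, not part of this header): statically
 * declaring work items with the DECLARE_*() helpers above.  "foo_work",
 * "foo_tick" and their handlers are made-up names; schedule_work() and
 * schedule_delayed_work(), used below, are declared later in this header.
 *
 *	static void foo_work_fn(struct work_struct *work);
 *	static void foo_tick_fn(struct work_struct *work);
 *
 *	static DECLARE_WORK(foo_work, foo_work_fn);
 *	static DECLARE_DELAYED_WORK(foo_tick, foo_tick_fn);
 *
 * and then, for example from an interrupt handler:
 *
 *	schedule_work(&foo_work);
 *	schedule_delayed_work(&foo_tick, HZ);
 */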

/*
 * initialize all of a work item in one go
 *
 * NOTE! No point in using "atomic_long_set()": using a direct
 * assignment of the work data initializer allows the compiler
 * to generate better code.
 */
#ifdef CONFIG_LOCKDEP
#define __INIT_WORK(_work, _func, _onstack)				\
	do {								\
		static struct lock_class_key __key;			\
									\
		__init_work((_work), _onstack);				\
		(_work)->data = (atomic_long_t) WORK_DATA_INIT();	\
		lockdep_init_map(&(_work)->lockdep_map, "(work_completion)"#_work, &__key, 0); \
		INIT_LIST_HEAD(&(_work)->entry);			\
		(_work)->func = (_func);				\
	} while (0)
#else
#define __INIT_WORK(_work, _func, _onstack)				\
	do {								\
		__init_work((_work), _onstack);				\
		(_work)->data = (atomic_long_t) WORK_DATA_INIT();	\
		INIT_LIST_HEAD(&(_work)->entry);			\
		(_work)->func = (_func);				\
	} while (0)
#endif

#define INIT_WORK(_work, _func)						\
	__INIT_WORK((_work), (_func), 0)

#define INIT_WORK_ONSTACK(_work, _func)					\
	__INIT_WORK((_work), (_func), 1)

#define __INIT_DELAYED_WORK(_work, _func, _tflags)			\
	do {								\
		INIT_WORK(&(_work)->work, (_func));			\
		__init_timer(&(_work)->timer,				\
			     delayed_work_timer_fn,			\
			     (_tflags) | TIMER_IRQSAFE);		\
	} while (0)

#define __INIT_DELAYED_WORK_ONSTACK(_work, _func, _tflags)		\
	do {								\
		INIT_WORK_ONSTACK(&(_work)->work, (_func));		\
		__init_timer_on_stack(&(_work)->timer,			\
				      delayed_work_timer_fn,		\
				      (_tflags) | TIMER_IRQSAFE);	\
	} while (0)

#define INIT_DELAYED_WORK(_work, _func)					\
	__INIT_DELAYED_WORK(_work, _func, 0)

#define INIT_DELAYED_WORK_ONSTACK(_work, _func)				\
	__INIT_DELAYED_WORK_ONSTACK(_work, _func, 0)

#define INIT_DEFERRABLE_WORK(_work, _func)				\
	__INIT_DELAYED_WORK(_work, _func, TIMER_DEFERRABLE)

#define INIT_DEFERRABLE_WORK_ONSTACK(_work, _func)			\
	__INIT_DELAYED_WORK_ONSTACK(_work, _func, TIMER_DEFERRABLE)

#define INIT_RCU_WORK(_work, _func)					\
	INIT_WORK(&(_work)->work, (_func))

#define INIT_RCU_WORK_ONSTACK(_work, _func)				\
	INIT_WORK_ONSTACK(&(_work)->work, (_func))

/**
 * work_pending - Find out whether a work item is currently pending
 * @work: The work item in question
 */
#define work_pending(work) \
	test_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))

/**
 * delayed_work_pending - Find out whether a delayable work item is currently
 * pending
 * @w: The work item in question
 */
#define delayed_work_pending(w) \
	work_pending(&(w)->work)
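
/*
 * Illustrative sketch (hypothetical, not part of this header): run-time
 * initialization with INIT_WORK()/INIT_DELAYED_WORK(), typically from a
 * probe or open path.  The _ONSTACK variants pair with
 * destroy_work_on_stack()/destroy_delayed_work_on_stack() once the item is
 * guaranteed not to be used again.  All "foo_*" names are made up and the
 * handlers are not shown.
 *
 *	static int foo_probe(struct foo_device *foo)
 *	{
 *		INIT_WORK(&foo->irq_work, foo_irq_work_fn);
 *		INIT_DELAYED_WORK(&foo->poll_work, foo_poll_fn);
 *		return 0;
 *	}
 */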

/*
 * Workqueue flags and constants.  For details, please refer to
 * Documentation/core-api/workqueue.rst.
 */
enum {
	WQ_UNBOUND		= 1 << 1, /* not bound to any cpu */
	WQ_FREEZABLE		= 1 << 2, /* freeze during suspend */
	WQ_MEM_RECLAIM		= 1 << 3, /* may be used for memory reclaim */
	WQ_HIGHPRI		= 1 << 4, /* high priority */
	WQ_CPU_INTENSIVE	= 1 << 5, /* cpu intensive workqueue */
	WQ_SYSFS		= 1 << 6, /* visible in sysfs, see workqueue_sysfs_register() */

	/*
	 * Per-cpu workqueues are generally preferred because they tend to
	 * show better performance thanks to cache locality.  Per-cpu
	 * workqueues exclude the scheduler from choosing the CPU to
	 * execute the worker threads, which has an unfortunate side effect
	 * of increasing power consumption.
	 *
	 * The scheduler considers a CPU idle if it doesn't have any task
	 * to execute and tries to keep idle cores idle to conserve power;
	 * however, for example, a per-cpu work item scheduled from an
	 * interrupt handler on an idle CPU will force the scheduler to
	 * execute the work item on that CPU, breaking the idleness, which
	 * in turn may lead to scheduling choices that are sub-optimal in
	 * terms of power consumption.
	 *
	 * Workqueues marked with WQ_POWER_EFFICIENT are per-cpu by default
	 * but become unbound if the workqueue.power_efficient kernel param
	 * is specified.  Per-cpu workqueues identified as contributing
	 * significantly to power consumption are marked with this flag,
	 * and enabling the power_efficient mode leads to noticeable power
	 * saving at the cost of a small performance penalty.
	 *
	 * http://thread.gmane.org/gmane.linux.kernel/1480396
	 */
	WQ_POWER_EFFICIENT	= 1 << 7,

	__WQ_DESTROYING		= 1 << 15, /* internal: workqueue is destroying */
	__WQ_DRAINING		= 1 << 16, /* internal: workqueue is draining */
	__WQ_ORDERED		= 1 << 17, /* internal: workqueue is ordered */
	__WQ_LEGACY		= 1 << 18, /* internal: create*_workqueue() */
	__WQ_ORDERED_EXPLICIT	= 1 << 19, /* internal: alloc_ordered_workqueue() */

	WQ_MAX_ACTIVE		= 512,	  /* I like 512, better ideas? */
	WQ_UNBOUND_MAX_ACTIVE	= WQ_MAX_ACTIVE,
	WQ_DFL_ACTIVE		= WQ_MAX_ACTIVE / 2,
};

/*
 * System-wide workqueues which are always present.
 *
 * system_wq is the one used by schedule[_delayed]_work[_on]().
 * Multi-CPU multi-threaded.  There are users which expect relatively
 * short queue flush time.  Don't queue works which can run for too
 * long.
 *
 * system_highpri_wq is similar to system_wq but for work items which
 * require WQ_HIGHPRI.
 *
 * system_long_wq is similar to system_wq but may host long running
 * works.  Queue flushing might take relatively long.
 *
 * system_unbound_wq is an unbound workqueue.  Workers are not bound to
 * any specific CPU, not concurrency managed, and all queued works are
 * executed immediately as long as the max_active limit is not reached
 * and resources are available.
 *
 * system_freezable_wq is equivalent to system_wq except that it's
 * freezable.
 *
 * *_power_efficient_wq are inclined towards saving power and converted
 * into WQ_UNBOUND variants if 'wq_power_efficient' is enabled; otherwise,
 * they are the same as their non-power-efficient counterparts - e.g.
 * system_power_efficient_wq is identical to system_wq if
 * 'wq_power_efficient' is disabled.  See WQ_POWER_EFFICIENT for more info.
 */
extern struct workqueue_struct *system_wq;
extern struct workqueue_struct *system_highpri_wq;
extern struct workqueue_struct *system_long_wq;
extern struct workqueue_struct *system_unbound_wq;
extern struct workqueue_struct *system_freezable_wq;
extern struct workqueue_struct *system_power_efficient_wq;
extern struct workqueue_struct *system_freezable_power_efficient_wq;
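
/*
 * Illustrative sketch (hypothetical, not part of this header): picking a
 * system workqueue.  Short, quick items can go through schedule_work()
 * (i.e. system_wq); items that may run for a long time or hog a CPU are
 * better queued on system_long_wq or system_unbound_wq.  queue_work() is
 * defined later in this header; "foo_*" names are made up.
 *
 *	// short, latency-insensitive follow-up work
 *	schedule_work(&foo->irq_work);
 *
 *	// potentially long-running, CPU-hungry work
 *	queue_work(system_unbound_wq, &foo->rescan_work);
 */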

/**
 * alloc_workqueue - allocate a workqueue
 * @fmt: printf format for the name of the workqueue
 * @flags: WQ_* flags
 * @max_active: max in-flight work items per CPU, 0 for default
 * remaining args: args for @fmt
 *
 * Allocate a workqueue with the specified parameters.  For detailed
 * information on WQ_* flags, please refer to
 * Documentation/core-api/workqueue.rst.
 *
 * RETURNS:
 * Pointer to the allocated workqueue on success, %NULL on failure.
 */
__printf(1, 4) struct workqueue_struct *
alloc_workqueue(const char *fmt, unsigned int flags, int max_active, ...);

/**
 * alloc_ordered_workqueue - allocate an ordered workqueue
 * @fmt: printf format for the name of the workqueue
 * @flags: WQ_* flags (only WQ_FREEZABLE and WQ_MEM_RECLAIM are meaningful)
 * @args: args for @fmt
 *
 * Allocate an ordered workqueue.  An ordered workqueue executes at
 * most one work item at any given time in the queued order.  It is
 * implemented as an unbound workqueue with @max_active of one.
 *
 * RETURNS:
 * Pointer to the allocated workqueue on success, %NULL on failure.
 */
#define alloc_ordered_workqueue(fmt, flags, args...)			\
	alloc_workqueue(fmt, WQ_UNBOUND | __WQ_ORDERED |		\
			__WQ_ORDERED_EXPLICIT | (flags), 1, ##args)

#define create_workqueue(name)						\
	alloc_workqueue("%s", __WQ_LEGACY | WQ_MEM_RECLAIM, 1, (name))
#define create_freezable_workqueue(name)				\
	alloc_workqueue("%s", __WQ_LEGACY | WQ_FREEZABLE | WQ_UNBOUND |	\
			WQ_MEM_RECLAIM, 1, (name))
#define create_singlethread_workqueue(name)				\
	alloc_ordered_workqueue("%s", __WQ_LEGACY | WQ_MEM_RECLAIM, name)

extern void destroy_workqueue(struct workqueue_struct *wq);

struct workqueue_attrs *alloc_workqueue_attrs(void);
void free_workqueue_attrs(struct workqueue_attrs *attrs);
int apply_workqueue_attrs(struct workqueue_struct *wq,
			  const struct workqueue_attrs *attrs);
int workqueue_set_unbound_cpumask(cpumask_var_t cpumask);

extern bool queue_work_on(int cpu, struct workqueue_struct *wq,
			  struct work_struct *work);
extern bool queue_work_node(int node, struct workqueue_struct *wq,
			    struct work_struct *work);
extern bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
				  struct delayed_work *work, unsigned long delay);
extern bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
				struct delayed_work *dwork, unsigned long delay);
extern bool queue_rcu_work(struct workqueue_struct *wq, struct rcu_work *rwork);

extern void __flush_workqueue(struct workqueue_struct *wq);
extern void drain_workqueue(struct workqueue_struct *wq);

extern int schedule_on_each_cpu(work_func_t func);

int execute_in_process_context(work_func_t fn, struct execute_work *);

extern bool flush_work(struct work_struct *work);
extern bool cancel_work(struct work_struct *work);
extern bool cancel_work_sync(struct work_struct *work);

extern bool flush_delayed_work(struct delayed_work *dwork);
extern bool cancel_delayed_work(struct delayed_work *dwork);
extern bool cancel_delayed_work_sync(struct delayed_work *dwork);

extern bool flush_rcu_work(struct rcu_work *rwork);

extern void workqueue_set_max_active(struct workqueue_struct *wq,
				     int max_active);
extern struct work_struct *current_work(void);
extern bool current_is_workqueue_rescuer(void);
extern bool workqueue_congested(int cpu, struct workqueue_struct *wq);
extern unsigned int work_busy(struct work_struct *work);
extern __printf(1, 2) void set_worker_desc(const char *fmt, ...);
extern void print_worker_info(const char *log_lvl, struct task_struct *task);
extern void show_all_workqueues(void);
extern void show_freezable_workqueues(void);
extern void show_one_workqueue(struct workqueue_struct *wq);
extern void wq_worker_comm(char *buf, size_t size, struct task_struct *task);
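
/*
 * Illustrative sketch (hypothetical, not part of this header): a typical
 * dedicated-workqueue life cycle.  destroy_workqueue() drains remaining
 * work items before freeing the workqueue, so delayed and self-requeueing
 * items should be stopped first.  "foo_*" names are made up.
 *
 *	foo->wq = alloc_workqueue("foo_wq", WQ_UNBOUND | WQ_FREEZABLE, 0);
 *	if (!foo->wq)
 *		return -ENOMEM;
 *
 *	queue_work(foo->wq, &foo->scan_work);
 *
 *	// on teardown
 *	cancel_delayed_work_sync(&foo->poll_work);
 *	destroy_workqueue(foo->wq);
 */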

/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns %false if @work was already on a queue, %true otherwise.
 *
 * We queue the work to the CPU on which it was submitted, but if the CPU dies
 * it can be processed by another CPU.
 *
 * Memory-ordering properties:  If it returns %true, guarantees that all stores
 * preceding the call to queue_work() in the program order will be visible from
 * the CPU which will execute @work by the time such work executes, e.g.,
 *
 * { x is initially 0 }
 *
 *   CPU0				CPU1
 *
 *   WRITE_ONCE(x, 1);			[ @work is being executed ]
 *   r0 = queue_work(wq, work);		  r1 = READ_ONCE(x);
 *
 * Forbids: r0 == true && r1 == 0
 */
static inline bool queue_work(struct workqueue_struct *wq,
			      struct work_struct *work)
{
	return queue_work_on(WORK_CPU_UNBOUND, wq, work);
}

/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Equivalent to queue_delayed_work_on() but tries to use the local CPU.
 */
static inline bool queue_delayed_work(struct workqueue_struct *wq,
				      struct delayed_work *dwork,
				      unsigned long delay)
{
	return queue_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay);
}

/**
 * mod_delayed_work - modify delay of or queue a delayed work
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * mod_delayed_work_on() on local CPU.
 */
static inline bool mod_delayed_work(struct workqueue_struct *wq,
				    struct delayed_work *dwork,
				    unsigned long delay)
{
	return mod_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay);
}

/**
 * schedule_work_on - put work task on a specific cpu
 * @cpu: cpu to put the work task on
 * @work: job to be done
 *
 * This puts a job on a specific cpu.
 */
static inline bool schedule_work_on(int cpu, struct work_struct *work)
{
	return queue_work_on(cpu, system_wq, work);
}

/**
 * schedule_work - put work task in global workqueue
 * @work: job to be done
 *
 * Returns %false if @work was already on the kernel-global workqueue and
 * %true otherwise.
 *
 * This puts a job in the kernel-global workqueue if it was not already
 * queued and leaves it in the same position on the kernel-global
 * workqueue otherwise.
 *
 * Shares the same memory-ordering properties of queue_work(), cf. the
 * DocBook header of queue_work().
 */
static inline bool schedule_work(struct work_struct *work)
{
	return queue_work(system_wq, work);
}
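
/*
 * Illustrative sketch (hypothetical, not part of this header): a common
 * "(re)arm a timeout" pattern built on mod_delayed_work() and
 * cancel_delayed_work_sync().  mod_delayed_work() queues the item if it is
 * idle or pushes its timer back if it is already pending, which makes it a
 * convenient debounce/watchdog primitive.  "foo_*" names are made up.
 *
 *	// kick (or re-kick) the timeout from the fast path
 *	mod_delayed_work(system_wq, &foo->timeout_work, msecs_to_jiffies(500));
 *
 *	// on successful completion, or on teardown
 *	cancel_delayed_work_sync(&foo->timeout_work);
 */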

/*
 * Detect attempts to flush system-wide workqueues at compile time when
 * possible.  Warn about attempts to flush system-wide workqueues at runtime.
 *
 * See https://lkml.kernel.org/r/[email protected]
 * for reasons and steps for converting system-wide workqueues into local workqueues.
 */
extern void __warn_flushing_systemwide_wq(void)
	__compiletime_warning("Please avoid flushing system-wide workqueues.");

/* Please stop using this function; it will be removed in the near future. */
#define flush_scheduled_work()						\
({									\
	__warn_flushing_systemwide_wq();				\
	__flush_workqueue(system_wq);					\
})

#define flush_workqueue(wq)						\
({									\
	struct workqueue_struct *_wq = (wq);				\
									\
	if ((__builtin_constant_p(_wq == system_wq) &&			\
	     _wq == system_wq) ||					\
	    (__builtin_constant_p(_wq == system_highpri_wq) &&		\
	     _wq == system_highpri_wq) ||				\
	    (__builtin_constant_p(_wq == system_long_wq) &&		\
	     _wq == system_long_wq) ||					\
	    (__builtin_constant_p(_wq == system_unbound_wq) &&		\
	     _wq == system_unbound_wq) ||				\
	    (__builtin_constant_p(_wq == system_freezable_wq) &&	\
	     _wq == system_freezable_wq) ||				\
	    (__builtin_constant_p(_wq == system_power_efficient_wq) &&	\
	     _wq == system_power_efficient_wq) ||			\
	    (__builtin_constant_p(_wq == system_freezable_power_efficient_wq) && \
	     _wq == system_freezable_power_efficient_wq))		\
		__warn_flushing_systemwide_wq();			\
	__flush_workqueue(_wq);						\
})

/**
 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
 * @cpu: cpu to use
 * @dwork: job to be done
 * @delay: number of jiffies to wait
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue on the specified CPU.
 */
static inline bool schedule_delayed_work_on(int cpu, struct delayed_work *dwork,
					    unsigned long delay)
{
	return queue_delayed_work_on(cpu, system_wq, dwork, delay);
}

/**
 * schedule_delayed_work - put work task in global workqueue after delay
 * @dwork: job to be done
 * @delay: number of jiffies to wait or 0 for immediate execution
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue.
 */
static inline bool schedule_delayed_work(struct delayed_work *dwork,
					 unsigned long delay)
{
	return queue_delayed_work(system_wq, dwork, delay);
}

#ifndef CONFIG_SMP
static inline long work_on_cpu(int cpu, long (*fn)(void *), void *arg)
{
	return fn(arg);
}
static inline long work_on_cpu_safe(int cpu, long (*fn)(void *), void *arg)
{
	return fn(arg);
}
#else
long work_on_cpu(int cpu, long (*fn)(void *), void *arg);
long work_on_cpu_safe(int cpu, long (*fn)(void *), void *arg);
#endif /* CONFIG_SMP */

#ifdef CONFIG_FREEZER
extern void freeze_workqueues_begin(void);
extern bool freeze_workqueues_busy(void);
extern void thaw_workqueues(void);
#endif /* CONFIG_FREEZER */

#ifdef CONFIG_SYSFS
int workqueue_sysfs_register(struct workqueue_struct *wq);
#else /* CONFIG_SYSFS */
static inline int workqueue_sysfs_register(struct workqueue_struct *wq)
{ return 0; }
#endif /* CONFIG_SYSFS */

#ifdef CONFIG_WQ_WATCHDOG
void wq_watchdog_touch(int cpu);
#else /* CONFIG_WQ_WATCHDOG */
static inline void wq_watchdog_touch(int cpu) { }
#endif /* CONFIG_WQ_WATCHDOG */

#ifdef CONFIG_SMP
int workqueue_prepare_cpu(unsigned int cpu);
int workqueue_online_cpu(unsigned int cpu);
int workqueue_offline_cpu(unsigned int cpu);
#endif

void __init workqueue_init_early(void);
void __init workqueue_init(void);
void __init workqueue_init_topology(void);

#endif