/* SPDX-License-Identifier: GPL-2.0 */
/*
 * workqueue.h --- work queue handling for Linux.
 */

#ifndef _LINUX_WORKQUEUE_H
#define _LINUX_WORKQUEUE_H

#include <linux/timer.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/lockdep.h>
#include <linux/threads.h>
#include <linux/atomic.h>
#include <linux/cpumask.h>

struct workqueue_struct;

struct work_struct;
typedef void (*work_func_t)(struct work_struct *work);
void delayed_work_timer_fn(struct timer_list *t);

/*
 * The first word is the work queue pointer and the flags rolled into
 * one.
 */
#define work_data_bits(work) ((unsigned long *)(&(work)->data))

enum {
	WORK_STRUCT_PENDING_BIT	= 0,	/* work item is pending execution */
	WORK_STRUCT_DELAYED_BIT	= 1,	/* work item is delayed */
	WORK_STRUCT_PWQ_BIT	= 2,	/* data points to pwq */
	WORK_STRUCT_LINKED_BIT	= 3,	/* next work is linked to this one */
#ifdef CONFIG_DEBUG_OBJECTS_WORK
	WORK_STRUCT_STATIC_BIT	= 4,	/* static initializer (debugobjects) */
	WORK_STRUCT_COLOR_SHIFT	= 5,	/* color for workqueue flushing */
#else
	WORK_STRUCT_COLOR_SHIFT	= 4,	/* color for workqueue flushing */
#endif

	WORK_STRUCT_COLOR_BITS	= 4,

	WORK_STRUCT_PENDING	= 1 << WORK_STRUCT_PENDING_BIT,
	WORK_STRUCT_DELAYED	= 1 << WORK_STRUCT_DELAYED_BIT,
	WORK_STRUCT_PWQ		= 1 << WORK_STRUCT_PWQ_BIT,
	WORK_STRUCT_LINKED	= 1 << WORK_STRUCT_LINKED_BIT,
#ifdef CONFIG_DEBUG_OBJECTS_WORK
	WORK_STRUCT_STATIC	= 1 << WORK_STRUCT_STATIC_BIT,
#else
	WORK_STRUCT_STATIC	= 0,
#endif

	/*
	 * The last color is reserved as the no-color value, used for work
	 * items which don't participate in workqueue flushing.
	 */
	WORK_NR_COLORS		= (1 << WORK_STRUCT_COLOR_BITS) - 1,
	WORK_NO_COLOR		= WORK_NR_COLORS,

	/* not bound to any CPU, prefer the local CPU */
	WORK_CPU_UNBOUND	= NR_CPUS,

	/*
	 * Reserve 8 bits off of the pwq pointer w/ debugobjects turned off.
	 * This makes pwqs aligned to 256 bytes and allows 15 workqueue
	 * flush colors.
	 */
	WORK_STRUCT_FLAG_BITS	= WORK_STRUCT_COLOR_SHIFT +
				  WORK_STRUCT_COLOR_BITS,

	/* data contains off-queue information when !WORK_STRUCT_PWQ */
	WORK_OFFQ_FLAG_BASE	= WORK_STRUCT_COLOR_SHIFT,

	__WORK_OFFQ_CANCELING	= WORK_OFFQ_FLAG_BASE,
	WORK_OFFQ_CANCELING	= (1 << __WORK_OFFQ_CANCELING),

	/*
	 * When a work item is off queue, its high bits point to the last
	 * pool it was on.  Cap at 31 bits and use the highest number to
	 * indicate that no pool is associated.
	 */
	WORK_OFFQ_FLAG_BITS	= 1,
	WORK_OFFQ_POOL_SHIFT	= WORK_OFFQ_FLAG_BASE + WORK_OFFQ_FLAG_BITS,
	WORK_OFFQ_LEFT		= BITS_PER_LONG - WORK_OFFQ_POOL_SHIFT,
	WORK_OFFQ_POOL_BITS	= WORK_OFFQ_LEFT <= 31 ? WORK_OFFQ_LEFT : 31,
	WORK_OFFQ_POOL_NONE	= (1LU << WORK_OFFQ_POOL_BITS) - 1,

	/* convenience constants */
	WORK_STRUCT_FLAG_MASK	= (1UL << WORK_STRUCT_FLAG_BITS) - 1,
	WORK_STRUCT_WQ_DATA_MASK = ~WORK_STRUCT_FLAG_MASK,
	WORK_STRUCT_NO_POOL	= (unsigned long)WORK_OFFQ_POOL_NONE << WORK_OFFQ_POOL_SHIFT,

	/* bit mask for work_busy() return values */
	WORK_BUSY_PENDING	= 1 << 0,
	WORK_BUSY_RUNNING	= 1 << 1,

	/* maximum string length for set_worker_desc() */
	WORKER_DESC_LEN		= 24,
};

struct work_struct {
	atomic_long_t data;
	struct list_head entry;
	work_func_t func;
#ifdef CONFIG_LOCKDEP
	struct lockdep_map lockdep_map;
#endif
};

#define WORK_DATA_INIT()	ATOMIC_LONG_INIT((unsigned long)WORK_STRUCT_NO_POOL)
#define WORK_DATA_STATIC_INIT()	\
	ATOMIC_LONG_INIT((unsigned long)(WORK_STRUCT_NO_POOL | WORK_STRUCT_STATIC))

struct delayed_work {
	struct work_struct work;
	struct timer_list timer;

	/* target workqueue and CPU ->timer uses to queue ->work */
	struct workqueue_struct *wq;
	int cpu;
};

/**
 * struct workqueue_attrs - A struct for workqueue attributes.
 *
 * This can be used to change attributes of an unbound workqueue.
 */
struct workqueue_attrs {
	/**
	 * @nice: nice level
	 */
	int nice;

	/**
	 * @cpumask: allowed CPUs
	 */
	cpumask_var_t cpumask;

	/**
	 * @no_numa: disable NUMA affinity
	 *
	 * Unlike other fields, ``no_numa`` isn't a property of a worker_pool.
	 * It only modifies how :c:func:`apply_workqueue_attrs` selects pools
	 * and thus doesn't participate in pool hash calculations or equality
	 * comparisons.
	 */
	bool no_numa;
};

static inline struct delayed_work *to_delayed_work(struct work_struct *work)
{
	return container_of(work, struct delayed_work, work);
}

struct execute_work {
	struct work_struct work;
};
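/*
 * Example: recovering the enclosing objects from inside a work handler.
 * A handler only receives the struct work_struct pointer; to_delayed_work()
 * and container_of() walk back out to the surrounding state.  A minimal
 * sketch -- my_device, poll_work, poll_hardware() and my_poll_fn below are
 * hypothetical names, not part of this header:
 *
 *	struct my_device {
 *		struct delayed_work poll_work;
 *		int status;
 *	};
 *
 *	static void my_poll_fn(struct work_struct *work)
 *	{
 *		struct delayed_work *dwork = to_delayed_work(work);
 *		struct my_device *dev = container_of(dwork, struct my_device,
 *						     poll_work);
 *
 *		dev->status = poll_hardware(dev);
 *	}
 */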
#ifdef CONFIG_LOCKDEP
/*
 * NB: because we have to copy the lockdep_map, setting _key
 * here is required, otherwise it could get initialised to the
 * copy of the lockdep_map!
 */
#define __WORK_INIT_LOCKDEP_MAP(n, k) \
	.lockdep_map = STATIC_LOCKDEP_MAP_INIT(n, k),
#else
#define __WORK_INIT_LOCKDEP_MAP(n, k)
#endif

#define __WORK_INITIALIZER(n, f) {					\
	.data = WORK_DATA_STATIC_INIT(),				\
	.entry	= { &(n).entry, &(n).entry },				\
	.func = (f),							\
	__WORK_INIT_LOCKDEP_MAP(#n, &(n))				\
	}

#define __DELAYED_WORK_INITIALIZER(n, f, tflags) {			\
	.work = __WORK_INITIALIZER((n).work, (f)),			\
	.timer = __TIMER_INITIALIZER((TIMER_FUNC_TYPE)delayed_work_timer_fn,\
				     (TIMER_DATA_TYPE)&(n.timer),	\
				     (tflags) | TIMER_IRQSAFE),		\
	}

#define DECLARE_WORK(n, f)						\
	struct work_struct n = __WORK_INITIALIZER(n, f)

#define DECLARE_DELAYED_WORK(n, f)					\
	struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, 0)

#define DECLARE_DEFERRABLE_WORK(n, f)					\
	struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, TIMER_DEFERRABLE)

#ifdef CONFIG_DEBUG_OBJECTS_WORK
extern void __init_work(struct work_struct *work, int onstack);
extern void destroy_work_on_stack(struct work_struct *work);
extern void destroy_delayed_work_on_stack(struct delayed_work *work);
static inline unsigned int work_static(struct work_struct *work)
{
	return *work_data_bits(work) & WORK_STRUCT_STATIC;
}
#else
static inline void __init_work(struct work_struct *work, int onstack) { }
static inline void destroy_work_on_stack(struct work_struct *work) { }
static inline void destroy_delayed_work_on_stack(struct delayed_work *work) { }
static inline unsigned int work_static(struct work_struct *work) { return 0; }
#endif
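/*
 * Example: compile-time initialization with the DECLARE_*() helpers.  A
 * minimal sketch -- flush_fn and the item names below are hypothetical:
 *
 *	static void flush_fn(struct work_struct *work);
 *
 *	static DECLARE_WORK(flush_work_item, flush_fn);
 *	static DECLARE_DELAYED_WORK(flush_dwork_item, flush_fn);
 *
 * Statically declared items are ready to queue immediately.  With
 * CONFIG_DEBUG_OBJECTS_WORK, WORK_DATA_STATIC_INIT() sets
 * WORK_STRUCT_STATIC so debugobjects knows the item was not dynamically
 * initialized.
 */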
/*
 * initialize all of a work item in one go
 *
 * NOTE! No point in using "atomic_long_set()": using a direct
 * assignment of the work data initializer allows the compiler
 * to generate better code.
 */
#ifdef CONFIG_LOCKDEP
#define __INIT_WORK(_work, _func, _onstack)				\
	do {								\
		static struct lock_class_key __key;			\
									\
		__init_work((_work), _onstack);				\
		(_work)->data = (atomic_long_t) WORK_DATA_INIT();	\
		lockdep_init_map(&(_work)->lockdep_map, "(work_completion)"#_work, &__key, 0); \
		INIT_LIST_HEAD(&(_work)->entry);			\
		(_work)->func = (_func);				\
	} while (0)
#else
#define __INIT_WORK(_work, _func, _onstack)				\
	do {								\
		__init_work((_work), _onstack);				\
		(_work)->data = (atomic_long_t) WORK_DATA_INIT();	\
		INIT_LIST_HEAD(&(_work)->entry);			\
		(_work)->func = (_func);				\
	} while (0)
#endif

#define INIT_WORK(_work, _func)						\
	__INIT_WORK((_work), (_func), 0)

#define INIT_WORK_ONSTACK(_work, _func)					\
	__INIT_WORK((_work), (_func), 1)

#define __INIT_DELAYED_WORK(_work, _func, _tflags)			\
	do {								\
		INIT_WORK(&(_work)->work, (_func));			\
		__setup_timer(&(_work)->timer,				\
			      (TIMER_FUNC_TYPE)delayed_work_timer_fn,	\
			      (TIMER_DATA_TYPE)&(_work)->timer,		\
			      (_tflags) | TIMER_IRQSAFE);		\
	} while (0)

#define __INIT_DELAYED_WORK_ONSTACK(_work, _func, _tflags)		\
	do {								\
		INIT_WORK_ONSTACK(&(_work)->work, (_func));		\
		__setup_timer_on_stack(&(_work)->timer,			\
				       (TIMER_FUNC_TYPE)delayed_work_timer_fn,\
				       (TIMER_DATA_TYPE)&(_work)->timer,\
				       (_tflags) | TIMER_IRQSAFE);	\
	} while (0)

#define INIT_DELAYED_WORK(_work, _func)					\
	__INIT_DELAYED_WORK(_work, _func, 0)

#define INIT_DELAYED_WORK_ONSTACK(_work, _func)				\
	__INIT_DELAYED_WORK_ONSTACK(_work, _func, 0)

#define INIT_DEFERRABLE_WORK(_work, _func)				\
	__INIT_DELAYED_WORK(_work, _func, TIMER_DEFERRABLE)

#define INIT_DEFERRABLE_WORK_ONSTACK(_work, _func)			\
	__INIT_DELAYED_WORK_ONSTACK(_work, _func, TIMER_DEFERRABLE)

/**
 * work_pending - Find out whether a work item is currently pending
 * @work: The work item in question
 */
#define work_pending(work)						\
	test_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))

/**
 * delayed_work_pending - Find out whether a delayable work item is currently
 * pending
 * @w: The work item in question
 */
#define delayed_work_pending(w)						\
	work_pending(&(w)->work)
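/*
 * Example: run-time initialization, typically done once in a probe or
 * open path before the item is first queued.  A minimal sketch assuming
 * a hypothetical struct my_device embedding the items:
 *
 *	INIT_WORK(&dev->reset_work, my_reset_fn);
 *	INIT_DELAYED_WORK(&dev->poll_work, my_poll_fn);
 *
 *	if (!work_pending(&dev->reset_work))
 *		queue_work(system_wq, &dev->reset_work);
 *
 * Items living on the stack must use the *_ONSTACK variants and be torn
 * down with destroy_work_on_stack()/destroy_delayed_work_on_stack()
 * before the frame goes away.
 */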
/*
 * Workqueue flags and constants.  For details, please refer to
 * Documentation/core-api/workqueue.rst.
 */
enum {
	WQ_UNBOUND		= 1 << 1, /* not bound to any cpu */
	WQ_FREEZABLE		= 1 << 2, /* freeze during suspend */
	WQ_MEM_RECLAIM		= 1 << 3, /* may be used for memory reclaim */
	WQ_HIGHPRI		= 1 << 4, /* high priority */
	WQ_CPU_INTENSIVE	= 1 << 5, /* cpu intensive workqueue */
	WQ_SYSFS		= 1 << 6, /* visible in sysfs, see wq_sysfs_register() */

	/*
	 * Per-cpu workqueues are generally preferred because they tend to
	 * show better performance thanks to cache locality.  Per-cpu
	 * workqueues exclude the scheduler from choosing the CPU to
	 * execute the worker threads, which has an unfortunate side effect
	 * of increasing power consumption.
	 *
	 * The scheduler considers a CPU idle if it doesn't have any task
	 * to execute and tries to keep idle cores idle to conserve power;
	 * however, for example, a per-cpu work item scheduled from an
	 * interrupt handler on an idle CPU will force the scheduler to
	 * execute the work item on that CPU breaking the idleness, which in
	 * turn may lead to more scheduling choices which are sub-optimal
	 * in terms of power consumption.
	 *
	 * Workqueues marked with WQ_POWER_EFFICIENT are per-cpu by default
	 * but become unbound if the workqueue.power_efficient kernel param
	 * is specified.  Per-cpu workqueues which are found to contribute
	 * significantly to power consumption are marked with this flag, and
	 * enabling the power_efficient mode leads to noticeable power
	 * savings at the cost of a small performance disadvantage.
	 *
	 * http://thread.gmane.org/gmane.linux.kernel/1480396
	 */
	WQ_POWER_EFFICIENT	= 1 << 7,

	__WQ_DRAINING		= 1 << 16, /* internal: workqueue is draining */
	__WQ_ORDERED		= 1 << 17, /* internal: workqueue is ordered */
	__WQ_LEGACY		= 1 << 18, /* internal: create*_workqueue() */
	__WQ_ORDERED_EXPLICIT	= 1 << 19, /* internal: alloc_ordered_workqueue() */

	WQ_MAX_ACTIVE		= 512,	  /* I like 512, better ideas? */
	WQ_MAX_UNBOUND_PER_CPU	= 4,	  /* 4 * #cpus for unbound wq */
	WQ_DFL_ACTIVE		= WQ_MAX_ACTIVE / 2,
};

/* unbound wq's aren't per-cpu, scale max_active according to #cpus */
#define WQ_UNBOUND_MAX_ACTIVE	\
	max_t(int, WQ_MAX_ACTIVE, num_possible_cpus() * WQ_MAX_UNBOUND_PER_CPU)

/*
 * System-wide workqueues which are always present.
 *
 * system_wq is the one used by schedule[_delayed]_work[_on]().
 * Multi-CPU multi-threaded.  There are users which expect relatively
 * short queue flush time.  Don't queue works which can run for too
 * long.
 *
 * system_highpri_wq is similar to system_wq but for work items which
 * require WQ_HIGHPRI.
 *
 * system_long_wq is similar to system_wq but may host long running
 * works.  Queue flushing might take relatively long.
 *
 * system_unbound_wq is an unbound workqueue.  Workers are not bound to
 * any specific CPU, not concurrency managed, and all queued works are
 * executed immediately as long as the max_active limit is not reached
 * and resources are available.
 *
 * system_freezable_wq is equivalent to system_wq except that it's
 * freezable.
 *
 * *_power_efficient_wq are inclined towards saving power and converted
 * into WQ_UNBOUND variants if 'wq_power_efficient' is enabled; otherwise,
 * they are the same as their non-power-efficient counterparts - e.g.
 * system_power_efficient_wq is identical to system_wq if
 * 'wq_power_efficient' is disabled.  See WQ_POWER_EFFICIENT for more info.
 */
extern struct workqueue_struct *system_wq;
extern struct workqueue_struct *system_highpri_wq;
extern struct workqueue_struct *system_long_wq;
extern struct workqueue_struct *system_unbound_wq;
extern struct workqueue_struct *system_freezable_wq;
extern struct workqueue_struct *system_power_efficient_wq;
extern struct workqueue_struct *system_freezable_power_efficient_wq;
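/*
 * Example: picking a system workqueue.  A minimal sketch; which queue
 * fits is the caller's judgment, and the work items are hypothetical:
 *
 *	queue_work(system_wq, &short_work);
 *	queue_work(system_long_wq, &slow_work);
 *	queue_work(system_unbound_wq, &any_cpu_work);
 *
 * short_work is expected to finish quickly, slow_work may run for a
 * while, and any_cpu_work has no cache-locality benefit from running
 * on a particular CPU.
 */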
extern struct workqueue_struct *
__alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active,
	struct lock_class_key *key, const char *lock_name, ...) __printf(1, 6);

/**
 * alloc_workqueue - allocate a workqueue
 * @fmt: printf format for the name of the workqueue
 * @flags: WQ_* flags
 * @max_active: max in-flight work items, 0 for default
 * @args...: args for @fmt
 *
 * Allocate a workqueue with the specified parameters.  For detailed
 * information on WQ_* flags, please refer to
 * Documentation/core-api/workqueue.rst.
 *
 * The __lock_name macro dance is to guarantee that a single lock_class_key
 * doesn't end up with different names, which isn't allowed by lockdep.
 *
 * RETURNS:
 * Pointer to the allocated workqueue on success, %NULL on failure.
 */
#ifdef CONFIG_LOCKDEP
#define alloc_workqueue(fmt, flags, max_active, args...)		\
({									\
	static struct lock_class_key __key;				\
	const char *__lock_name;					\
									\
	__lock_name = "(wq_completion)"#fmt#args;			\
									\
	__alloc_workqueue_key((fmt), (flags), (max_active),		\
			      &__key, __lock_name, ##args);		\
})
#else
#define alloc_workqueue(fmt, flags, max_active, args...)		\
	__alloc_workqueue_key((fmt), (flags), (max_active),		\
			      NULL, NULL, ##args)
#endif

/**
 * alloc_ordered_workqueue - allocate an ordered workqueue
 * @fmt: printf format for the name of the workqueue
 * @flags: WQ_* flags (only WQ_FREEZABLE and WQ_MEM_RECLAIM are meaningful)
 * @args...: args for @fmt
 *
 * Allocate an ordered workqueue.  An ordered workqueue executes at
 * most one work item at any given time in the queued order.  They are
 * implemented as unbound workqueues with @max_active of one.
 *
 * RETURNS:
 * Pointer to the allocated workqueue on success, %NULL on failure.
 */
#define alloc_ordered_workqueue(fmt, flags, args...)			\
	alloc_workqueue(fmt, WQ_UNBOUND | __WQ_ORDERED |		\
			__WQ_ORDERED_EXPLICIT | (flags), 1, ##args)

#define create_workqueue(name)						\
	alloc_workqueue("%s", __WQ_LEGACY | WQ_MEM_RECLAIM, 1, (name))
#define create_freezable_workqueue(name)				\
	alloc_workqueue("%s", __WQ_LEGACY | WQ_FREEZABLE | WQ_UNBOUND |	\
			WQ_MEM_RECLAIM, 1, (name))
#define create_singlethread_workqueue(name)				\
	alloc_ordered_workqueue("%s", __WQ_LEGACY | WQ_MEM_RECLAIM, name)

extern void destroy_workqueue(struct workqueue_struct *wq);

struct workqueue_attrs *alloc_workqueue_attrs(gfp_t gfp_mask);
void free_workqueue_attrs(struct workqueue_attrs *attrs);
int apply_workqueue_attrs(struct workqueue_struct *wq,
			  const struct workqueue_attrs *attrs);
int workqueue_set_unbound_cpumask(cpumask_var_t cpumask);

extern bool queue_work_on(int cpu, struct workqueue_struct *wq,
			  struct work_struct *work);
extern bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct delayed_work *work, unsigned long delay);
extern bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay);

extern void flush_workqueue(struct workqueue_struct *wq);
extern void drain_workqueue(struct workqueue_struct *wq);

extern int schedule_on_each_cpu(work_func_t func);

int execute_in_process_context(work_func_t fn, struct execute_work *);

extern bool flush_work(struct work_struct *work);
extern bool cancel_work(struct work_struct *work);
extern bool cancel_work_sync(struct work_struct *work);

extern bool flush_delayed_work(struct delayed_work *dwork);
extern bool cancel_delayed_work(struct delayed_work *dwork);
extern bool cancel_delayed_work_sync(struct delayed_work *dwork);
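/*
 * Example: typical lifecycle of a dedicated workqueue.  A minimal sketch
 * with hypothetical names; WQ_MEM_RECLAIM is only warranted if the items
 * may run on the memory-reclaim path:
 *
 *	wq = alloc_workqueue("my_wq", WQ_MEM_RECLAIM, 0);
 *	if (!wq)
 *		return -ENOMEM;
 *
 *	queue_work_on(WORK_CPU_UNBOUND, wq, &dev->io_work);
 *	queue_delayed_work_on(WORK_CPU_UNBOUND, wq, &dev->retry_work, HZ);
 *
 *	cancel_delayed_work_sync(&dev->retry_work);
 *	cancel_work_sync(&dev->io_work);
 *	destroy_workqueue(wq);
 */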
extern void workqueue_set_max_active(struct workqueue_struct *wq,
				     int max_active);
extern bool current_is_workqueue_rescuer(void);
extern bool workqueue_congested(int cpu, struct workqueue_struct *wq);
extern unsigned int work_busy(struct work_struct *work);
extern __printf(1, 2) void set_worker_desc(const char *fmt, ...);
extern void print_worker_info(const char *log_lvl, struct task_struct *task);
extern void show_workqueue_state(void);

/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns %false if @work was already on a queue, %true otherwise.
 *
 * We queue the work to the CPU on which it was submitted, but if the CPU dies
 * it can be processed by another CPU.
 */
static inline bool queue_work(struct workqueue_struct *wq,
			      struct work_struct *work)
{
	return queue_work_on(WORK_CPU_UNBOUND, wq, work);
}

/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Equivalent to queue_delayed_work_on() but tries to use the local CPU.
 */
static inline bool queue_delayed_work(struct workqueue_struct *wq,
				      struct delayed_work *dwork,
				      unsigned long delay)
{
	return queue_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay);
}

/**
 * mod_delayed_work - modify delay of or queue a delayed work
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * mod_delayed_work_on() on local CPU.
 */
static inline bool mod_delayed_work(struct workqueue_struct *wq,
				    struct delayed_work *dwork,
				    unsigned long delay)
{
	return mod_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay);
}

/**
 * schedule_work_on - put work task on a specific cpu
 * @cpu: cpu to put the work task on
 * @work: job to be done
 *
 * This puts a job on a specific cpu.
 */
static inline bool schedule_work_on(int cpu, struct work_struct *work)
{
	return queue_work_on(cpu, system_wq, work);
}

/**
 * schedule_work - put work task in global workqueue
 * @work: job to be done
 *
 * Returns %false if @work was already on the kernel-global workqueue and
 * %true otherwise.
 *
 * This puts a job in the kernel-global workqueue if it was not already
 * queued and leaves it in the same position on the kernel-global
 * workqueue otherwise.
 */
static inline bool schedule_work(struct work_struct *work)
{
	return queue_work(system_wq, work);
}
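/*
 * Example: deferring from atomic context.  schedule_work() is safe to
 * call from an interrupt handler; the work function then runs later in
 * process context.  A minimal sketch -- my_irq_handler, my_device and
 * irq_bh_work are hypothetical:
 *
 *	static irqreturn_t my_irq_handler(int irq, void *data)
 *	{
 *		struct my_device *dev = data;
 *
 *		schedule_work(&dev->irq_bh_work);
 *		return IRQ_HANDLED;
 *	}
 */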
/**
 * flush_scheduled_work - ensure that any scheduled work has run to completion.
 *
 * Forces execution of the kernel-global workqueue and blocks until its
 * completion.
 *
 * Think twice before calling this function!  It's very easy to get into
 * trouble if you don't take great care.  Either of the following situations
 * will lead to deadlock:
 *
 *	One of the work items currently on the workqueue needs to acquire
 *	a lock held by your code or its caller.
 *
 *	Your code is running in the context of a work routine.
 *
 * They will be detected by lockdep when they occur, but the first might not
 * occur very often.  It depends on what work items are on the workqueue and
 * what locks they need, which you have no control over.
 *
 * In most situations flushing the entire workqueue is overkill; you merely
 * need to know that a particular work item isn't queued and isn't running.
 * In such cases you should use cancel_delayed_work_sync() or
 * cancel_work_sync() instead.
 */
static inline void flush_scheduled_work(void)
{
	flush_workqueue(system_wq);
}

/**
 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
 * @cpu: cpu to use
 * @dwork: job to be done
 * @delay: number of jiffies to wait
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue on the specified CPU.
 */
static inline bool schedule_delayed_work_on(int cpu, struct delayed_work *dwork,
					    unsigned long delay)
{
	return queue_delayed_work_on(cpu, system_wq, dwork, delay);
}

/**
 * schedule_delayed_work - put work task in global workqueue after delay
 * @dwork: job to be done
 * @delay: number of jiffies to wait or 0 for immediate execution
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue.
 */
static inline bool schedule_delayed_work(struct delayed_work *dwork,
					 unsigned long delay)
{
	return queue_delayed_work(system_wq, dwork, delay);
}

#ifndef CONFIG_SMP
static inline long work_on_cpu(int cpu, long (*fn)(void *), void *arg)
{
	return fn(arg);
}
static inline long work_on_cpu_safe(int cpu, long (*fn)(void *), void *arg)
{
	return fn(arg);
}
#else
long work_on_cpu(int cpu, long (*fn)(void *), void *arg);
long work_on_cpu_safe(int cpu, long (*fn)(void *), void *arg);
#endif /* CONFIG_SMP */

#ifdef CONFIG_FREEZER
extern void freeze_workqueues_begin(void);
extern bool freeze_workqueues_busy(void);
extern void thaw_workqueues(void);
#endif /* CONFIG_FREEZER */

#ifdef CONFIG_SYSFS
int workqueue_sysfs_register(struct workqueue_struct *wq);
#else /* CONFIG_SYSFS */
static inline int workqueue_sysfs_register(struct workqueue_struct *wq)
{ return 0; }
#endif /* CONFIG_SYSFS */

#ifdef CONFIG_WQ_WATCHDOG
void wq_watchdog_touch(int cpu);
#else /* CONFIG_WQ_WATCHDOG */
static inline void wq_watchdog_touch(int cpu) { }
#endif /* CONFIG_WQ_WATCHDOG */

#ifdef CONFIG_SMP
int workqueue_prepare_cpu(unsigned int cpu);
int workqueue_online_cpu(unsigned int cpu);
int workqueue_offline_cpu(unsigned int cpu);
#endif

int __init workqueue_init_early(void);
int __init workqueue_init(void);

#endif