/* SPDX-License-Identifier: GPL-2.0 */
/*
 * workqueue.h --- work queue handling for Linux.
 */

#ifndef _LINUX_WORKQUEUE_H
#define _LINUX_WORKQUEUE_H

#include <linux/timer.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/lockdep.h>
#include <linux/threads.h>
#include <linux/atomic.h>
#include <linux/cpumask_types.h>
#include <linux/rcupdate.h>
#include <linux/workqueue_types.h>

/*
 * The first word is the work queue pointer and the flags rolled into
 * one
 */
#define work_data_bits(work) ((unsigned long *)(&(work)->data))

enum work_bits {
	WORK_STRUCT_PENDING_BIT	= 0,	/* work item is pending execution */
	WORK_STRUCT_INACTIVE_BIT,	/* work item is inactive */
	WORK_STRUCT_PWQ_BIT,		/* data points to pwq */
	WORK_STRUCT_LINKED_BIT,		/* next work is linked to this one */
#ifdef CONFIG_DEBUG_OBJECTS_WORK
	WORK_STRUCT_STATIC_BIT,		/* static initializer (debugobjects) */
#endif
	WORK_STRUCT_FLAG_BITS,

	/* color for workqueue flushing */
	WORK_STRUCT_COLOR_SHIFT	= WORK_STRUCT_FLAG_BITS,
	WORK_STRUCT_COLOR_BITS	= 4,

	/*
	 * When WORK_STRUCT_PWQ is set, reserve 8 bits off of pwq pointer w/
	 * debugobjects turned off. This makes pwqs aligned to 256 bytes (512
	 * bytes w/ DEBUG_OBJECTS_WORK) and allows 16 workqueue flush colors.
	 *
	 * MSB
	 * [ pwq pointer ] [ flush color ] [ STRUCT flags ]
	 *                     4 bits        4 or 5 bits
	 */
	WORK_STRUCT_PWQ_SHIFT	= WORK_STRUCT_COLOR_SHIFT + WORK_STRUCT_COLOR_BITS,

	/*
	 * data contains off-queue information when !WORK_STRUCT_PWQ.
	 *
	 * MSB
	 * [ pool ID ] [ disable depth ] [ OFFQ flags ] [ STRUCT flags ]
	 *                  16 bits          1 bit        4 or 5 bits
	 */
	WORK_OFFQ_FLAG_SHIFT	= WORK_STRUCT_FLAG_BITS,
	WORK_OFFQ_BH_BIT	= WORK_OFFQ_FLAG_SHIFT,
	WORK_OFFQ_FLAG_END,
	WORK_OFFQ_FLAG_BITS	= WORK_OFFQ_FLAG_END - WORK_OFFQ_FLAG_SHIFT,

	WORK_OFFQ_DISABLE_SHIFT	= WORK_OFFQ_FLAG_SHIFT + WORK_OFFQ_FLAG_BITS,
	WORK_OFFQ_DISABLE_BITS	= 16,

	/*
	 * When a work item is off queue, the high bits encode off-queue flags
	 * and the last pool it was on. Cap pool ID to 31 bits and use the
	 * highest number to indicate that no pool is associated.
	 */
	WORK_OFFQ_POOL_SHIFT	= WORK_OFFQ_DISABLE_SHIFT + WORK_OFFQ_DISABLE_BITS,
	WORK_OFFQ_LEFT		= BITS_PER_LONG - WORK_OFFQ_POOL_SHIFT,
	WORK_OFFQ_POOL_BITS	= WORK_OFFQ_LEFT <= 31 ? WORK_OFFQ_LEFT : 31,
};
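
/*
 * Worked example (editor's illustration, assuming a 64-bit kernel without
 * CONFIG_DEBUG_OBJECTS_WORK): the enums above then resolve to
 *
 *	WORK_STRUCT_FLAG_BITS	= 4	(PENDING, INACTIVE, PWQ, LINKED)
 *	WORK_STRUCT_COLOR_SHIFT	= 4, WORK_STRUCT_COLOR_BITS = 4
 *	WORK_STRUCT_PWQ_SHIFT	= 8	(pwq pointers 256-byte aligned)
 *	WORK_OFFQ_FLAG_SHIFT	= 4, WORK_OFFQ_FLAG_BITS = 1 (the BH bit)
 *	WORK_OFFQ_DISABLE_SHIFT	= 5, WORK_OFFQ_DISABLE_BITS = 16
 *	WORK_OFFQ_POOL_SHIFT	= 21, WORK_OFFQ_LEFT = 64 - 21 = 43
 *	WORK_OFFQ_POOL_BITS	= 31	(WORK_OFFQ_LEFT capped at 31)
 */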

enum work_flags {
	WORK_STRUCT_PENDING	= 1 << WORK_STRUCT_PENDING_BIT,
	WORK_STRUCT_INACTIVE	= 1 << WORK_STRUCT_INACTIVE_BIT,
	WORK_STRUCT_PWQ		= 1 << WORK_STRUCT_PWQ_BIT,
	WORK_STRUCT_LINKED	= 1 << WORK_STRUCT_LINKED_BIT,
#ifdef CONFIG_DEBUG_OBJECTS_WORK
	WORK_STRUCT_STATIC	= 1 << WORK_STRUCT_STATIC_BIT,
#else
	WORK_STRUCT_STATIC	= 0,
#endif
};

enum wq_misc_consts {
	WORK_NR_COLORS		= (1 << WORK_STRUCT_COLOR_BITS),

	/* not bound to any CPU, prefer the local CPU */
	WORK_CPU_UNBOUND	= NR_CPUS,

	/* bit mask for work_busy() return values */
	WORK_BUSY_PENDING	= 1 << 0,
	WORK_BUSY_RUNNING	= 1 << 1,

	/* maximum string length for set_worker_desc() */
	WORKER_DESC_LEN		= 32,
};

/* Convenience constants - of type 'unsigned long', not 'enum'! */
#define WORK_OFFQ_BH		(1ul << WORK_OFFQ_BH_BIT)
#define WORK_OFFQ_FLAG_MASK	(((1ul << WORK_OFFQ_FLAG_BITS) - 1) << WORK_OFFQ_FLAG_SHIFT)
#define WORK_OFFQ_DISABLE_MASK	(((1ul << WORK_OFFQ_DISABLE_BITS) - 1) << WORK_OFFQ_DISABLE_SHIFT)
#define WORK_OFFQ_POOL_NONE	((1ul << WORK_OFFQ_POOL_BITS) - 1)
#define WORK_STRUCT_NO_POOL	(WORK_OFFQ_POOL_NONE << WORK_OFFQ_POOL_SHIFT)
#define WORK_STRUCT_PWQ_MASK	(~((1ul << WORK_STRUCT_PWQ_SHIFT) - 1))

#define WORK_DATA_INIT()	ATOMIC_LONG_INIT((unsigned long)WORK_STRUCT_NO_POOL)
#define WORK_DATA_STATIC_INIT()	\
	ATOMIC_LONG_INIT((unsigned long)(WORK_STRUCT_NO_POOL | WORK_STRUCT_STATIC))

struct delayed_work {
	struct work_struct work;
	struct timer_list timer;

	/* target workqueue and CPU ->timer uses to queue ->work */
	struct workqueue_struct *wq;
	int cpu;
};

struct rcu_work {
	struct work_struct work;
	struct rcu_head rcu;

	/* target workqueue ->rcu uses to queue ->work */
	struct workqueue_struct *wq;
};

enum wq_affn_scope {
	WQ_AFFN_DFL,			/* use system default */
	WQ_AFFN_CPU,			/* one pod per CPU */
	WQ_AFFN_SMT,			/* one pod per SMT */
	WQ_AFFN_CACHE,			/* one pod per LLC */
	WQ_AFFN_NUMA,			/* one pod per NUMA node */
	WQ_AFFN_SYSTEM,			/* one pod across the whole system */

	WQ_AFFN_NR_TYPES,
};

/**
 * struct workqueue_attrs - A struct for workqueue attributes.
 *
 * This can be used to change attributes of an unbound workqueue.
 */
struct workqueue_attrs {
	/**
	 * @nice: nice level
	 */
	int nice;

	/**
	 * @cpumask: allowed CPUs
	 *
	 * Work items in this workqueue are affine to these CPUs and not allowed
	 * to execute on other CPUs. A pool serving a workqueue must have the
	 * same @cpumask.
	 */
	cpumask_var_t cpumask;

	/**
	 * @__pod_cpumask: internal attribute used to create per-pod pools
	 *
	 * Internal use only.
	 *
	 * Per-pod unbound worker pools are used to improve locality. Always a
	 * subset of ->cpumask. A workqueue can be associated with multiple
	 * worker pools with disjoint @__pod_cpumask's. Whether the enforcement
	 * of a pool's @__pod_cpumask is strict depends on @affn_strict.
	 */
	cpumask_var_t __pod_cpumask;

	/**
	 * @affn_strict: affinity scope is strict
	 *
	 * If clear, workqueue will make a best-effort attempt at starting the
	 * worker inside @__pod_cpumask but the scheduler is free to migrate it
	 * outside.
	 *
	 * If set, workers are only allowed to run inside @__pod_cpumask.
	 */
	bool affn_strict;

	/*
	 * Below fields aren't properties of a worker_pool. They only modify how
	 * :c:func:`apply_workqueue_attrs` selects pools and thus don't
	 * participate in pool hash calculations or equality comparisons.
	 *
	 * If @affn_strict is set, @cpumask isn't a property of a worker_pool
	 * either.
	 */

	/**
	 * @affn_scope: unbound CPU affinity scope
	 *
	 * CPU pods are used to improve execution locality of unbound work
	 * items. There are multiple pod types, one for each wq_affn_scope, and
	 * every CPU in the system belongs to one pod in every pod type. CPUs
	 * that belong to the same pod share the worker pool. For example,
	 * selecting %WQ_AFFN_NUMA makes the workqueue use a separate worker
	 * pool for each NUMA node.
	 */
	enum wq_affn_scope affn_scope;

	/**
	 * @ordered: work items must be executed one by one in queueing order
	 */
	bool ordered;
};
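
/*
 * Example usage (editor's illustrative sketch; "my_wq" is a hypothetical
 * unbound workqueue): tweaking the attributes of an unbound workqueue with
 * the helpers declared later in this header.
 *
 *	struct workqueue_attrs *attrs = alloc_workqueue_attrs();
 *
 *	if (attrs) {
 *		attrs->nice = -5;
 *		cpumask_copy(attrs->cpumask, cpumask_of_node(0));
 *		if (apply_workqueue_attrs(my_wq, attrs))
 *			pr_warn("failed to apply workqueue attrs\n");
 *		free_workqueue_attrs(attrs);
 *	}
 */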

static inline struct delayed_work *to_delayed_work(struct work_struct *work)
{
	return container_of(work, struct delayed_work, work);
}

static inline struct rcu_work *to_rcu_work(struct work_struct *work)
{
	return container_of(work, struct rcu_work, work);
}
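
/*
 * Example usage (editor's illustrative sketch; "struct my_ctx" and
 * my_try_again() are hypothetical): a delayed-work callback recovers the
 * containing object with to_delayed_work() plus container_of().
 *
 *	static void my_retry_fn(struct work_struct *work)
 *	{
 *		struct delayed_work *dwork = to_delayed_work(work);
 *		struct my_ctx *ctx = container_of(dwork, struct my_ctx,
 *						  retry_dwork);
 *
 *		if (!my_try_again(ctx))
 *			schedule_delayed_work(&ctx->retry_dwork, HZ);
 *	}
 */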

struct execute_work {
	struct work_struct work;
};

#ifdef CONFIG_LOCKDEP
/*
 * NB: because we have to copy the lockdep_map, setting _key
 * here is required, otherwise it could get initialised to the
 * copy of the lockdep_map!
 */
#define __WORK_INIT_LOCKDEP_MAP(n, k) \
	.lockdep_map = STATIC_LOCKDEP_MAP_INIT(n, k),
#else
#define __WORK_INIT_LOCKDEP_MAP(n, k)
#endif

#define __WORK_INITIALIZER(n, f) {					\
	.data = WORK_DATA_STATIC_INIT(),				\
	.entry	= { &(n).entry, &(n).entry },				\
	.func = (f),							\
	__WORK_INIT_LOCKDEP_MAP(#n, &(n))				\
	}

#define __DELAYED_WORK_INITIALIZER(n, f, tflags) {			\
	.work = __WORK_INITIALIZER((n).work, (f)),			\
	.timer = __TIMER_INITIALIZER(delayed_work_timer_fn,		\
				     (tflags) | TIMER_IRQSAFE),		\
	}

#define DECLARE_WORK(n, f)						\
	struct work_struct n = __WORK_INITIALIZER(n, f)

#define DECLARE_DELAYED_WORK(n, f)					\
	struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, 0)

#define DECLARE_DEFERRABLE_WORK(n, f)					\
	struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, TIMER_DEFERRABLE)
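
/*
 * Example usage (editor's illustrative sketch; the names are hypothetical):
 * statically defined work items, queued later e.g. from an interrupt
 * handler.
 *
 *	static void my_work_fn(struct work_struct *work)
 *	{
 *		pr_info("my_work ran\n");
 *	}
 *
 *	static DECLARE_WORK(my_work, my_work_fn);
 *	static DECLARE_DELAYED_WORK(my_tick, my_work_fn);
 *
 *	schedule_work(&my_work);
 *	schedule_delayed_work(&my_tick, msecs_to_jiffies(100));
 */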

#ifdef CONFIG_DEBUG_OBJECTS_WORK
extern void __init_work(struct work_struct *work, int onstack);
extern void destroy_work_on_stack(struct work_struct *work);
extern void destroy_delayed_work_on_stack(struct delayed_work *work);
static inline unsigned int work_static(struct work_struct *work)
{
	return *work_data_bits(work) & WORK_STRUCT_STATIC;
}
#else
static inline void __init_work(struct work_struct *work, int onstack) { }
static inline void destroy_work_on_stack(struct work_struct *work) { }
static inline void destroy_delayed_work_on_stack(struct delayed_work *work) { }
static inline unsigned int work_static(struct work_struct *work) { return 0; }
#endif

/*
 * initialize all of a work item in one go
 *
 * NOTE! No point in using "atomic_long_set()": using a direct
 * assignment of the work data initializer allows the compiler
 * to generate better code.
 */
#ifdef CONFIG_LOCKDEP
#define __INIT_WORK_KEY(_work, _func, _onstack, _key)			\
	do {								\
		__init_work((_work), _onstack);				\
		(_work)->data = (atomic_long_t) WORK_DATA_INIT();	\
		lockdep_init_map(&(_work)->lockdep_map, "(work_completion)"#_work, (_key), 0); \
		INIT_LIST_HEAD(&(_work)->entry);			\
		(_work)->func = (_func);				\
	} while (0)
#else
#define __INIT_WORK_KEY(_work, _func, _onstack, _key)			\
	do {								\
		__init_work((_work), _onstack);				\
		(_work)->data = (atomic_long_t) WORK_DATA_INIT();	\
		INIT_LIST_HEAD(&(_work)->entry);			\
		(_work)->func = (_func);				\
	} while (0)
#endif

#define __INIT_WORK(_work, _func, _onstack)				\
	do {								\
		static __maybe_unused struct lock_class_key __key;	\
									\
		__INIT_WORK_KEY(_work, _func, _onstack, &__key);	\
	} while (0)

#define INIT_WORK(_work, _func)						\
	__INIT_WORK((_work), (_func), 0)

#define INIT_WORK_ONSTACK(_work, _func)					\
	__INIT_WORK((_work), (_func), 1)

#define INIT_WORK_ONSTACK_KEY(_work, _func, _key)			\
	__INIT_WORK_KEY((_work), (_func), 1, _key)

#define __INIT_DELAYED_WORK(_work, _func, _tflags)			\
	do {								\
		INIT_WORK(&(_work)->work, (_func));			\
		__init_timer(&(_work)->timer,				\
			     delayed_work_timer_fn,			\
			     (_tflags) | TIMER_IRQSAFE);		\
	} while (0)

#define __INIT_DELAYED_WORK_ONSTACK(_work, _func, _tflags)		\
	do {								\
		INIT_WORK_ONSTACK(&(_work)->work, (_func));		\
		__init_timer_on_stack(&(_work)->timer,			\
				      delayed_work_timer_fn,		\
				      (_tflags) | TIMER_IRQSAFE);	\
	} while (0)

#define INIT_DELAYED_WORK(_work, _func)					\
	__INIT_DELAYED_WORK(_work, _func, 0)

#define INIT_DELAYED_WORK_ONSTACK(_work, _func)				\
	__INIT_DELAYED_WORK_ONSTACK(_work, _func, 0)

#define INIT_DEFERRABLE_WORK(_work, _func)				\
	__INIT_DELAYED_WORK(_work, _func, TIMER_DEFERRABLE)

#define INIT_DEFERRABLE_WORK_ONSTACK(_work, _func)			\
	__INIT_DELAYED_WORK_ONSTACK(_work, _func, TIMER_DEFERRABLE)

#define INIT_RCU_WORK(_work, _func)					\
	INIT_WORK(&(_work)->work, (_func))

#define INIT_RCU_WORK_ONSTACK(_work, _func)				\
	INIT_WORK_ONSTACK(&(_work)->work, (_func))
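
/*
 * Example usage (editor's illustrative sketch; "struct my_dev" is
 * hypothetical): initializing a work item embedded in a dynamically
 * allocated object.
 *
 *	struct my_dev {
 *		struct work_struct irq_work;
 *	};
 *
 *	static void my_irq_work_fn(struct work_struct *work)
 *	{
 *		struct my_dev *dev = container_of(work, struct my_dev,
 *						  irq_work);
 *		// process dev
 *	}
 *
 *	// at probe/alloc time:
 *	INIT_WORK(&dev->irq_work, my_irq_work_fn);
 */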

/**
 * work_pending - Find out whether a work item is currently pending
 * @work: The work item in question
 */
#define work_pending(work) \
	test_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))

/**
 * delayed_work_pending - Find out whether a delayable work item is currently
 * pending
 * @w: The work item in question
 */
#define delayed_work_pending(w) \
	work_pending(&(w)->work)

/*
 * Workqueue flags and constants.  For details, please refer to
 * Documentation/core-api/workqueue.rst.
 */
enum wq_flags {
	WQ_BH			= 1 << 0, /* execute in bottom half (softirq) context */
	WQ_UNBOUND		= 1 << 1, /* not bound to any cpu */
	WQ_FREEZABLE		= 1 << 2, /* freeze during suspend */
	WQ_MEM_RECLAIM		= 1 << 3, /* may be used for memory reclaim */
	WQ_HIGHPRI		= 1 << 4, /* high priority */
	WQ_CPU_INTENSIVE	= 1 << 5, /* cpu intensive workqueue */
	WQ_SYSFS		= 1 << 6, /* visible in sysfs, see workqueue_sysfs_register() */

	/*
	 * Per-cpu workqueues are generally preferred because they tend to
	 * show better performance thanks to cache locality.  Per-cpu
	 * workqueues exclude the scheduler from choosing the CPU to
	 * execute the worker threads, which has an unfortunate side effect
	 * of increasing power consumption.
	 *
	 * The scheduler considers a CPU idle if it doesn't have any task
	 * to execute and tries to keep idle cores idle to conserve power;
	 * however, for example, a per-cpu work item scheduled from an
	 * interrupt handler on an idle CPU will force the scheduler to
	 * execute the work item on that CPU breaking the idleness, which in
	 * turn may lead to more scheduling choices which are sub-optimal
	 * in terms of power consumption.
	 *
	 * Workqueues marked with WQ_POWER_EFFICIENT are per-cpu by default
	 * but become unbound if workqueue.power_efficient kernel param is
	 * specified.  Per-cpu workqueues which are identified as contributing
	 * significantly to power consumption are marked with this flag, and
	 * enabling the power_efficient mode leads to noticeable power saving
	 * at the cost of a small performance disadvantage.
	 *
	 * http://thread.gmane.org/gmane.linux.kernel/1480396
	 */
	WQ_POWER_EFFICIENT	= 1 << 7,

	__WQ_DESTROYING		= 1 << 15, /* internal: workqueue is destroying */
	__WQ_DRAINING		= 1 << 16, /* internal: workqueue is draining */
	__WQ_ORDERED		= 1 << 17, /* internal: workqueue is ordered */
	__WQ_LEGACY		= 1 << 18, /* internal: create*_workqueue() */

	/* BH wq only allows the following flags */
	__WQ_BH_ALLOWS		= WQ_BH | WQ_HIGHPRI,
};

enum wq_consts {
	WQ_MAX_ACTIVE		= 512,	  /* I like 512, better ideas? */
	WQ_UNBOUND_MAX_ACTIVE	= WQ_MAX_ACTIVE,
	WQ_DFL_ACTIVE		= WQ_MAX_ACTIVE / 2,

	/*
	 * Per-node default cap on min_active. Unless explicitly set, min_active
	 * is set to min(max_active, WQ_DFL_MIN_ACTIVE). For more details, see
	 * workqueue_struct->min_active definition.
	 */
	WQ_DFL_MIN_ACTIVE	= 8,
};

/*
 * System-wide workqueues which are always present.
 *
 * system_wq is the one used by schedule[_delayed]_work[_on]().
 * Multi-CPU multi-threaded.  There are users which expect relatively
 * short queue flush time.  Don't queue works which can run for too
 * long.
 *
 * system_highpri_wq is similar to system_wq but for work items which
 * require WQ_HIGHPRI.
 *
 * system_long_wq is similar to system_wq but may host long running
 * works.  Queue flushing might take relatively long.
 *
 * system_unbound_wq is an unbound workqueue.  Workers are not bound to
 * any specific CPU, not concurrency managed, and all queued works are
 * executed immediately as long as max_active limit is not reached and
 * resources are available.
 *
 * system_freezable_wq is equivalent to system_wq except that it's
 * freezable.
 *
 * *_power_efficient_wq are inclined towards saving power and converted
 * into WQ_UNBOUND variants if 'wq_power_efficient' is enabled; otherwise,
 * they are the same as their non-power-efficient counterparts - e.g.
 * system_power_efficient_wq is identical to system_wq if
 * 'wq_power_efficient' is disabled.  See WQ_POWER_EFFICIENT for more info.
 *
 * system_bh[_highpri]_wq are convenience interfaces to softirq. BH work items
 * are executed in the queueing CPU's BH context in the queueing order.
 */
extern struct workqueue_struct *system_wq;
extern struct workqueue_struct *system_highpri_wq;
extern struct workqueue_struct *system_long_wq;
extern struct workqueue_struct *system_unbound_wq;
extern struct workqueue_struct *system_freezable_wq;
extern struct workqueue_struct *system_power_efficient_wq;
extern struct workqueue_struct *system_freezable_power_efficient_wq;
extern struct workqueue_struct *system_bh_wq;
extern struct workqueue_struct *system_bh_highpri_wq;

void workqueue_softirq_action(bool highpri);
void workqueue_softirq_dead(unsigned int cpu);

/**
 * alloc_workqueue - allocate a workqueue
 * @fmt: printf format for the name of the workqueue
 * @flags: WQ_* flags
 * @max_active: max in-flight work items, 0 for default
 * @...: args for @fmt
 *
 * For a per-cpu workqueue, @max_active limits the number of in-flight work
 * items for each CPU. e.g. @max_active of 1 indicates that each CPU can be
 * executing at most one work item for the workqueue.
 *
 * For unbound workqueues, @max_active limits the number of in-flight work items
 * for the whole system. e.g. @max_active of 16 indicates that there can be
 * at most 16 work items executing for the workqueue in the whole system.
 *
 * As sharing the same active counter for an unbound workqueue across multiple
 * NUMA nodes can be expensive, @max_active is distributed to each NUMA node
 * according to the proportion of the number of online CPUs and enforced
 * independently.
 *
 * Depending on online CPU distribution, a node may end up with per-node
 * max_active which is significantly lower than @max_active, which can lead to
 * deadlocks if the per-node concurrency limit is lower than the maximum number
 * of interdependent work items for the workqueue.
 *
 * To guarantee forward progress regardless of online CPU distribution, the
 * concurrency limit on every node is guaranteed to be equal to or greater than
 * min_active which is set to min(@max_active, %WQ_DFL_MIN_ACTIVE). This means
 * that the sum of per-node max_active's may be larger than @max_active.
 *
 * For detailed information on %WQ_* flags, please refer to
 * Documentation/core-api/workqueue.rst.
 *
 * RETURNS:
 * Pointer to the allocated workqueue on success, %NULL on failure.
 */
__printf(1, 4) struct workqueue_struct *
alloc_workqueue(const char *fmt, unsigned int flags, int max_active, ...);
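
/*
 * Example usage (editor's illustrative sketch; the name and flag choice
 * are hypothetical):
 *
 *	struct workqueue_struct *wq;
 *
 *	wq = alloc_workqueue("mydrv", WQ_UNBOUND | WQ_FREEZABLE, 0);
 *	if (!wq)
 *		return -ENOMEM;
 *	...
 *	destroy_workqueue(wq);
 */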

#ifdef CONFIG_LOCKDEP
/**
 * alloc_workqueue_lockdep_map - allocate a workqueue with user-defined lockdep_map
 * @fmt: printf format for the name of the workqueue
 * @flags: WQ_* flags
 * @max_active: max in-flight work items, 0 for default
 * @lockdep_map: user-defined lockdep_map
 * @...: args for @fmt
 *
 * Same as alloc_workqueue() but with a user-defined lockdep_map. Useful for
 * workqueues created for the same purpose, and to avoid leaking a lockdep_map
 * on each workqueue creation.
 *
 * RETURNS:
 * Pointer to the allocated workqueue on success, %NULL on failure.
 */
__printf(1, 5) struct workqueue_struct *
alloc_workqueue_lockdep_map(const char *fmt, unsigned int flags, int max_active,
			    struct lockdep_map *lockdep_map, ...);

/**
 * alloc_ordered_workqueue_lockdep_map - allocate an ordered workqueue with
 * user-defined lockdep_map
 *
 * @fmt: printf format for the name of the workqueue
 * @flags: WQ_* flags (only WQ_FREEZABLE and WQ_MEM_RECLAIM are meaningful)
 * @lockdep_map: user-defined lockdep_map
 * @args: args for @fmt
 *
 * Same as alloc_ordered_workqueue() but with a user-defined lockdep_map.
 * Useful for workqueues created for the same purpose, and to avoid leaking a
 * lockdep_map on each workqueue creation.
 *
 * RETURNS:
 * Pointer to the allocated workqueue on success, %NULL on failure.
 */
__printf(1, 4) static inline struct workqueue_struct *
alloc_ordered_workqueue_lockdep_map(const char *fmt, unsigned int flags,
				    struct lockdep_map *lockdep_map, ...)
{
	struct workqueue_struct *wq;
	va_list args;

	va_start(args, lockdep_map);
	wq = alloc_workqueue_lockdep_map(fmt, WQ_UNBOUND | __WQ_ORDERED | flags,
					 1, lockdep_map, args);
	va_end(args);

	return wq;
}
#endif

/**
 * alloc_ordered_workqueue - allocate an ordered workqueue
 * @fmt: printf format for the name of the workqueue
 * @flags: WQ_* flags (only WQ_FREEZABLE and WQ_MEM_RECLAIM are meaningful)
 * @args: args for @fmt
 *
 * Allocate an ordered workqueue.  An ordered workqueue executes at
 * most one work item at any given time in the queued order.  They are
 * implemented as unbound workqueues with @max_active of one.
 *
 * RETURNS:
 * Pointer to the allocated workqueue on success, %NULL on failure.
 */
#define alloc_ordered_workqueue(fmt, flags, args...)			\
	alloc_workqueue(fmt, WQ_UNBOUND | __WQ_ORDERED | (flags), 1, ##args)
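
/*
 * Example usage (editor's illustrative sketch; "my_fsm" is hypothetical):
 * strict FIFO, one-at-a-time execution is useful e.g. for state machines.
 *
 *	struct workqueue_struct *fsm_wq;
 *
 *	fsm_wq = alloc_ordered_workqueue("my_fsm", WQ_MEM_RECLAIM);
 *	if (!fsm_wq)
 *		return -ENOMEM;
 */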

#define create_workqueue(name)						\
	alloc_workqueue("%s", __WQ_LEGACY | WQ_MEM_RECLAIM, 1, (name))
#define create_freezable_workqueue(name)				\
	alloc_workqueue("%s", __WQ_LEGACY | WQ_FREEZABLE | WQ_UNBOUND |	\
			WQ_MEM_RECLAIM, 1, (name))
#define create_singlethread_workqueue(name)				\
	alloc_ordered_workqueue("%s", __WQ_LEGACY | WQ_MEM_RECLAIM, name)

#define from_work(var, callback_work, work_fieldname)	\
	container_of(callback_work, typeof(*var), work_fieldname)
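
/*
 * Example usage (editor's illustrative sketch; "struct my_dev" is
 * hypothetical): from_work() spares the callback from spelling out the
 * container type.
 *
 *	static void my_work_fn(struct work_struct *work)
 *	{
 *		struct my_dev *dev = from_work(dev, work, irq_work);
 *
 *		// dev == container_of(work, struct my_dev, irq_work)
 *	}
 */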

extern void destroy_workqueue(struct workqueue_struct *wq);

struct workqueue_attrs *alloc_workqueue_attrs(void);
void free_workqueue_attrs(struct workqueue_attrs *attrs);
int apply_workqueue_attrs(struct workqueue_struct *wq,
			  const struct workqueue_attrs *attrs);
extern int workqueue_unbound_exclude_cpumask(cpumask_var_t cpumask);

extern bool queue_work_on(int cpu, struct workqueue_struct *wq,
			struct work_struct *work);
extern bool queue_work_node(int node, struct workqueue_struct *wq,
			    struct work_struct *work);
extern bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct delayed_work *work, unsigned long delay);
extern bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay);
extern bool queue_rcu_work(struct workqueue_struct *wq, struct rcu_work *rwork);
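
/*
 * Example usage (editor's illustrative sketch; "struct my_obj" is
 * hypothetical): queue_rcu_work() runs the work item after an RCU grace
 * period, a common way to defer freeing an RCU-protected object into
 * process context.
 *
 *	static void my_free_fn(struct work_struct *work)
 *	{
 *		struct my_obj *obj = container_of(to_rcu_work(work),
 *						  struct my_obj, rwork);
 *		kfree(obj);
 *	}
 *
 *	// after unpublishing obj from readers:
 *	INIT_RCU_WORK(&obj->rwork, my_free_fn);
 *	queue_rcu_work(system_unbound_wq, &obj->rwork);
 */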

extern void __flush_workqueue(struct workqueue_struct *wq);
extern void drain_workqueue(struct workqueue_struct *wq);

extern int schedule_on_each_cpu(work_func_t func);

int execute_in_process_context(work_func_t fn, struct execute_work *);

extern bool flush_work(struct work_struct *work);
extern bool cancel_work(struct work_struct *work);
extern bool cancel_work_sync(struct work_struct *work);

extern bool flush_delayed_work(struct delayed_work *dwork);
extern bool cancel_delayed_work(struct delayed_work *dwork);
extern bool cancel_delayed_work_sync(struct delayed_work *dwork);

extern bool disable_work(struct work_struct *work);
extern bool disable_work_sync(struct work_struct *work);
extern bool enable_work(struct work_struct *work);

extern bool disable_delayed_work(struct delayed_work *dwork);
extern bool disable_delayed_work_sync(struct delayed_work *dwork);
extern bool enable_delayed_work(struct delayed_work *dwork);
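
/*
 * Example usage (editor's illustrative sketch; "dev" and reconfigure()
 * are hypothetical): disable_work_sync() cancels a pending item, waits
 * for a running one to finish and blocks future queueing until a
 * matching enable_work().
 *
 *	disable_work_sync(&dev->irq_work);	// no callback runs past here
 *	reconfigure(dev);
 *	enable_work(&dev->irq_work);		// queueing allowed again
 */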

extern bool flush_rcu_work(struct rcu_work *rwork);

extern void workqueue_set_max_active(struct workqueue_struct *wq,
				     int max_active);
extern void workqueue_set_min_active(struct workqueue_struct *wq,
				     int min_active);
extern struct work_struct *current_work(void);
extern bool current_is_workqueue_rescuer(void);
extern bool workqueue_congested(int cpu, struct workqueue_struct *wq);
extern unsigned int work_busy(struct work_struct *work);
extern __printf(1, 2) void set_worker_desc(const char *fmt, ...);
extern void print_worker_info(const char *log_lvl, struct task_struct *task);
extern void show_all_workqueues(void);
extern void show_freezable_workqueues(void);
extern void show_one_workqueue(struct workqueue_struct *wq);
extern void wq_worker_comm(char *buf, size_t size, struct task_struct *task);

/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns %false if @work was already on a queue, %true otherwise.
 *
 * We queue the work to the CPU on which it was submitted, but if the CPU dies
 * it can be processed by another CPU.
 *
 * Memory-ordering properties:  If it returns %true, guarantees that all stores
 * preceding the call to queue_work() in the program order will be visible from
 * the CPU which will execute @work by the time such work executes, e.g.,
 *
 * { x is initially 0 }
 *
 *   CPU0				CPU1
 *
 *   WRITE_ONCE(x, 1);			[ @work is being executed ]
 *   r0 = queue_work(wq, work);		  r1 = READ_ONCE(x);
 *
 * Forbids: r0 == true && r1 == 0
 */
static inline bool queue_work(struct workqueue_struct *wq,
			      struct work_struct *work)
{
	return queue_work_on(WORK_CPU_UNBOUND, wq, work);
}

/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Equivalent to queue_delayed_work_on() but tries to use the local CPU.
 */
static inline bool queue_delayed_work(struct workqueue_struct *wq,
				      struct delayed_work *dwork,
				      unsigned long delay)
{
	return queue_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay);
}

/**
 * mod_delayed_work - modify delay of or queue a delayed work
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * mod_delayed_work_on() on local CPU.
 */
static inline bool mod_delayed_work(struct workqueue_struct *wq,
				    struct delayed_work *dwork,
				    unsigned long delay)
{
	return mod_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay);
}
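
/*
 * Example usage (editor's illustrative sketch; "idle_dwork" is
 * hypothetical): debouncing with mod_delayed_work() - every call pushes
 * the deadline out, so the callback only runs once events have been
 * quiet for 100ms.
 *
 *	// on every input event:
 *	mod_delayed_work(system_wq, &dev->idle_dwork,
 *			 msecs_to_jiffies(100));
 */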

/**
 * schedule_work_on - put work task on a specific cpu
 * @cpu: cpu to put the work task on
 * @work: job to be done
 *
 * This puts a job on a specific cpu.
 */
static inline bool schedule_work_on(int cpu, struct work_struct *work)
{
	return queue_work_on(cpu, system_wq, work);
}

/**
 * schedule_work - put work task in global workqueue
 * @work: job to be done
 *
 * Returns %false if @work was already on the kernel-global workqueue and
 * %true otherwise.
 *
 * This puts a job in the kernel-global workqueue if it was not already
 * queued and leaves it in the same position on the kernel-global
 * workqueue otherwise.
 *
 * Shares the same memory-ordering properties of queue_work(), cf. the
 * DocBook header of queue_work().
 */
static inline bool schedule_work(struct work_struct *work)
{
	return queue_work(system_wq, work);
}

/**
 * enable_and_queue_work - Enable and queue a work item on a specific workqueue
 * @wq: The target workqueue
 * @work: The work item to be enabled and queued
 *
 * This function combines the operations of enable_work() and queue_work(),
 * providing a convenient way to enable and queue a work item in a single call.
 * It invokes enable_work() on @work and then queues it if the disable depth
 * reached 0. Returns %true if the disable depth reached 0 and @work is queued,
 * and %false otherwise.
 *
 * Note that @work is always queued when disable depth reaches zero. If the
 * desired behavior is queueing only if certain events took place while @work is
 * disabled, the user should implement the necessary state tracking and perform
 * explicit conditional queueing after enable_work().
 */
static inline bool enable_and_queue_work(struct workqueue_struct *wq,
					 struct work_struct *work)
{
	if (enable_work(work)) {
		queue_work(wq, work);
		return true;
	}
	return false;
}

/*
 * Detect attempts to flush system-wide workqueues at compile time when
 * possible; warn about such attempts at runtime.
 *
 * See https://lkml.kernel.org/r/[email protected]
 * for the reasons and the steps for converting system-wide workqueues into
 * local workqueues.
 */
extern void __warn_flushing_systemwide_wq(void)
	__compiletime_warning("Please avoid flushing system-wide workqueues.");

/* Please stop using this function; it will be removed in the near future. */
#define flush_scheduled_work()						\
({									\
	__warn_flushing_systemwide_wq();				\
	__flush_workqueue(system_wq);					\
})

#define flush_workqueue(wq)						\
({									\
	struct workqueue_struct *_wq = (wq);				\
									\
	if ((__builtin_constant_p(_wq == system_wq) &&			\
	     _wq == system_wq) ||					\
	    (__builtin_constant_p(_wq == system_highpri_wq) &&		\
	     _wq == system_highpri_wq) ||				\
	    (__builtin_constant_p(_wq == system_long_wq) &&		\
	     _wq == system_long_wq) ||					\
	    (__builtin_constant_p(_wq == system_unbound_wq) &&		\
	     _wq == system_unbound_wq) ||				\
	    (__builtin_constant_p(_wq == system_freezable_wq) &&	\
	     _wq == system_freezable_wq) ||				\
	    (__builtin_constant_p(_wq == system_power_efficient_wq) &&	\
	     _wq == system_power_efficient_wq) ||			\
	    (__builtin_constant_p(_wq == system_freezable_power_efficient_wq) && \
	     _wq == system_freezable_power_efficient_wq))		\
		__warn_flushing_systemwide_wq();			\
	__flush_workqueue(_wq);						\
})

/**
 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
 * @cpu: cpu to use
 * @dwork: job to be done
 * @delay: number of jiffies to wait
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue on the specified CPU.
 */
static inline bool schedule_delayed_work_on(int cpu, struct delayed_work *dwork,
					    unsigned long delay)
{
	return queue_delayed_work_on(cpu, system_wq, dwork, delay);
}

/**
 * schedule_delayed_work - put work task in global workqueue after delay
 * @dwork: job to be done
 * @delay: number of jiffies to wait or 0 for immediate execution
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue.
 */
static inline bool schedule_delayed_work(struct delayed_work *dwork,
					 unsigned long delay)
{
	return queue_delayed_work(system_wq, dwork, delay);
}

#ifndef CONFIG_SMP
static inline long work_on_cpu(int cpu, long (*fn)(void *), void *arg)
{
	return fn(arg);
}
static inline long work_on_cpu_safe(int cpu, long (*fn)(void *), void *arg)
{
	return fn(arg);
}
#else
long work_on_cpu_key(int cpu, long (*fn)(void *),
		     void *arg, struct lock_class_key *key);
/*
 * A new key is defined for each caller to make sure the work
 * associated with the function doesn't share its locking class.
 */
#define work_on_cpu(_cpu, _fn, _arg)			\
({							\
	static struct lock_class_key __key;		\
							\
	work_on_cpu_key(_cpu, _fn, _arg, &__key);	\
})

long work_on_cpu_safe_key(int cpu, long (*fn)(void *),
			  void *arg, struct lock_class_key *key);

/*
 * A new key is defined for each caller to make sure the work
 * associated with the function doesn't share its locking class.
 */
#define work_on_cpu_safe(_cpu, _fn, _arg)		\
({							\
	static struct lock_class_key __key;		\
							\
	work_on_cpu_safe_key(_cpu, _fn, _arg, &__key);	\
})
#endif /* CONFIG_SMP */
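
/*
 * Example usage (editor's illustrative sketch; "who_am_i" is
 * hypothetical): work_on_cpu() runs @fn synchronously on the chosen CPU
 * and returns its result.
 *
 *	static long who_am_i(void *arg)
 *	{
 *		return (long)raw_smp_processor_id();
 *	}
 *
 *	long cpu = work_on_cpu(2, who_am_i, NULL);	// returns 2
 */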

#ifdef CONFIG_FREEZER
extern void freeze_workqueues_begin(void);
extern bool freeze_workqueues_busy(void);
extern void thaw_workqueues(void);
#endif /* CONFIG_FREEZER */

#ifdef CONFIG_SYSFS
int workqueue_sysfs_register(struct workqueue_struct *wq);
#else	/* CONFIG_SYSFS */
static inline int workqueue_sysfs_register(struct workqueue_struct *wq)
{ return 0; }
#endif	/* CONFIG_SYSFS */

#ifdef CONFIG_WQ_WATCHDOG
void wq_watchdog_touch(int cpu);
#else	/* CONFIG_WQ_WATCHDOG */
static inline void wq_watchdog_touch(int cpu) { }
#endif	/* CONFIG_WQ_WATCHDOG */

#ifdef CONFIG_SMP
int workqueue_prepare_cpu(unsigned int cpu);
int workqueue_online_cpu(unsigned int cpu);
int workqueue_offline_cpu(unsigned int cpu);
#endif

void __init workqueue_init_early(void);
void __init workqueue_init(void);
void __init workqueue_init_topology(void);

#endif