// SPDX-License-Identifier: GPL-2.0-only
/*
 * kernel/workqueue.c - generic async execution with shared worker pool
 *
 * Copyright (C) 2002		Ingo Molnar
 *
 *   Derived from the taskqueue/keventd code by:
 *     David Woodhouse <[email protected]>
 *     Andrew Morton
 *     Kai Petzke <[email protected]>
 *     Theodore Ts'o <[email protected]>
 *
 * Made to use alloc_percpu by Christoph Lameter.
 *
 * Copyright (C) 2010		SUSE Linux Products GmbH
 * Copyright (C) 2010		Tejun Heo <[email protected]>
 *
 * This is the generic async execution mechanism.  Work items are
 * executed in process context.  The worker pool is shared and
 * automatically managed.  There are two worker pools for each CPU (one for
 * normal work items and the other for high priority ones) and some extra
 * pools for workqueues which are not bound to any specific CPU - the
 * number of these backing pools is dynamic.
 *
 * Please read Documentation/core-api/workqueue.rst for details.
 */

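/*
 * For illustration only - a minimal sketch of how this API is typically
 * consumed from elsewhere in the kernel (my_work/my_work_fn are
 * hypothetical names, not part of this file):
 *
 *	static void my_work_fn(struct work_struct *work)
 *	{
 *		pr_info("runs in process context on a shared kworker\n");
 *	}
 *
 *	static DECLARE_WORK(my_work, my_work_fn);
 *
 *	// from almost any context, including hardirq:
 *	schedule_work(&my_work);		// queues on system_wq
 *	queue_work(system_highpri_wq, &my_work);// or on a specific wq
 */
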
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/mempolicy.h>
#include <linux/freezer.h>
#include <linux/debug_locks.h>
#include <linux/lockdep.h>
#include <linux/idr.h>
#include <linux/jhash.h>
#include <linux/hashtable.h>
#include <linux/rculist.h>
#include <linux/nodemask.h>
#include <linux/moduleparam.h>
#include <linux/uaccess.h>
#include <linux/sched/isolation.h>
#include <linux/sched/debug.h>
#include <linux/nmi.h>
#include <linux/kvm_para.h>
#include <linux/delay.h>

#include "workqueue_internal.h"

enum worker_pool_flags {
	/*
	 * worker_pool flags
	 *
	 * A bound pool is either associated or disassociated with its CPU.
	 * While associated (!DISASSOCIATED), all workers are bound to the
	 * CPU and none has %WORKER_UNBOUND set and concurrency management
	 * is in effect.
	 *
	 * While DISASSOCIATED, the cpu may be offline and all workers have
	 * %WORKER_UNBOUND set and concurrency management disabled, and may
	 * be executing on any CPU.  The pool behaves as an unbound one.
	 *
	 * Note that DISASSOCIATED should be flipped only while holding
	 * wq_pool_attach_mutex to avoid changing binding state while
	 * worker_attach_to_pool() is in progress.
	 */
	POOL_MANAGER_ACTIVE	= 1 << 0,	/* being managed */
	POOL_DISASSOCIATED	= 1 << 2,	/* cpu can't serve workers */
};

enum worker_flags {
	/* worker flags */
	WORKER_DIE		= 1 << 1,	/* die die die */
	WORKER_IDLE		= 1 << 2,	/* is idle */
	WORKER_PREP		= 1 << 3,	/* preparing to run works */
	WORKER_CPU_INTENSIVE	= 1 << 6,	/* cpu intensive */
	WORKER_UNBOUND		= 1 << 7,	/* worker is unbound */
	WORKER_REBOUND		= 1 << 8,	/* worker was rebound */

	WORKER_NOT_RUNNING	= WORKER_PREP | WORKER_CPU_INTENSIVE |
				  WORKER_UNBOUND | WORKER_REBOUND,
};

enum wq_internal_consts {
	NR_STD_WORKER_POOLS	= 2,		/* # standard pools per cpu */

	UNBOUND_POOL_HASH_ORDER	= 6,		/* hashed by pool->attrs */
	BUSY_WORKER_HASH_ORDER	= 6,		/* 64 pointers */

	MAX_IDLE_WORKERS_RATIO	= 4,		/* 1/4 of busy can be idle */
	IDLE_WORKER_TIMEOUT	= 300 * HZ,	/* keep idle ones for 5 mins */

	MAYDAY_INITIAL_TIMEOUT  = HZ / 100 >= 2 ? HZ / 100 : 2,
						/* call for help after 10ms
						   (min two ticks) */
	MAYDAY_INTERVAL		= HZ / 10,	/* and then every 100ms */
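
	/*
	 * Illustrative arithmetic (not from the original source): with
	 * HZ=1000, MAYDAY_INITIAL_TIMEOUT is 1000/100 = 10 ticks = 10ms;
	 * with HZ=100, 100/100 = 1 tick would fall below the two-tick
	 * minimum, so the expression picks 2 ticks = 20ms instead.
	 */
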
	CREATE_COOLDOWN		= HZ,		/* time to breathe after fail */

	/*
	 * Rescue workers are used only in emergencies and shared by
	 * all cpus.  Give MIN_NICE.
	 */
	RESCUER_NICE_LEVEL	= MIN_NICE,
	HIGHPRI_NICE_LEVEL	= MIN_NICE,

	WQ_NAME_LEN		= 32,
};

/*
 * Structure fields follow one of the following exclusion rules.
 *
 * I: Modifiable by initialization/destruction paths and read-only for
 *    everyone else.
 *
 * P: Preemption protected.  Disabling preemption is enough and should
 *    only be modified and accessed from the local cpu.
 *
 * L: pool->lock protected.  Access with pool->lock held.
 *
 * LN: pool->lock and wq_node_nr_active->lock protected for writes. Either for
 *     reads.
 *
 * K: Only modified by worker while holding pool->lock. Can be safely read by
 *    self, while holding pool->lock or from IRQ context if %current is the
 *    kworker.
 *
 * S: Only modified by worker self.
 *
 * A: wq_pool_attach_mutex protected.
 *
 * PL: wq_pool_mutex protected.
 *
 * PR: wq_pool_mutex protected for writes.  RCU protected for reads.
 *
 * PW: wq_pool_mutex and wq->mutex protected for writes.  Either for reads.
 *
 * PWR: wq_pool_mutex and wq->mutex protected for writes.  Either or
 *      RCU for reads.
 *
 * WQ: wq->mutex protected.
 *
 * WR: wq->mutex protected for writes.  RCU protected for reads.
 *
 * WO: wq->mutex protected for writes. Updated with WRITE_ONCE() and can be read
 *     with READ_ONCE() without locking.
 *
 * MD: wq_mayday_lock protected.
 *
 * WD: Used internally by the watchdog.
 */
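
/*
 * Example reading of the legend above (illustrative, not from the
 * original source): the "PR" annotation on the global workqueues list
 * below means a writer must hold wq_pool_mutex, while a reader may
 * instead traverse the list inside rcu_read_lock()/rcu_read_unlock().
 */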

/* struct worker is defined in workqueue_internal.h */

struct worker_pool {
	raw_spinlock_t		lock;		/* the pool lock */
	int			cpu;		/* I: the associated cpu */
	int			node;		/* I: the associated node ID */
	int			id;		/* I: pool ID */
	unsigned int		flags;		/* L: flags */

	unsigned long		watchdog_ts;	/* L: watchdog timestamp */
	bool			cpu_stall;	/* WD: stalled cpu bound pool */

	/*
	 * The counter is incremented in a process context on the associated CPU
	 * w/ preemption disabled, and decremented or reset in the same context
	 * but w/ pool->lock held. The readers grab pool->lock and are
	 * guaranteed to see if the counter reached zero.
	 */
	int			nr_running;

	struct list_head	worklist;	/* L: list of pending works */

	int			nr_workers;	/* L: total number of workers */
	int			nr_idle;	/* L: currently idle workers */

	struct list_head	idle_list;	/* L: list of idle workers */
	struct timer_list	idle_timer;	/* L: worker idle timeout */
	struct work_struct      idle_cull_work; /* L: worker idle cleanup */

	struct timer_list	mayday_timer;	  /* L: SOS timer for workers */

	/* a worker is either on busy_hash or idle_list, or the manager */
	DECLARE_HASHTABLE(busy_hash, BUSY_WORKER_HASH_ORDER);
						/* L: hash of busy workers */

	struct worker		*manager;	/* L: purely informational */
	struct list_head	workers;	/* A: attached workers */
	struct list_head        dying_workers;  /* A: workers about to die */
	struct completion	*detach_completion; /* all workers detached */

	struct ida		worker_ida;	/* worker IDs for task name */

	struct workqueue_attrs	*attrs;		/* I: worker attributes */
	struct hlist_node	hash_node;	/* PL: unbound_pool_hash node */
	int			refcnt;		/* PL: refcnt for unbound pools */

	/*
	 * Destruction of pool is RCU protected to allow dereferences
	 * from get_work_pool().
	 */
	struct rcu_head		rcu;
};

/*
 * Per-pool_workqueue statistics. These can be monitored using
 * tools/workqueue/wq_monitor.py.
 */
enum pool_workqueue_stats {
	PWQ_STAT_STARTED,	/* work items started execution */
	PWQ_STAT_COMPLETED,	/* work items completed execution */
	PWQ_STAT_CPU_TIME,	/* total CPU time consumed */
	PWQ_STAT_CPU_INTENSIVE,	/* wq_cpu_intensive_thresh_us violations */
	PWQ_STAT_CM_WAKEUP,	/* concurrency-management worker wakeups */
	PWQ_STAT_REPATRIATED,	/* unbound workers brought back into scope */
	PWQ_STAT_MAYDAY,	/* maydays to rescuer */
	PWQ_STAT_RESCUED,	/* linked work items executed by rescuer */

	PWQ_NR_STATS,
};

/*
 * The per-pool workqueue.  While queued, the lower WORK_STRUCT_FLAG_BITS
 * of work_struct->data are used for flags and the remaining high bits
 * point to the pwq; thus, pwqs need to be aligned at two's power of the
 * number of flag bits.
 */
struct pool_workqueue {
	struct worker_pool	*pool;		/* I: the associated pool */
	struct workqueue_struct *wq;		/* I: the owning workqueue */
	int			work_color;	/* L: current color */
	int			flush_color;	/* L: flushing color */
	int			refcnt;		/* L: reference count */
	int			nr_in_flight[WORK_NR_COLORS];
						/* L: nr of in_flight works */

	/*
	 * nr_active management and WORK_STRUCT_INACTIVE:
	 *
	 * When pwq->nr_active >= max_active, new work item is queued to
	 * pwq->inactive_works instead of pool->worklist and marked with
	 * WORK_STRUCT_INACTIVE.
	 *
	 * All work items marked with WORK_STRUCT_INACTIVE do not participate in
	 * nr_active and all work items in pwq->inactive_works are marked with
	 * WORK_STRUCT_INACTIVE. But not all WORK_STRUCT_INACTIVE work items are
	 * in pwq->inactive_works. Some of them are ready to run in
	 * pool->worklist or worker->scheduled. Those work items are only struct
	 * wq_barrier which is used for flush_work() and should not participate
	 * in nr_active. For non-barrier work item, it is marked with
	 * WORK_STRUCT_INACTIVE iff it is in pwq->inactive_works.
	 */
	int			nr_active;	/* L: nr of active works */
	struct list_head	inactive_works;	/* L: inactive works */
	struct list_head	pending_node;	/* LN: node on wq_node_nr_active->pending_pwqs */
	struct list_head	pwqs_node;	/* WR: node on wq->pwqs */
	struct list_head	mayday_node;	/* MD: node on wq->maydays */

	u64			stats[PWQ_NR_STATS];

	/*
	 * Release of unbound pwq is punted to a kthread_worker. See put_pwq()
	 * and pwq_release_workfn() for details. pool_workqueue itself is also
	 * RCU protected so that the first pwq can be determined without
	 * grabbing wq->mutex.
	 */
	struct kthread_work	release_work;
	struct rcu_head		rcu;
} __aligned(1 << WORK_STRUCT_FLAG_BITS);

/*
 * Structure used to wait for workqueue flush.
 */
struct wq_flusher {
	struct list_head	list;		/* WQ: list of flushers */
	int			flush_color;	/* WQ: flush color waiting for */
	struct completion	done;		/* flush completion */
};

struct wq_device;

/*
 * Unlike in a per-cpu workqueue where max_active limits its concurrency level
 * on each CPU, in an unbound workqueue, max_active applies to the whole system.
 * As sharing a single nr_active across multiple sockets can be very expensive,
 * the counting and enforcement are per NUMA node.
 *
 * The following struct is used to enforce per-node max_active. When a pwq wants
 * to start executing a work item, it should increment ->nr using
 * tryinc_node_nr_active(). If acquisition fails due to ->nr already being over
 * ->max, the pwq is queued on ->pending_pwqs. As in-flight work items finish
 * and decrement ->nr, node_activate_pending_pwq() activates the pending pwqs in
 * round-robin order.
 */
struct wq_node_nr_active {
	int			max;		/* per-node max_active */
	atomic_t		nr;		/* per-node nr_active */
	raw_spinlock_t		lock;		/* nests inside pool locks */
	struct list_head	pending_pwqs;	/* LN: pwqs with inactive works */
};

/*
 * The externally visible workqueue.  It relays the issued work items to
 * the appropriate worker_pool through its pool_workqueues.
 */
struct workqueue_struct {
	struct list_head	pwqs;		/* WR: all pwqs of this wq */
	struct list_head	list;		/* PR: list of all workqueues */

	struct mutex		mutex;		/* protects this wq */
	int			work_color;	/* WQ: current work color */
	int			flush_color;	/* WQ: current flush color */
	atomic_t		nr_pwqs_to_flush; /* flush in progress */
	struct wq_flusher	*first_flusher;	/* WQ: first flusher */
	struct list_head	flusher_queue;	/* WQ: flush waiters */
	struct list_head	flusher_overflow; /* WQ: flush overflow list */

	struct list_head	maydays;	/* MD: pwqs requesting rescue */
	struct worker		*rescuer;	/* MD: rescue worker */

	int			nr_drainers;	/* WQ: drain in progress */

	/* See alloc_workqueue() function comment for info on min/max_active */
	int			max_active;	/* WO: max active works */
	int			min_active;	/* WO: min active works */
	int			saved_max_active; /* WQ: saved max_active */
	int			saved_min_active; /* WQ: saved min_active */

	struct workqueue_attrs	*unbound_attrs;	/* PW: only for unbound wqs */
	struct pool_workqueue __rcu *dfl_pwq;   /* PW: only for unbound wqs */

#ifdef CONFIG_SYSFS
	struct wq_device	*wq_dev;	/* I: for sysfs interface */
#endif
#ifdef CONFIG_LOCKDEP
	char			*lock_name;
	struct lock_class_key	key;
	struct lockdep_map	lockdep_map;
#endif
	char			name[WQ_NAME_LEN]; /* I: workqueue name */

	/*
	 * Destruction of workqueue_struct is RCU protected to allow walking
	 * the workqueues list without grabbing wq_pool_mutex.
	 * This is used to dump all workqueues from sysrq.
	 */
	struct rcu_head		rcu;

	/* hot fields used during command issue, aligned to cacheline */
	unsigned int		flags ____cacheline_aligned; /* WQ: WQ_* flags */
	struct pool_workqueue __percpu __rcu **cpu_pwq; /* I: per-cpu pwqs */
	struct wq_node_nr_active *node_nr_active[]; /* I: per-node nr_active */
};

static struct kmem_cache *pwq_cache;

/*
 * Each pod type describes how CPUs should be grouped for unbound workqueues.
 * See the comment above workqueue_attrs->affn_scope.
 */
struct wq_pod_type {
	int			nr_pods;	/* number of pods */
	cpumask_var_t		*pod_cpus;	/* pod -> cpus */
	int			*pod_node;	/* pod -> node */
	int			*cpu_pod;	/* cpu -> pod */
};

static struct wq_pod_type wq_pod_types[WQ_AFFN_NR_TYPES];
static enum wq_affn_scope wq_affn_dfl = WQ_AFFN_CACHE;

static const char *wq_affn_names[WQ_AFFN_NR_TYPES] = {
	[WQ_AFFN_DFL]			= "default",
	[WQ_AFFN_CPU]			= "cpu",
	[WQ_AFFN_SMT]			= "smt",
	[WQ_AFFN_CACHE]			= "cache",
	[WQ_AFFN_NUMA]			= "numa",
	[WQ_AFFN_SYSTEM]		= "system",
};

/*
 * Per-cpu work items which run for longer than the following threshold are
 * automatically considered CPU intensive and excluded from concurrency
 * management to prevent them from noticeably delaying other per-cpu work items.
 * ULONG_MAX indicates that the user hasn't overridden it with a boot parameter.
 * The actual value is initialized in wq_cpu_intensive_thresh_init().
 */
static unsigned long wq_cpu_intensive_thresh_us = ULONG_MAX;
module_param_named(cpu_intensive_thresh_us, wq_cpu_intensive_thresh_us, ulong, 0644);
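
/*
 * A minimal sketch (an assumption based on standard module_param
 * behavior, not code from this file) of how the 0644 parameter above is
 * typically tuned at runtime:
 *
 *	# report work items hogging the CPU for more than 5ms
 *	echo 5000 > /sys/module/workqueue/parameters/cpu_intensive_thresh_us
 */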

/* see the comment above the definition of WQ_POWER_EFFICIENT */
static bool wq_power_efficient = IS_ENABLED(CONFIG_WQ_POWER_EFFICIENT_DEFAULT);
module_param_named(power_efficient, wq_power_efficient, bool, 0444);

static bool wq_online;			/* can kworkers be created yet? */

/* buf for wq_update_unbound_pod_attrs(), protected by CPU hotplug exclusion */
static struct workqueue_attrs *wq_update_pod_attrs_buf;

static DEFINE_MUTEX(wq_pool_mutex);	/* protects pools and workqueues list */
static DEFINE_MUTEX(wq_pool_attach_mutex); /* protects worker attach/detach */
static DEFINE_RAW_SPINLOCK(wq_mayday_lock);	/* protects wq->maydays list */
/* wait for manager to go away */
static struct rcuwait manager_wait = __RCUWAIT_INITIALIZER(manager_wait);

static LIST_HEAD(workqueues);		/* PR: list of all workqueues */
static bool workqueue_freezing;		/* PL: have wqs started freezing? */

/* PL&A: allowable cpus for unbound wqs and work items */
static cpumask_var_t wq_unbound_cpumask;

/* PL: user requested unbound cpumask via sysfs */
static cpumask_var_t wq_requested_unbound_cpumask;

/* PL: isolated cpumask to be excluded from unbound cpumask */
static cpumask_var_t wq_isolated_cpumask;

/* to further constrain wq_unbound_cpumask by cmdline parameter */
static struct cpumask wq_cmdline_cpumask __initdata;

/* CPU where unbound work was last round robin scheduled from this CPU */
static DEFINE_PER_CPU(int, wq_rr_cpu_last);

/*
 * Local execution of unbound work items is no longer guaranteed.  The
 * following always forces round-robin CPU selection on unbound work items
 * to uncover usages which depend on it.
 */
#ifdef CONFIG_DEBUG_WQ_FORCE_RR_CPU
static bool wq_debug_force_rr_cpu = true;
#else
static bool wq_debug_force_rr_cpu = false;
#endif
module_param_named(debug_force_rr_cpu, wq_debug_force_rr_cpu, bool, 0644);

/* the per-cpu worker pools */
static DEFINE_PER_CPU_SHARED_ALIGNED(struct worker_pool [NR_STD_WORKER_POOLS], cpu_worker_pools);

static DEFINE_IDR(worker_pool_idr);	/* PR: idr of all pools */

/* PL: hash of all unbound pools keyed by pool->attrs */
static DEFINE_HASHTABLE(unbound_pool_hash, UNBOUND_POOL_HASH_ORDER);

/* I: attributes used when instantiating standard unbound pools on demand */
static struct workqueue_attrs *unbound_std_wq_attrs[NR_STD_WORKER_POOLS];

/* I: attributes used when instantiating ordered pools on demand */
static struct workqueue_attrs *ordered_wq_attrs[NR_STD_WORKER_POOLS];

/*
 * I: kthread_worker to release pwq's. pwq release needs to be bounced to a
 * process context while holding a pool lock. Bounce to a dedicated kthread
 * worker to avoid A-A deadlocks.
 */
static struct kthread_worker *pwq_release_worker __ro_after_init;

struct workqueue_struct *system_wq __ro_after_init;
EXPORT_SYMBOL(system_wq);
struct workqueue_struct *system_highpri_wq __ro_after_init;
EXPORT_SYMBOL_GPL(system_highpri_wq);
struct workqueue_struct *system_long_wq __ro_after_init;
EXPORT_SYMBOL_GPL(system_long_wq);
struct workqueue_struct *system_unbound_wq __ro_after_init;
EXPORT_SYMBOL_GPL(system_unbound_wq);
struct workqueue_struct *system_freezable_wq __ro_after_init;
EXPORT_SYMBOL_GPL(system_freezable_wq);
struct workqueue_struct *system_power_efficient_wq __ro_after_init;
EXPORT_SYMBOL_GPL(system_power_efficient_wq);
struct workqueue_struct *system_freezable_power_efficient_wq __ro_after_init;
EXPORT_SYMBOL_GPL(system_freezable_power_efficient_wq);

static int worker_thread(void *__worker);
static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
static void show_pwq(struct pool_workqueue *pwq);
static void show_one_worker_pool(struct worker_pool *pool);

#define CREATE_TRACE_POINTS
#include <trace/events/workqueue.h>

#define assert_rcu_or_pool_mutex()					\
	RCU_LOCKDEP_WARN(!rcu_read_lock_held() &&			\
			 !lockdep_is_held(&wq_pool_mutex),		\
			 "RCU or wq_pool_mutex should be held")

#define assert_rcu_or_wq_mutex_or_pool_mutex(wq)			\
	RCU_LOCKDEP_WARN(!rcu_read_lock_held() &&			\
			 !lockdep_is_held(&wq->mutex) &&		\
			 !lockdep_is_held(&wq_pool_mutex),		\
			 "RCU, wq->mutex or wq_pool_mutex should be held")

#define for_each_cpu_worker_pool(pool, cpu)				\
	for ((pool) = &per_cpu(cpu_worker_pools, cpu)[0];		\
	     (pool) < &per_cpu(cpu_worker_pools, cpu)[NR_STD_WORKER_POOLS]; \
	     (pool)++)

/**
 * for_each_pool - iterate through all worker_pools in the system
 * @pool: iteration cursor
 * @pi: integer used for iteration
 *
 * This must be called either with wq_pool_mutex held or RCU read
 * locked.  If the pool needs to be used beyond the locking in effect, the
 * caller is responsible for guaranteeing that the pool stays online.
 *
 * The if/else clause exists only for the lockdep assertion and can be
 * ignored.
 */
#define for_each_pool(pool, pi)						\
	idr_for_each_entry(&worker_pool_idr, pool, pi)			\
		if (({ assert_rcu_or_pool_mutex(); false; })) { }	\
		else
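
/*
 * Illustrative usage of the iterator above (an assumption about a
 * typical caller, not code from this file):
 *
 *	struct worker_pool *pool;
 *	int pi;
 *
 *	rcu_read_lock();
 *	for_each_pool(pool, pi)
 *		pr_info("pool %d on cpu %d\n", pool->id, pool->cpu);
 *	rcu_read_unlock();
 */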

/**
 * for_each_pool_worker - iterate through all workers of a worker_pool
 * @worker: iteration cursor
 * @pool: worker_pool to iterate workers of
 *
 * This must be called with wq_pool_attach_mutex.
 *
 * The if/else clause exists only for the lockdep assertion and can be
 * ignored.
 */
#define for_each_pool_worker(worker, pool)				\
	list_for_each_entry((worker), &(pool)->workers, node)		\
		if (({ lockdep_assert_held(&wq_pool_attach_mutex); false; })) { } \
		else

/**
 * for_each_pwq - iterate through all pool_workqueues of the specified workqueue
 * @pwq: iteration cursor
 * @wq: the target workqueue
 *
 * This must be called either with wq->mutex held or RCU read locked.
 * If the pwq needs to be used beyond the locking in effect, the caller is
 * responsible for guaranteeing that the pwq stays online.
 *
 * The if/else clause exists only for the lockdep assertion and can be
 * ignored.
 */
#define for_each_pwq(pwq, wq)						\
	list_for_each_entry_rcu((pwq), &(wq)->pwqs, pwqs_node,		\
				 lockdep_is_held(&(wq->mutex)))

#ifdef CONFIG_DEBUG_OBJECTS_WORK

static const struct debug_obj_descr work_debug_descr;

static void *work_debug_hint(void *addr)
{
	return ((struct work_struct *) addr)->func;
}

static bool work_is_static_object(void *addr)
{
	struct work_struct *work = addr;

	return test_bit(WORK_STRUCT_STATIC_BIT, work_data_bits(work));
}

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static bool work_fixup_init(void *addr, enum debug_obj_state state)
{
	struct work_struct *work = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		cancel_work_sync(work);
		debug_object_init(work, &work_debug_descr);
		return true;
	default:
		return false;
	}
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static bool work_fixup_free(void *addr, enum debug_obj_state state)
{
	struct work_struct *work = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		cancel_work_sync(work);
		debug_object_free(work, &work_debug_descr);
		return true;
	default:
		return false;
	}
}

static const struct debug_obj_descr work_debug_descr = {
	.name		= "work_struct",
	.debug_hint	= work_debug_hint,
	.is_static_object = work_is_static_object,
	.fixup_init	= work_fixup_init,
	.fixup_free	= work_fixup_free,
};

static inline void debug_work_activate(struct work_struct *work)
{
	debug_object_activate(work, &work_debug_descr);
}

static inline void debug_work_deactivate(struct work_struct *work)
{
	debug_object_deactivate(work, &work_debug_descr);
}

void __init_work(struct work_struct *work, int onstack)
{
	if (onstack)
		debug_object_init_on_stack(work, &work_debug_descr);
	else
		debug_object_init(work, &work_debug_descr);
}
EXPORT_SYMBOL_GPL(__init_work);

void destroy_work_on_stack(struct work_struct *work)
{
	debug_object_free(work, &work_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_work_on_stack);

void destroy_delayed_work_on_stack(struct delayed_work *work)
{
	destroy_timer_on_stack(&work->timer);
	debug_object_free(&work->work, &work_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_delayed_work_on_stack);

#else
static inline void debug_work_activate(struct work_struct *work) { }
static inline void debug_work_deactivate(struct work_struct *work) { }
#endif

/**
 * worker_pool_assign_id - allocate ID and assign it to @pool
 * @pool: the pool pointer of interest
 *
 * Returns 0 if ID in [0, WORK_OFFQ_POOL_NONE) is allocated and assigned
 * successfully, -errno on failure.
 */
static int worker_pool_assign_id(struct worker_pool *pool)
{
	int ret;

	lockdep_assert_held(&wq_pool_mutex);

	ret = idr_alloc(&worker_pool_idr, pool, 0, WORK_OFFQ_POOL_NONE,
			GFP_KERNEL);
	if (ret >= 0) {
		pool->id = ret;
		return 0;
	}
	return ret;
}

static struct pool_workqueue __rcu **
unbound_pwq_slot(struct workqueue_struct *wq, int cpu)
{
	if (cpu >= 0)
		return per_cpu_ptr(wq->cpu_pwq, cpu);
	else
		return &wq->dfl_pwq;
}

/* @cpu < 0 for dfl_pwq */
static struct pool_workqueue *unbound_pwq(struct workqueue_struct *wq, int cpu)
{
	return rcu_dereference_check(*unbound_pwq_slot(wq, cpu),
				     lockdep_is_held(&wq_pool_mutex) ||
				     lockdep_is_held(&wq->mutex));
}

/**
 * unbound_effective_cpumask - effective cpumask of an unbound workqueue
 * @wq: workqueue of interest
 *
 * @wq->unbound_attrs->cpumask contains the cpumask requested by the user which
 * is masked with wq_unbound_cpumask to determine the effective cpumask. The
 * default pwq is always mapped to the pool with the current effective cpumask.
 */
static struct cpumask *unbound_effective_cpumask(struct workqueue_struct *wq)
{
	return unbound_pwq(wq, -1)->pool->attrs->__pod_cpumask;
}

static unsigned int work_color_to_flags(int color)
{
	return color << WORK_STRUCT_COLOR_SHIFT;
}

static int get_work_color(unsigned long work_data)
{
	return (work_data >> WORK_STRUCT_COLOR_SHIFT) &
		((1 << WORK_STRUCT_COLOR_BITS) - 1);
}

static int work_next_color(int color)
{
	return (color + 1) % WORK_NR_COLORS;
}

/*
 * While queued, %WORK_STRUCT_PWQ is set and non flag bits of a work's data
 * contain the pointer to the queued pwq.  Once execution starts, the flag
 * is cleared and the high bits contain OFFQ flags and pool ID.
 *
 * set_work_pwq(), set_work_pool_and_clear_pending(), mark_work_canceling()
 * and clear_work_data() can be used to set the pwq, pool or clear
 * work->data.  These functions should only be called while the work is
 * owned - ie. while the PENDING bit is set.
 *
 * get_work_pool() and get_work_pwq() can be used to obtain the pool or pwq
 * corresponding to a work.  Pool is available once the work has been
 * queued anywhere after initialization until it is sync canceled.  pwq is
 * available only while the work item is queued.
 *
 * %WORK_OFFQ_CANCELING is used to mark a work item which is being
 * canceled.  While being canceled, a work item may have its PENDING set
 * but stay off timer and worklist for arbitrarily long and nobody should
 * try to steal the PENDING bit.
 */
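
/*
 * Illustrative layout (an assumption based on the description above, not
 * a bit-accurate diagram; the authoritative definitions live in
 * include/linux/workqueue.h):
 *
 *	queued:	work->data = (unsigned long)pwq | WORK_STRUCT_PWQ | flags
 *		pwq	   = data & WORK_STRUCT_WQ_DATA_MASK
 *	off-q:	work->data = (pool_id << WORK_OFFQ_POOL_SHIFT) | OFFQ flags
 *		pool_id	   = data >> WORK_OFFQ_POOL_SHIFT
 *
 * See set_work_pwq(), work_struct_pwq() and get_work_pool() below.
 */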
static inline void set_work_data(struct work_struct *work, unsigned long data,
				 unsigned long flags)
{
	WARN_ON_ONCE(!work_pending(work));
	atomic_long_set(&work->data, data | flags | work_static(work));
}

static void set_work_pwq(struct work_struct *work, struct pool_workqueue *pwq,
			 unsigned long extra_flags)
{
	set_work_data(work, (unsigned long)pwq,
		      WORK_STRUCT_PENDING | WORK_STRUCT_PWQ | extra_flags);
}

static void set_work_pool_and_keep_pending(struct work_struct *work,
					   int pool_id)
{
	set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT,
		      WORK_STRUCT_PENDING);
}

static void set_work_pool_and_clear_pending(struct work_struct *work,
					    int pool_id)
{
	/*
	 * The following wmb is paired with the implied mb in
	 * test_and_set_bit(PENDING) and ensures all updates to @work made
	 * here are visible to and precede any updates by the next PENDING
	 * owner.
	 */
	smp_wmb();
	set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT, 0);
	/*
	 * The following mb guarantees that previous clear of a PENDING bit
	 * will not be reordered with any speculative LOADS or STORES from
	 * work->current_func, which is executed afterwards.  This possible
	 * reordering can lead to a missed execution on attempt to queue
	 * the same @work.  E.g. consider this case:
	 *
	 *   CPU#0                         CPU#1
	 *   ----------------------------  --------------------------------
	 *
	 * 1  STORE event_indicated
	 * 2  queue_work_on() {
	 * 3    test_and_set_bit(PENDING)
	 * 4 }                             set_..._and_clear_pending() {
	 * 5                                 set_work_data() # clear bit
	 * 6                                 smp_mb()
	 * 7                               work->current_func() {
	 * 8				      LOAD event_indicated
	 *				   }
	 *
	 * Without an explicit full barrier speculative LOAD on line 8 can
	 * be executed before CPU#0 does STORE on line 1.  If that happens,
	 * CPU#0 observes the PENDING bit is still set and new execution of
	 * a @work is not queued in a hope, that CPU#1 will eventually
	 * finish the queued @work.  Meanwhile CPU#1 does not see
	 * event_indicated is set, because speculative LOAD was executed
	 * before actual STORE.
	 */
	smp_mb();
}

static void clear_work_data(struct work_struct *work)
{
	smp_wmb();	/* see set_work_pool_and_clear_pending() */
	set_work_data(work, WORK_STRUCT_NO_POOL, 0);
}

static inline struct pool_workqueue *work_struct_pwq(unsigned long data)
{
	return (struct pool_workqueue *)(data & WORK_STRUCT_WQ_DATA_MASK);
}

static struct pool_workqueue *get_work_pwq(struct work_struct *work)
{
	unsigned long data = atomic_long_read(&work->data);

	if (data & WORK_STRUCT_PWQ)
		return work_struct_pwq(data);
	else
		return NULL;
}

/**
 * get_work_pool - return the worker_pool a given work was associated with
 * @work: the work item of interest
 *
 * Pools are created and destroyed under wq_pool_mutex, and read access is
 * allowed under the RCU read lock.  As such, this function should be
 * called under wq_pool_mutex or inside of a rcu_read_lock() region.
 *
 * All fields of the returned pool are accessible as long as the above
 * mentioned locking is in effect.  If the returned pool needs to be used
 * beyond the critical section, the caller is responsible for ensuring the
 * returned pool is and stays online.
 *
 * Return: The worker_pool @work was last associated with.  %NULL if none.
 */
static struct worker_pool *get_work_pool(struct work_struct *work)
{
	unsigned long data = atomic_long_read(&work->data);
	int pool_id;

	assert_rcu_or_pool_mutex();

	if (data & WORK_STRUCT_PWQ)
		return work_struct_pwq(data)->pool;

	pool_id = data >> WORK_OFFQ_POOL_SHIFT;
	if (pool_id == WORK_OFFQ_POOL_NONE)
		return NULL;

	return idr_find(&worker_pool_idr, pool_id);
}

/**
 * get_work_pool_id - return the worker pool ID a given work is associated with
 * @work: the work item of interest
 *
 * Return: The worker_pool ID @work was last associated with.
 * %WORK_OFFQ_POOL_NONE if none.
 */
static int get_work_pool_id(struct work_struct *work)
{
	unsigned long data = atomic_long_read(&work->data);

	if (data & WORK_STRUCT_PWQ)
		return work_struct_pwq(data)->pool->id;

	return data >> WORK_OFFQ_POOL_SHIFT;
}

static void mark_work_canceling(struct work_struct *work)
{
	unsigned long pool_id = get_work_pool_id(work);

	pool_id <<= WORK_OFFQ_POOL_SHIFT;
	set_work_data(work, pool_id | WORK_OFFQ_CANCELING, WORK_STRUCT_PENDING);
}

static bool work_is_canceling(struct work_struct *work)
{
	unsigned long data = atomic_long_read(&work->data);

	return !(data & WORK_STRUCT_PWQ) && (data & WORK_OFFQ_CANCELING);
}

/*
 * Policy functions.  These define the policies on how the global worker
 * pools are managed.  Unless noted otherwise, these functions assume that
 * they're being called with pool->lock held.
 */

/*
 * Need to wake up a worker?  Called from anything but currently
 * running workers.
 *
 * Note that, because unbound workers never contribute to nr_running, this
 * function will always return %true for unbound pools as long as the
 * worklist isn't empty.
 */
static bool need_more_worker(struct worker_pool *pool)
{
	return !list_empty(&pool->worklist) && !pool->nr_running;
}

/* Can I start working?  Called from busy but !running workers. */
static bool may_start_working(struct worker_pool *pool)
{
	return pool->nr_idle;
}

/* Do I need to keep working?  Called from currently running workers. */
static bool keep_working(struct worker_pool *pool)
{
	return !list_empty(&pool->worklist) && (pool->nr_running <= 1);
}

/* Do we need a new worker?  Called from manager. */
static bool need_to_create_worker(struct worker_pool *pool)
{
	return need_more_worker(pool) && !may_start_working(pool);
}

/* Do we have too many workers and should some go away? */
static bool too_many_workers(struct worker_pool *pool)
{
	bool managing = pool->flags & POOL_MANAGER_ACTIVE;
	int nr_idle = pool->nr_idle + managing; /* manager is considered idle */
	int nr_busy = pool->nr_workers - nr_idle;

	return nr_idle > 2 && (nr_idle - 2) * MAX_IDLE_WORKERS_RATIO >= nr_busy;
}
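
/*
 * Illustrative arithmetic for the check above (not from the original
 * source): with MAX_IDLE_WORKERS_RATIO = 4 and nr_busy = 16, idle
 * workers are deemed too many once nr_idle reaches 2 + 16/4 = 6, i.e.
 * two idle workers are always tolerated and beyond that roughly one
 * idle worker is kept per four busy ones.
 */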

/**
 * worker_set_flags - set worker flags and adjust nr_running accordingly
 * @worker: self
 * @flags: flags to set
 *
 * Set @flags in @worker->flags and adjust nr_running accordingly.
 */
static inline void worker_set_flags(struct worker *worker, unsigned int flags)
{
	struct worker_pool *pool = worker->pool;

	lockdep_assert_held(&pool->lock);

	/* If transitioning into NOT_RUNNING, adjust nr_running. */
	if ((flags & WORKER_NOT_RUNNING) &&
	    !(worker->flags & WORKER_NOT_RUNNING)) {
		pool->nr_running--;
	}

	worker->flags |= flags;
}

/**
 * worker_clr_flags - clear worker flags and adjust nr_running accordingly
 * @worker: self
 * @flags: flags to clear
 *
 * Clear @flags in @worker->flags and adjust nr_running accordingly.
 */
static inline void worker_clr_flags(struct worker *worker, unsigned int flags)
{
	struct worker_pool *pool = worker->pool;
	unsigned int oflags = worker->flags;

	lockdep_assert_held(&pool->lock);

	worker->flags &= ~flags;

	/*
	 * If transitioning out of NOT_RUNNING, increment nr_running.  Note
	 * that the nested NOT_RUNNING is not a noop.  NOT_RUNNING is a mask
	 * of multiple flags, not a single flag.
	 */
	if ((flags & WORKER_NOT_RUNNING) && (oflags & WORKER_NOT_RUNNING))
		if (!(worker->flags & WORKER_NOT_RUNNING))
			pool->nr_running++;
}

/* Return the first idle worker.  Called with pool->lock held. */
static struct worker *first_idle_worker(struct worker_pool *pool)
{
	if (unlikely(list_empty(&pool->idle_list)))
		return NULL;

	return list_first_entry(&pool->idle_list, struct worker, entry);
}

/**
 * worker_enter_idle - enter idle state
 * @worker: worker which is entering idle state
 *
 * @worker is entering idle state.  Update stats and idle timer if
 * necessary.
 *
 * LOCKING:
 * raw_spin_lock_irq(pool->lock).
 */
static void worker_enter_idle(struct worker *worker)
{
	struct worker_pool *pool = worker->pool;

	if (WARN_ON_ONCE(worker->flags & WORKER_IDLE) ||
	    WARN_ON_ONCE(!list_empty(&worker->entry) &&
			 (worker->hentry.next || worker->hentry.pprev)))
		return;

	/* can't use worker_set_flags(), also called from create_worker() */
	worker->flags |= WORKER_IDLE;
	pool->nr_idle++;
	worker->last_active = jiffies;

	/* idle_list is LIFO */
	list_add(&worker->entry, &pool->idle_list);

	if (too_many_workers(pool) && !timer_pending(&pool->idle_timer))
		mod_timer(&pool->idle_timer, jiffies + IDLE_WORKER_TIMEOUT);

	/* Sanity check nr_running. */
	WARN_ON_ONCE(pool->nr_workers == pool->nr_idle && pool->nr_running);
}

/**
 * worker_leave_idle - leave idle state
 * @worker: worker which is leaving idle state
 *
 * @worker is leaving idle state.  Update stats.
 *
 * LOCKING:
 * raw_spin_lock_irq(pool->lock).
 */
static void worker_leave_idle(struct worker *worker)
{
	struct worker_pool *pool = worker->pool;

	if (WARN_ON_ONCE(!(worker->flags & WORKER_IDLE)))
		return;
	worker_clr_flags(worker, WORKER_IDLE);
	pool->nr_idle--;
	list_del_init(&worker->entry);
}

/**
 * find_worker_executing_work - find worker which is executing a work
 * @pool: pool of interest
 * @work: work to find worker for
 *
 * Find a worker which is executing @work on @pool by searching
 * @pool->busy_hash which is keyed by the address of @work.  For a worker
 * to match, its current execution should match the address of @work and
 * its work function.  This is to avoid unwanted dependency between
 * unrelated work executions through a work item being recycled while still
 * being executed.
 *
 * This is a bit tricky.  A work item may be freed once its execution
 * starts and nothing prevents the freed area from being recycled for
 * another work item.  If the same work item address ends up being reused
 * before the original execution finishes, workqueue will identify the
 * recycled work item as currently executing and make it wait until the
 * current execution finishes, introducing an unwanted dependency.
 *
 * This function checks the work item address and work function to avoid
 * false positives.  Note that this isn't complete as one may construct a
 * work function which can introduce dependency onto itself through a
 * recycled work item.  Well, if somebody wants to shoot oneself in the
 * foot that badly, there's only so much we can do, and if such deadlock
 * actually occurs, it should be easy to locate the culprit work function.
 *
 * CONTEXT:
 * raw_spin_lock_irq(pool->lock).
 *
 * Return:
 * Pointer to worker which is executing @work if found, %NULL
 * otherwise.
 */
static struct worker *find_worker_executing_work(struct worker_pool *pool,
						 struct work_struct *work)
{
	struct worker *worker;

	hash_for_each_possible(pool->busy_hash, worker, hentry,
			       (unsigned long)work)
		if (worker->current_work == work &&
		    worker->current_func == work->func)
			return worker;

	return NULL;
}

/**
 * move_linked_works - move linked works to a list
 * @work: start of series of works to be scheduled
 * @head: target list to append @work to
 * @nextp: out parameter for nested worklist walking
 *
 * Schedule linked works starting from @work to @head. Work series to be
 * scheduled starts at @work and includes any consecutive work with
 * WORK_STRUCT_LINKED set in its predecessor. See assign_work() for details on
 * @nextp.
 *
 * CONTEXT:
 * raw_spin_lock_irq(pool->lock).
 */
static void move_linked_works(struct work_struct *work, struct list_head *head,
			      struct work_struct **nextp)
{
	struct work_struct *n;

	/*
	 * Linked worklist will always end before the end of the list,
	 * use NULL for list head.
	 */
	list_for_each_entry_safe_from(work, n, NULL, entry) {
		list_move_tail(&work->entry, head);
		if (!(*work_data_bits(work) & WORK_STRUCT_LINKED))
			break;
	}

	/*
	 * If we're already inside safe list traversal and have moved
	 * multiple works to the scheduled queue, the next position
	 * needs to be updated.
	 */
	if (nextp)
		*nextp = n;
}

/**
 * assign_work - assign a work item and its linked work items to a worker
 * @work: work to assign
 * @worker: worker to assign to
 * @nextp: out parameter for nested worklist walking
 *
 * Assign @work and its linked work items to @worker. If @work is already being
 * executed by another worker in the same pool, it'll be punted there.
 *
 * If @nextp is not NULL, it's updated to point to the next work of the last
 * scheduled work. This allows assign_work() to be nested inside
 * list_for_each_entry_safe().
 *
 * Returns %true if @work was successfully assigned to @worker. %false if @work
 * was punted to another worker already executing it.
 */
static bool assign_work(struct work_struct *work, struct worker *worker,
			struct work_struct **nextp)
{
	struct worker_pool *pool = worker->pool;
	struct worker *collision;

	lockdep_assert_held(&pool->lock);

	/*
	 * A single work shouldn't be executed concurrently by multiple workers.
	 * __queue_work() ensures that @work doesn't jump to a different pool
	 * while still running in the previous pool. Here, we should ensure that
	 * @work is not executed concurrently by multiple workers from the same
	 * pool. Check whether anyone is already processing the work. If so,
	 * defer the work to the currently executing one.
	 */
	collision = find_worker_executing_work(pool, work);
	if (unlikely(collision)) {
		move_linked_works(work, &collision->scheduled, nextp);
		return false;
	}

	move_linked_works(work, &worker->scheduled, nextp);
	return true;
}

/**
 * kick_pool - wake up an idle worker if necessary
 * @pool: pool to kick
 *
 * @pool may have pending work items. Wake up worker if necessary. Returns
 * whether a worker was woken up.
 */
static bool kick_pool(struct worker_pool *pool)
{
	struct worker *worker = first_idle_worker(pool);
	struct task_struct *p;

	lockdep_assert_held(&pool->lock);

	if (!need_more_worker(pool) || !worker)
		return false;

	p = worker->task;

#ifdef CONFIG_SMP
	/*
	 * Idle @worker is about to execute @work and waking up provides an
	 * opportunity to migrate @worker at a lower cost by setting the task's
	 * wake_cpu field. Let's see if we want to move @worker to improve
	 * execution locality.
	 *
	 * We're waking the worker that most recently went idle and there's
	 * some chance that @worker is marked idle but hasn't gone off CPU yet.
	 * If so, setting the wake_cpu won't do anything. As this is a
	 * best-effort optimization and the race window is narrow, let's leave
	 * as-is for now. If this becomes pronounced, we can skip over workers
	 * which are still on cpu when picking an idle worker.
	 *
	 * If @pool has non-strict affinity, @worker might have ended up outside
	 * its affinity scope. Repatriate.
	 */
	if (!pool->attrs->affn_strict &&
	    !cpumask_test_cpu(p->wake_cpu, pool->attrs->__pod_cpumask)) {
		struct work_struct *work = list_first_entry(&pool->worklist,
						struct work_struct, entry);
		p->wake_cpu = cpumask_any_distribute(pool->attrs->__pod_cpumask);
		get_work_pwq(work)->stats[PWQ_STAT_REPATRIATED]++;
	}
#endif
	wake_up_process(p);
	return true;
}

#ifdef CONFIG_WQ_CPU_INTENSIVE_REPORT

/*
 * Concurrency-managed per-cpu work items that hog CPU for longer than
 * wq_cpu_intensive_thresh_us trigger the automatic CPU_INTENSIVE mechanism,
 * which prevents them from stalling other concurrency-managed work items. If a
 * work function keeps triggering this mechanism, it's likely that the work item
 * should be using an unbound workqueue instead.
 *
 * wq_cpu_intensive_report() tracks work functions which trigger such conditions
 * and reports them so that they can be examined and converted to use unbound
 * workqueues as appropriate. To avoid flooding the console, each violating work
 * function is tracked and reported with exponential backoff.
 */
#define WCI_MAX_ENTS 128

struct wci_ent {
	work_func_t		func;
	atomic64_t		cnt;
	struct hlist_node	hash_node;
};

static struct wci_ent wci_ents[WCI_MAX_ENTS];
static int wci_nr_ents;
static DEFINE_RAW_SPINLOCK(wci_lock);
static DEFINE_HASHTABLE(wci_hash, ilog2(WCI_MAX_ENTS));

static struct wci_ent *wci_find_ent(work_func_t func)
{
	struct wci_ent *ent;

	hash_for_each_possible_rcu(wci_hash, ent, hash_node,
				   (unsigned long)func) {
		if (ent->func == func)
			return ent;
	}
	return NULL;
}

static void wq_cpu_intensive_report(work_func_t func)
{
	struct wci_ent *ent;

restart:
	ent = wci_find_ent(func);
	if (ent) {
		u64 cnt;

		/*
		 * Start reporting from the fourth time and back off
		 * exponentially.
		 */
		cnt = atomic64_inc_return_relaxed(&ent->cnt);
		if (cnt >= 4 && is_power_of_2(cnt))
			printk_deferred(KERN_WARNING "workqueue: %ps hogged CPU for >%luus %llu times, consider switching to WQ_UNBOUND\n",
					ent->func, wq_cpu_intensive_thresh_us,
					atomic64_read(&ent->cnt));
		return;
	}

	/*
	 * @func is a new violation. Allocate a new entry for it. If wci_ents[]
	 * is exhausted, something went really wrong and we probably made enough
	 * noise already.
	 */
	if (wci_nr_ents >= WCI_MAX_ENTS)
		return;

	raw_spin_lock(&wci_lock);

	if (wci_nr_ents >= WCI_MAX_ENTS) {
		raw_spin_unlock(&wci_lock);
		return;
	}

	if (wci_find_ent(func)) {
		raw_spin_unlock(&wci_lock);
		goto restart;
	}

	ent = &wci_ents[wci_nr_ents++];
	ent->func = func;
	atomic64_set(&ent->cnt, 1);
	hash_add_rcu(wci_hash, &ent->hash_node, (unsigned long)func);

	raw_spin_unlock(&wci_lock);
}
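
/*
 * Illustrative report cadence (derived from the cnt >= 4 &&
 * is_power_of_2(cnt) test above): a repeat offender is reported on its
 * 4th, 8th, 16th, 32nd, ... violation, so the warning rate decays
 * exponentially instead of flooding the console.
 */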

#else	/* CONFIG_WQ_CPU_INTENSIVE_REPORT */
static void wq_cpu_intensive_report(work_func_t func) {}
#endif	/* CONFIG_WQ_CPU_INTENSIVE_REPORT */

/**
 * wq_worker_running - a worker is running again
 * @task: task waking up
 *
 * This function is called when a worker returns from schedule()
 */
void wq_worker_running(struct task_struct *task)
{
	struct worker *worker = kthread_data(task);

	if (!READ_ONCE(worker->sleeping))
		return;

	/*
	 * If preempted by unbind_workers() between the WORKER_NOT_RUNNING check
	 * and the nr_running increment below, we may ruin the nr_running reset
	 * and leave with an unexpected pool->nr_running == 1 on the newly unbound
	 * pool. Protect against such race.
	 */
	preempt_disable();
	if (!(worker->flags & WORKER_NOT_RUNNING))
		worker->pool->nr_running++;
	preempt_enable();

	/*
	 * CPU intensive auto-detection cares about how long a work item hogged
	 * CPU without sleeping. Reset the starting timestamp on wakeup.
	 */
	worker->current_at = worker->task->se.sum_exec_runtime;

	WRITE_ONCE(worker->sleeping, 0);
}

/**
 * wq_worker_sleeping - a worker is going to sleep
 * @task: task going to sleep
 *
 * This function is called from schedule() when a busy worker is
 * going to sleep.
 */
void wq_worker_sleeping(struct task_struct *task)
{
	struct worker *worker = kthread_data(task);
	struct worker_pool *pool;

	/*
	 * Rescuers, which may not have all the fields set up like normal
	 * workers, also reach here; let's not access anything before
	 * checking NOT_RUNNING.
	 */
	if (worker->flags & WORKER_NOT_RUNNING)
		return;

	pool = worker->pool;

	/* Return if preempted before wq_worker_running() was reached */
	if (READ_ONCE(worker->sleeping))
		return;

	WRITE_ONCE(worker->sleeping, 1);
	raw_spin_lock_irq(&pool->lock);

	/*
	 * Recheck in case unbind_workers() preempted us. We don't
	 * want to decrement nr_running after the worker is unbound
	 * and nr_running has been reset.
	 */
	if (worker->flags & WORKER_NOT_RUNNING) {
		raw_spin_unlock_irq(&pool->lock);
		return;
	}

	pool->nr_running--;
	if (kick_pool(pool))
		worker->current_pwq->stats[PWQ_STAT_CM_WAKEUP]++;

	raw_spin_unlock_irq(&pool->lock);
}

/**
 * wq_worker_tick - a scheduler tick occurred while a kworker is running
 * @task: task currently running
 *
 * Called from scheduler_tick(). We're in the IRQ context and the current
 * worker's fields which follow the 'K' locking rule can be accessed safely.
 */
void wq_worker_tick(struct task_struct *task)
{
	struct worker *worker = kthread_data(task);
	struct pool_workqueue *pwq = worker->current_pwq;
	struct worker_pool *pool = worker->pool;

	if (!pwq)
		return;

	pwq->stats[PWQ_STAT_CPU_TIME] += TICK_USEC;

	if (!wq_cpu_intensive_thresh_us)
		return;

	/*
	 * If the current worker is concurrency managed and hogged the CPU for
	 * longer than wq_cpu_intensive_thresh_us, it's automatically marked
	 * CPU_INTENSIVE to avoid stalling other concurrency-managed work items.
	 *
	 * @worker->sleeping being set means that @worker is in the process of
	 * switching out voluntarily and won't be contributing to
	 * @pool->nr_running until it wakes up. As wq_worker_sleeping() also
	 * decrements ->nr_running, setting CPU_INTENSIVE here can lead to
	 * double decrements. The task is releasing the CPU anyway. Let's skip.
	 * We probably want to make this prettier in the future.
	 */
	if ((worker->flags & WORKER_NOT_RUNNING) || READ_ONCE(worker->sleeping) ||
	    worker->task->se.sum_exec_runtime - worker->current_at <
	    wq_cpu_intensive_thresh_us * NSEC_PER_USEC)
		return;

	raw_spin_lock(&pool->lock);

	worker_set_flags(worker, WORKER_CPU_INTENSIVE);
	wq_cpu_intensive_report(worker->current_func);
	pwq->stats[PWQ_STAT_CPU_INTENSIVE]++;

	if (kick_pool(pool))
		pwq->stats[PWQ_STAT_CM_WAKEUP]++;

	raw_spin_unlock(&pool->lock);
}

/**
 * wq_worker_last_func - retrieve worker's last work function
 * @task: Task to retrieve last work function of.
 *
 * Determine the last function a worker executed. This is called from
 * the scheduler to get a worker's last known identity.
 *
 * CONTEXT:
 * raw_spin_lock_irq(rq->lock)
 *
 * This function is called during schedule() when a kworker is going
 * to sleep. It's used by psi to identify aggregation workers during
 * dequeuing, to allow periodic aggregation to shut off when that
 * worker is the last task in the system or cgroup to go to sleep.
 *
 * As this function doesn't involve any workqueue-related locking, it
 * only returns stable values when called from inside the scheduler's
 * queuing and dequeuing paths, when @task, which must be a kworker,
 * is guaranteed to not be processing any works.
 *
 * Return:
 * The last work function %current executed as a worker, NULL if it
 * hasn't executed any work yet.
 */
work_func_t wq_worker_last_func(struct task_struct *task)
{
	struct worker *worker = kthread_data(task);

	return worker->last_func;
}

/**
 * wq_node_nr_active - Determine wq_node_nr_active to use
 * @wq: workqueue of interest
 * @node: NUMA node, can be %NUMA_NO_NODE
 *
 * Determine wq_node_nr_active to use for @wq on @node. Returns:
 *
 * - %NULL for per-cpu workqueues as they don't need to use shared nr_active.
 *
 * - node_nr_active[nr_node_ids] if @node is %NUMA_NO_NODE.
 *
 * - Otherwise, node_nr_active[@node].
 */
static struct wq_node_nr_active *wq_node_nr_active(struct workqueue_struct *wq,
						   int node)
{
	if (!(wq->flags & WQ_UNBOUND))
		return NULL;

	if (node == NUMA_NO_NODE)
		node = nr_node_ids;

	return wq->node_nr_active[node];
}
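
/*
 * For illustration (an assumption about a nr_node_ids == 2 system, not
 * from the original source): wq->node_nr_active[] then has three slots -
 * [0] and [1] count per-node active work, while the extra [2] slot
 * (index nr_node_ids) is the one returned when @node is %NUMA_NO_NODE.
 */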

/**
 * wq_update_node_max_active - Update per-node max_actives to use
 * @wq: workqueue to update
 * @off_cpu: CPU that's going down, -1 if a CPU is not going down
 *
 * Update @wq->node_nr_active[]->max. @wq must be unbound. max_active is
 * distributed among nodes in proportion to each node's share of the online
 * CPUs. The result is always between @wq->min_active and max_active.
 */
static void wq_update_node_max_active(struct workqueue_struct *wq, int off_cpu)
{
	struct cpumask *effective = unbound_effective_cpumask(wq);
	int min_active = READ_ONCE(wq->min_active);
	int max_active = READ_ONCE(wq->max_active);
	int total_cpus, node;

	lockdep_assert_held(&wq->mutex);

	if (!cpumask_test_cpu(off_cpu, effective))
		off_cpu = -1;

	total_cpus = cpumask_weight_and(effective, cpu_online_mask);
	if (off_cpu >= 0)
		total_cpus--;

	for_each_node(node) {
		int node_cpus;

		node_cpus = cpumask_weight_and(effective, cpumask_of_node(node));
		if (off_cpu >= 0 && cpu_to_node(off_cpu) == node)
			node_cpus--;

		wq_node_nr_active(wq, node)->max =
			clamp(DIV_ROUND_UP(max_active * node_cpus, total_cpus),
			      min_active, max_active);
	}

	wq_node_nr_active(wq, NUMA_NO_NODE)->max = min_active;
}
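
/*
 * Worked example (illustrative, not from the original source): with
 * max_active = 16, min_active = 8 and two nodes each contributing 4 of
 * the 8 online effective CPUs, each node gets
 * clamp(DIV_ROUND_UP(16 * 4, 8), 8, 16) = 8, while the NUMA_NO_NODE
 * slot is pinned to min_active = 8.
 */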
1534 
1535 /**
1536  * get_pwq - get an extra reference on the specified pool_workqueue
1537  * @pwq: pool_workqueue to get
1538  *
1539  * Obtain an extra reference on @pwq.  The caller should guarantee that
1540  * @pwq has positive refcnt and be holding the matching pool->lock.
1541  */
1542 static void get_pwq(struct pool_workqueue *pwq)
1543 {
1544 	lockdep_assert_held(&pwq->pool->lock);
1545 	WARN_ON_ONCE(pwq->refcnt <= 0);
1546 	pwq->refcnt++;
1547 }
1548 
1549 /**
1550  * put_pwq - put a pool_workqueue reference
1551  * @pwq: pool_workqueue to put
1552  *
1553  * Drop a reference of @pwq.  If its refcnt reaches zero, schedule its
1554  * destruction.  The caller should be holding the matching pool->lock.
1555  */
1556 static void put_pwq(struct pool_workqueue *pwq)
1557 {
1558 	lockdep_assert_held(&pwq->pool->lock);
1559 	if (likely(--pwq->refcnt))
1560 		return;
1561 	/*
1562 	 * @pwq can't be released under pool->lock, bounce to a dedicated
1563 	 * kthread_worker to avoid A-A deadlocks.
1564 	 */
1565 	kthread_queue_work(pwq_release_worker, &pwq->release_work);
1566 }
1567 
1568 /**
1569  * put_pwq_unlocked - put_pwq() with surrounding pool lock/unlock
1570  * @pwq: pool_workqueue to put (can be %NULL)
1571  *
1572  * put_pwq() with locking.  This function also allows %NULL @pwq.
1573  */
1574 static void put_pwq_unlocked(struct pool_workqueue *pwq)
1575 {
1576 	if (pwq) {
1577 		/*
1578 		 * As both pwqs and pools are RCU protected, the
1579 		 * following lock operations are safe.
1580 		 */
1581 		raw_spin_lock_irq(&pwq->pool->lock);
1582 		put_pwq(pwq);
1583 		raw_spin_unlock_irq(&pwq->pool->lock);
1584 	}
1585 }
1586 
1587 static bool pwq_is_empty(struct pool_workqueue *pwq)
1588 {
1589 	return !pwq->nr_active && list_empty(&pwq->inactive_works);
1590 }
1591 
1592 static void __pwq_activate_work(struct pool_workqueue *pwq,
1593 				struct work_struct *work)
1594 {
1595 	unsigned long *wdb = work_data_bits(work);
1596 
1597 	WARN_ON_ONCE(!(*wdb & WORK_STRUCT_INACTIVE));
1598 	trace_workqueue_activate_work(work);
1599 	if (list_empty(&pwq->pool->worklist))
1600 		pwq->pool->watchdog_ts = jiffies;
1601 	move_linked_works(work, &pwq->pool->worklist, NULL);
1602 	__clear_bit(WORK_STRUCT_INACTIVE_BIT, wdb);
1603 }
1604 
1605 /**
1606  * pwq_activate_work - Activate a work item if inactive
1607  * @pwq: pool_workqueue @work belongs to
1608  * @work: work item to activate
1609  *
1610  * Returns %true if activated. %false if already active.
1611  */
1612 static bool pwq_activate_work(struct pool_workqueue *pwq,
1613 			      struct work_struct *work)
1614 {
1615 	struct worker_pool *pool = pwq->pool;
1616 	struct wq_node_nr_active *nna;
1617 
1618 	lockdep_assert_held(&pool->lock);
1619 
1620 	if (!(*work_data_bits(work) & WORK_STRUCT_INACTIVE))
1621 		return false;
1622 
1623 	nna = wq_node_nr_active(pwq->wq, pool->node);
1624 	if (nna)
1625 		atomic_inc(&nna->nr);
1626 
1627 	pwq->nr_active++;
1628 	__pwq_activate_work(pwq, work);
1629 	return true;
1630 }
1631 
1632 static bool tryinc_node_nr_active(struct wq_node_nr_active *nna)
1633 {
1634 	int max = READ_ONCE(nna->max);
1635 
1636 	while (true) {
1637 		int old, tmp;
1638 
1639 		old = atomic_read(&nna->nr);
1640 		if (old >= max)
1641 			return false;
1642 		tmp = atomic_cmpxchg_relaxed(&nna->nr, old, old + 1);
1643 		if (tmp == old)
1644 			return true;
1645 	}
1646 }
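
/*
 * Example race (illustrative): with nna->max == 2 and nna->nr == 1, two
 * CPUs may both read old == 1.  Only one cmpxchg(1 -> 2) succeeds; the
 * loser loops, rereads nr == 2 >= max and returns %false rather than
 * spinning forever.
 */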
1647 
1648 /**
1649  * pwq_tryinc_nr_active - Try to increment nr_active for a pwq
1650  * @pwq: pool_workqueue of interest
1651  * @fill: max_active may have increased, try to increase concurrency level
1652  *
1653  * Try to increment nr_active for @pwq. Returns %true if an nr_active count is
1654  * successfully obtained. %false otherwise.
1655  */
1656 static bool pwq_tryinc_nr_active(struct pool_workqueue *pwq, bool fill)
1657 {
1658 	struct workqueue_struct *wq = pwq->wq;
1659 	struct worker_pool *pool = pwq->pool;
1660 	struct wq_node_nr_active *nna = wq_node_nr_active(wq, pool->node);
1661 	bool obtained = false;
1662 
1663 	lockdep_assert_held(&pool->lock);
1664 
1665 	if (!nna) {
1666 		/* per-cpu workqueue, pwq->nr_active is sufficient */
1667 		obtained = pwq->nr_active < READ_ONCE(wq->max_active);
1668 		goto out;
1669 	}
1670 
1671 	/*
1672 	 * Unbound workqueue uses per-node shared nr_active $nna. If @pwq is
1673 	 * already waiting on $nna, pwq_dec_nr_active() will maintain the
1674 	 * concurrency level. Don't jump the line.
1675 	 *
1676 	 * We need to ignore the pending test after max_active has increased as
1677 	 * pwq_dec_nr_active() can only maintain the concurrency level but not
1678 	 * increase it. This is indicated by @fill.
1679 	 */
1680 	if (!list_empty(&pwq->pending_node) && likely(!fill))
1681 		goto out;
1682 
1683 	obtained = tryinc_node_nr_active(nna);
1684 	if (obtained)
1685 		goto out;
1686 
1687 	/*
1688 	 * Lockless acquisition failed. Lock, add ourself to $nna->pending_pwqs
1689 	 * and try again. The smp_mb() is paired with the implied memory barrier
1690 	 * of atomic_dec_return() in pwq_dec_nr_active() to ensure that either
1691 	 * we see the decremented $nna->nr or they see non-empty
1692 	 * $nna->pending_pwqs.
1693 	 */
1694 	raw_spin_lock(&nna->lock);
1695 
1696 	if (list_empty(&pwq->pending_node))
1697 		list_add_tail(&pwq->pending_node, &nna->pending_pwqs);
1698 	else if (likely(!fill))
1699 		goto out_unlock;
1700 
1701 	smp_mb();
1702 
1703 	obtained = tryinc_node_nr_active(nna);
1704 
1705 	/*
1706 	 * If @fill, @pwq might have already been pending. Being spuriously
1707 	 * pending in cold paths doesn't affect anything. Let's leave it be.
1708 	 */
1709 	if (obtained && likely(!fill))
1710 		list_del_init(&pwq->pending_node);
1711 
1712 out_unlock:
1713 	raw_spin_unlock(&nna->lock);
1714 out:
1715 	if (obtained)
1716 		pwq->nr_active++;
1717 	return obtained;
1718 }
1719 
1720 /**
1721  * pwq_activate_first_inactive - Activate the first inactive work item on a pwq
1722  * @pwq: pool_workqueue of interest
1723  * @fill: max_active may have increased, try to increase concurrency level
1724  *
1725  * Activate the first inactive work item of @pwq if available and allowed by
1726  * max_active limit.
1727  *
1728  * Returns %true if an inactive work item has been activated. %false if no
1729  * inactive work item is found or max_active limit is reached.
1730  */
1731 static bool pwq_activate_first_inactive(struct pool_workqueue *pwq, bool fill)
1732 {
1733 	struct work_struct *work =
1734 		list_first_entry_or_null(&pwq->inactive_works,
1735 					 struct work_struct, entry);
1736 
1737 	if (work && pwq_tryinc_nr_active(pwq, fill)) {
1738 		__pwq_activate_work(pwq, work);
1739 		return true;
1740 	} else {
1741 		return false;
1742 	}
1743 }
1744 
1745 /**
1746  * node_activate_pending_pwq - Activate a pending pwq on a wq_node_nr_active
1747  * @nna: wq_node_nr_active to activate a pending pwq for
1748  * @caller_pool: worker_pool the caller is locking
1749  *
1750  * Activate a pwq in @nna->pending_pwqs. Called with @caller_pool locked.
1751  * @caller_pool may be unlocked and relocked to lock other worker_pools.
1752  */
1753 static void node_activate_pending_pwq(struct wq_node_nr_active *nna,
1754 				      struct worker_pool *caller_pool)
1755 {
1756 	struct worker_pool *locked_pool = caller_pool;
1757 	struct pool_workqueue *pwq;
1758 	struct work_struct *work;
1759 
1760 	lockdep_assert_held(&caller_pool->lock);
1761 
1762 	raw_spin_lock(&nna->lock);
1763 retry:
1764 	pwq = list_first_entry_or_null(&nna->pending_pwqs,
1765 				       struct pool_workqueue, pending_node);
1766 	if (!pwq)
1767 		goto out_unlock;
1768 
1769 	/*
1770 	 * If @pwq is for a different pool than @locked_pool, we need to lock
1771 	 * @pwq->pool->lock. Let's trylock first. If unsuccessful, do the unlock
1772 	 * / lock dance. For that, we also need to release @nna->lock as it's
1773 	 * nested inside pool locks.
1774 	 */
1775 	if (pwq->pool != locked_pool) {
1776 		raw_spin_unlock(&locked_pool->lock);
1777 		locked_pool = pwq->pool;
1778 		if (!raw_spin_trylock(&locked_pool->lock)) {
1779 			raw_spin_unlock(&nna->lock);
1780 			raw_spin_lock(&locked_pool->lock);
1781 			raw_spin_lock(&nna->lock);
1782 			goto retry;
1783 		}
1784 	}
1785 
1786 	/*
1787 	 * $pwq may not have any inactive work items due to e.g. cancellations.
1788 	 * Drop it from pending_pwqs and see if there's another one.
1789 	 */
1790 	work = list_first_entry_or_null(&pwq->inactive_works,
1791 					struct work_struct, entry);
1792 	if (!work) {
1793 		list_del_init(&pwq->pending_node);
1794 		goto retry;
1795 	}
1796 
1797 	/*
1798 	 * Acquire an nr_active count and activate the inactive work item. If
1799 	 * $pwq still has inactive work items, rotate it to the end of the
1800 	 * pending_pwqs so that we round-robin through them. This means that
1801 	 * inactive work items are not activated in queueing order, which is fine
1802 	 * given that there has never been any ordering across different pwqs.
1803 	 */
1804 	if (likely(tryinc_node_nr_active(nna))) {
1805 		pwq->nr_active++;
1806 		__pwq_activate_work(pwq, work);
1807 
1808 		if (list_empty(&pwq->inactive_works))
1809 			list_del_init(&pwq->pending_node);
1810 		else
1811 			list_move_tail(&pwq->pending_node, &nna->pending_pwqs);
1812 
1813 		/* if activating a foreign pool, make sure it's running */
1814 		if (pwq->pool != caller_pool)
1815 			kick_pool(pwq->pool);
1816 	}
1817 
1818 out_unlock:
1819 	raw_spin_unlock(&nna->lock);
1820 	if (locked_pool != caller_pool) {
1821 		raw_spin_unlock(&locked_pool->lock);
1822 		raw_spin_lock(&caller_pool->lock);
1823 	}
1824 }
1825 
1826 /**
1827  * pwq_dec_nr_active - Retire an active count
1828  * @pwq: pool_workqueue of interest
1829  *
1830  * Decrement @pwq's nr_active and try to activate the first inactive work item.
1831  * For unbound workqueues, this function may temporarily drop @pwq->pool->lock.
1832  */
1833 static void pwq_dec_nr_active(struct pool_workqueue *pwq)
1834 {
1835 	struct worker_pool *pool = pwq->pool;
1836 	struct wq_node_nr_active *nna = wq_node_nr_active(pwq->wq, pool->node);
1837 
1838 	lockdep_assert_held(&pool->lock);
1839 
1840 	/*
1841 	 * @pwq->nr_active should be decremented for both percpu and unbound
1842 	 * workqueues.
1843 	 */
1844 	pwq->nr_active--;
1845 
1846 	/*
1847 	 * For a percpu workqueue, it's simple. Just need to kick the first
1848 	 * inactive work item on @pwq itself.
1849 	 */
1850 	if (!nna) {
1851 		pwq_activate_first_inactive(pwq, false);
1852 		return;
1853 	}
1854 
1855 	/*
1856 	 * If @pwq is for an unbound workqueue, it's more complicated because
1857 	 * multiple pwqs and pools may be sharing the nr_active count. When a
1858 	 * pwq needs to wait for an nr_active count, it puts itself on
1859 	 * $nna->pending_pwqs. The following atomic_dec_return()'s implied
1860 	 * memory barrier is paired with smp_mb() in pwq_tryinc_nr_active() to
1861 	 * guarantee that either we see non-empty pending_pwqs or they see
1862 	 * decremented $nna->nr.
1863 	 *
1864 	 * $nna->max may change as CPUs come online/offline and @pwq->wq's
1865 	 * max_active gets updated. However, it is guaranteed to be equal to or
1866 	 * larger than @pwq->wq->min_active which is above zero unless freezing.
1867 	 * This maintains the forward progress guarantee.
1868 	 */
1869 	if (atomic_dec_return(&nna->nr) >= READ_ONCE(nna->max))
1870 		return;
1871 
1872 	if (!list_empty(&nna->pending_pwqs))
1873 		node_activate_pending_pwq(nna, pool);
1874 }
1875 
1876 /**
1877  * pwq_dec_nr_in_flight - decrement pwq's nr_in_flight
1878  * @pwq: pwq of interest
1879  * @work_data: work_data of work which left the queue
1880  *
1881  * A work either has completed or is removed from pending queue,
1882  * decrement nr_in_flight of its pwq and handle workqueue flushing.
1883  *
1884  * NOTE:
1885  * For unbound workqueues, this function may temporarily drop @pwq->pool->lock
1886  * and thus should be called after all other state updates for the in-flight
1887  * work item is complete.
1888  *
1889  * CONTEXT:
1890  * raw_spin_lock_irq(pool->lock).
1891  */
1892 static void pwq_dec_nr_in_flight(struct pool_workqueue *pwq, unsigned long work_data)
1893 {
1894 	int color = get_work_color(work_data);
1895 
1896 	if (!(work_data & WORK_STRUCT_INACTIVE))
1897 		pwq_dec_nr_active(pwq);
1898 
1899 	pwq->nr_in_flight[color]--;
1900 
1901 	/* is flush in progress and are we at the flushing tip? */
1902 	if (likely(pwq->flush_color != color))
1903 		goto out_put;
1904 
1905 	/* are there still in-flight works? */
1906 	if (pwq->nr_in_flight[color])
1907 		goto out_put;
1908 
1909 	/* this pwq is done, clear flush_color */
1910 	pwq->flush_color = -1;
1911 
1912 	/*
1913 	 * If this was the last pwq, wake up the first flusher.  It
1914 	 * will handle the rest.
1915 	 */
1916 	if (atomic_dec_and_test(&pwq->wq->nr_pwqs_to_flush))
1917 		complete(&pwq->wq->first_flusher->done);
1918 out_put:
1919 	put_pwq(pwq);
1920 }
1921 
1922 /**
1923  * try_to_grab_pending - steal work item from worklist and disable irq
1924  * @work: work item to steal
1925  * @is_dwork: @work is a delayed_work
1926  * @flags: place to store irq state
1927  *
1928  * Try to grab PENDING bit of @work.  This function can handle @work in any
1929  * stable state - idle, on timer or on worklist.
1930  *
1931  * Return:
1932  *
1933  *  ========	================================================================
1934  *  1		if @work was pending and we successfully stole PENDING
1935  *  0		if @work was idle and we claimed PENDING
1936  *  -EAGAIN	if PENDING couldn't be grabbed at the moment, safe to busy-retry
1937  *  -ENOENT	if someone else is canceling @work, this state may persist
1938  *		for arbitrarily long
1939  *  ========	================================================================
1940  *
1941  * Note:
1942  * On >= 0 return, the caller owns @work's PENDING bit.  To avoid getting
1943  * interrupted while holding PENDING and @work off queue, irq must be
1944  * disabled on entry.  This, combined with delayed_work->timer being
1945  * irqsafe, ensures that we return -EAGAIN for finite short period of time.
1946  *
1947  * On successful return, >= 0, irq is disabled and the caller is
1948  * responsible for releasing it using local_irq_restore(*@flags).
1949  *
1950  * This function is safe to call from any context including IRQ handler.
1951  */
1952 static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
1953 			       unsigned long *flags)
1954 {
1955 	struct worker_pool *pool;
1956 	struct pool_workqueue *pwq;
1957 
1958 	local_irq_save(*flags);
1959 
1960 	/* try to steal the timer if it exists */
1961 	if (is_dwork) {
1962 		struct delayed_work *dwork = to_delayed_work(work);
1963 
1964 		/*
1965 		 * dwork->timer is irqsafe.  If del_timer() fails, it's
1966 		 * guaranteed that the timer is not queued anywhere and not
1967 		 * running on the local CPU.
1968 		 */
1969 		if (likely(del_timer(&dwork->timer)))
1970 			return 1;
1971 	}
1972 
1973 	/* try to claim PENDING the normal way */
1974 	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)))
1975 		return 0;
1976 
1977 	rcu_read_lock();
1978 	/*
1979 	 * The queueing is in progress, or it is already queued. Try to
1980 	 * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
1981 	 */
1982 	pool = get_work_pool(work);
1983 	if (!pool)
1984 		goto fail;
1985 
1986 	raw_spin_lock(&pool->lock);
1987 	/*
1988 	 * work->data is guaranteed to point to pwq only while the work
1989 	 * item is queued on pwq->wq, and both updating work->data to point
1990 	 * to pwq on queueing and to pool on dequeueing are done under
1991 	 * pwq->pool->lock.  This in turn guarantees that, if work->data
1992 	 * points to pwq which is associated with a locked pool, the work
1993 	 * item is currently queued on that pool.
1994 	 */
1995 	pwq = get_work_pwq(work);
1996 	if (pwq && pwq->pool == pool) {
1997 		debug_work_deactivate(work);
1998 
1999 		/*
2000 		 * A cancelable inactive work item must be in the
2001 		 * pwq->inactive_works since a queued barrier can't be
2002 		 * canceled (see the comments in insert_wq_barrier()).
2003 		 *
2004 		 * An inactive work item cannot be grabbed directly because
2005 		 * it might have linked barrier work items which, if left
2006 		 * on the inactive_works list, will confuse pwq->nr_active
2007 		 * management later on and cause stall.  Make sure the work
2008 		 * item is activated before grabbing.
2009 		 */
2010 		pwq_activate_work(pwq, work);
2011 
2012 		list_del_init(&work->entry);
2013 
2014 		/* work->data points to pwq iff queued, point to pool */
2015 		set_work_pool_and_keep_pending(work, pool->id);
2016 
2017 		/* must be the last step, see the function comment */
2018 		pwq_dec_nr_in_flight(pwq, *work_data_bits(work));
2019 
2020 		raw_spin_unlock(&pool->lock);
2021 		rcu_read_unlock();
2022 		return 1;
2023 	}
2024 	raw_spin_unlock(&pool->lock);
2025 fail:
2026 	rcu_read_unlock();
2027 	local_irq_restore(*flags);
2028 	if (work_is_canceling(work))
2029 		return -ENOENT;
2030 	cpu_relax();
2031 	return -EAGAIN;
2032 }
2033 
2034 /**
2035  * insert_work - insert a work into a pool
2036  * @pwq: pwq @work belongs to
2037  * @work: work to insert
2038  * @head: insertion point
2039  * @extra_flags: extra WORK_STRUCT_* flags to set
2040  *
2041  * Insert @work which belongs to @pwq after @head.  @extra_flags is or'd to
2042  * work_struct flags.
2043  *
2044  * CONTEXT:
2045  * raw_spin_lock_irq(pool->lock).
2046  */
2047 static void insert_work(struct pool_workqueue *pwq, struct work_struct *work,
2048 			struct list_head *head, unsigned int extra_flags)
2049 {
2050 	debug_work_activate(work);
2051 
2052 	/* record the work call stack in order to print it in KASAN reports */
2053 	kasan_record_aux_stack_noalloc(work);
2054 
2055 	/* we own @work, set data and link */
2056 	set_work_pwq(work, pwq, extra_flags);
2057 	list_add_tail(&work->entry, head);
2058 	get_pwq(pwq);
2059 }
2060 
2061 /*
2062  * Test whether @work is being queued from another work executing on the
2063  * same workqueue.
2064  */
2065 static bool is_chained_work(struct workqueue_struct *wq)
2066 {
2067 	struct worker *worker;
2068 
2069 	worker = current_wq_worker();
2070 	/*
2071 	 * Return %true iff I'm a worker executing a work item on @wq.  If
2072 	 * Return %true iff I'm a worker executing a work item on @wq.  Since
2073 	 * @worker would then be %current, dereferencing it needs no locking.
2074 	return worker && worker->current_pwq->wq == wq;
2075 }
2076 
2077 /*
2078  * When queueing an unbound work item to a wq, prefer local CPU if allowed
2079  * by wq_unbound_cpumask.  Otherwise, round robin among the allowed ones to
2080  * avoid perturbing sensitive tasks.
2081  */
2082 static int wq_select_unbound_cpu(int cpu)
2083 {
2084 	int new_cpu;
2085 
2086 	if (likely(!wq_debug_force_rr_cpu)) {
2087 		if (cpumask_test_cpu(cpu, wq_unbound_cpumask))
2088 			return cpu;
2089 	} else {
2090 		pr_warn_once("workqueue: round-robin CPU selection forced, expect performance impact\n");
2091 	}
2092 
2093 	new_cpu = __this_cpu_read(wq_rr_cpu_last);
2094 	new_cpu = cpumask_next_and(new_cpu, wq_unbound_cpumask, cpu_online_mask);
2095 	if (unlikely(new_cpu >= nr_cpu_ids)) {
2096 		new_cpu = cpumask_first_and(wq_unbound_cpumask, cpu_online_mask);
2097 		if (unlikely(new_cpu >= nr_cpu_ids))
2098 			return cpu;
2099 	}
2100 	__this_cpu_write(wq_rr_cpu_last, new_cpu);
2101 
2102 	return new_cpu;
2103 }
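
/*
 * The round-robin path above is a debug mechanism; it is switched on with
 * the wq_debug_force_rr_cpu knob (a module parameter defined earlier in
 * this file), hence the pr_warn_once() about the performance impact.
 */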
2104 
2105 static void __queue_work(int cpu, struct workqueue_struct *wq,
2106 			 struct work_struct *work)
2107 {
2108 	struct pool_workqueue *pwq;
2109 	struct worker_pool *last_pool, *pool;
2110 	unsigned int work_flags;
2111 	unsigned int req_cpu = cpu;
2112 
2113 	/*
2114 	 * While a work item is PENDING && off queue, a task trying to
2115 	 * steal the PENDING will busy-loop waiting for it to either get
2116 	 * queued or lose PENDING.  Grabbing PENDING and queueing should
2117 	 * happen with IRQ disabled.
2118 	 */
2119 	lockdep_assert_irqs_disabled();
2120 
2122 	/*
2123 	 * For a draining wq, only works from the same workqueue are
2124 	 * allowed. The __WQ_DESTROYING helps to spot the issue that
2125 	 * queues a new work item to a wq after destroy_workqueue(wq).
2126 	 */
2127 	if (unlikely(wq->flags & (__WQ_DESTROYING | __WQ_DRAINING) &&
2128 		     WARN_ON_ONCE(!is_chained_work(wq))))
2129 		return;
2130 	rcu_read_lock();
2131 retry:
2132 	/* pwq which will be used unless @work is executing elsewhere */
2133 	if (req_cpu == WORK_CPU_UNBOUND) {
2134 		if (wq->flags & WQ_UNBOUND)
2135 			cpu = wq_select_unbound_cpu(raw_smp_processor_id());
2136 		else
2137 			cpu = raw_smp_processor_id();
2138 	}
2139 
2140 	pwq = rcu_dereference(*per_cpu_ptr(wq->cpu_pwq, cpu));
2141 	pool = pwq->pool;
2142 
2143 	/*
2144 	 * If @work was previously on a different pool, it might still be
2145 	 * running there, in which case the work needs to be queued on that
2146 	 * pool to guarantee non-reentrancy.
2147 	 */
2148 	last_pool = get_work_pool(work);
2149 	if (last_pool && last_pool != pool) {
2150 		struct worker *worker;
2151 
2152 		raw_spin_lock(&last_pool->lock);
2153 
2154 		worker = find_worker_executing_work(last_pool, work);
2155 
2156 		if (worker && worker->current_pwq->wq == wq) {
2157 			pwq = worker->current_pwq;
2158 			pool = pwq->pool;
2159 			WARN_ON_ONCE(pool != last_pool);
2160 		} else {
2161 			/* meh... not running there, queue here */
2162 			raw_spin_unlock(&last_pool->lock);
2163 			raw_spin_lock(&pool->lock);
2164 		}
2165 	} else {
2166 		raw_spin_lock(&pool->lock);
2167 	}
2168 
2169 	/*
2170 	 * pwq is determined and locked. For unbound pools, we could have raced
2171 	 * with pwq release and it could already be dead. If its refcnt is zero,
2172 	 * repeat pwq selection. Note that unbound pwqs never die without
2173 	 * another pwq replacing it in cpu_pwq or while work items are executing
2174 	 * on it, so the retrying is guaranteed to make forward-progress.
2175 	 */
2176 	if (unlikely(!pwq->refcnt)) {
2177 		if (wq->flags & WQ_UNBOUND) {
2178 			raw_spin_unlock(&pool->lock);
2179 			cpu_relax();
2180 			goto retry;
2181 		}
2182 		/* oops */
2183 		WARN_ONCE(true, "workqueue: per-cpu pwq for %s on cpu%d has 0 refcnt\n",
2184 			  wq->name, cpu);
2185 	}
2186 
2187 	/* pwq determined, queue */
2188 	trace_workqueue_queue_work(req_cpu, pwq, work);
2189 
2190 	if (WARN_ON(!list_empty(&work->entry)))
2191 		goto out;
2192 
2193 	pwq->nr_in_flight[pwq->work_color]++;
2194 	work_flags = work_color_to_flags(pwq->work_color);
2195 
2196 	/*
2197 	 * Limit the number of concurrently active work items to max_active.
2198 	 * @work must also queue behind existing inactive work items to maintain
2199 	 * ordering when max_active changes. See wq_adjust_max_active().
2200 	 */
2201 	if (list_empty(&pwq->inactive_works) && pwq_tryinc_nr_active(pwq, false)) {
2202 		if (list_empty(&pool->worklist))
2203 			pool->watchdog_ts = jiffies;
2204 
2205 		trace_workqueue_activate_work(work);
2206 		insert_work(pwq, work, &pool->worklist, work_flags);
2207 		kick_pool(pool);
2208 	} else {
2209 		work_flags |= WORK_STRUCT_INACTIVE;
2210 		insert_work(pwq, work, &pwq->inactive_works, work_flags);
2211 	}
2212 
2213 out:
2214 	raw_spin_unlock(&pool->lock);
2215 	rcu_read_unlock();
2216 }
2217 
2218 /**
2219  * queue_work_on - queue work on specific cpu
2220  * @cpu: CPU number to execute work on
2221  * @wq: workqueue to use
2222  * @work: work to queue
2223  *
2224  * We queue the work to a specific CPU; the caller must ensure that the
2225  * CPU can't go away.  Callers that fail to ensure that the specified
2226  * CPU cannot go away will have the work execute on a randomly chosen CPU.
2227  * But note well that callers specifying a CPU that has never been
2228  * online will get a splat.
2229  *
2230  * Return: %false if @work was already on a queue, %true otherwise.
2231  */
2232 bool queue_work_on(int cpu, struct workqueue_struct *wq,
2233 		   struct work_struct *work)
2234 {
2235 	bool ret = false;
2236 	unsigned long flags;
2237 
2238 	local_irq_save(flags);
2239 
2240 	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
2241 		__queue_work(cpu, wq, work);
2242 		ret = true;
2243 	}
2244 
2245 	local_irq_restore(flags);
2246 	return ret;
2247 }
2248 EXPORT_SYMBOL(queue_work_on);
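
/*
 * Example usage (illustrative sketch, not part of workqueue.c; the
 * example_* names are made up): pin a work item to the current CPU.  A
 * real caller would also need to keep the CPU online, e.g. with
 * cpus_read_lock().
 */
static void example_work_fn(struct work_struct *work)
{
	pr_info("example: running in process context\n");
}

static DECLARE_WORK(example_work, example_work_fn);

static void example_kick(void)
{
	/* %false means @example_work was already pending */
	if (!queue_work_on(raw_smp_processor_id(), system_wq, &example_work))
		pr_debug("example: already queued\n");
}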
2249 
2250 /**
2251  * select_numa_node_cpu - Select a CPU based on NUMA node
2252  * @node: NUMA node ID that we want to select a CPU from
2253  *
2254  * This function will attempt to find a "random" cpu available on a given
2255  * node. If there are no CPUs available on the given node it will return
2256  * WORK_CPU_UNBOUND indicating that we should just schedule to any
2257  * available CPU if we need to schedule this work.
2258  */
2259 static int select_numa_node_cpu(int node)
2260 {
2261 	int cpu;
2262 
2263 	/* Delay binding to CPU if node is not valid or online */
2264 	if (node < 0 || node >= MAX_NUMNODES || !node_online(node))
2265 		return WORK_CPU_UNBOUND;
2266 
2267 	/* Use local node/cpu if we are already there */
2268 	cpu = raw_smp_processor_id();
2269 	if (node == cpu_to_node(cpu))
2270 		return cpu;
2271 
2272 	/* Use "random", otherwise known as "first", online CPU of the node */
2273 	cpu = cpumask_any_and(cpumask_of_node(node), cpu_online_mask);
2274 
2275 	/* If CPU is valid return that, otherwise just defer */
2276 	return cpu < nr_cpu_ids ? cpu : WORK_CPU_UNBOUND;
2277 }
2278 
2279 /**
2280  * queue_work_node - queue work on a "random" cpu for a given NUMA node
2281  * @node: NUMA node that we are targeting the work for
2282  * @wq: workqueue to use
2283  * @work: work to queue
2284  *
2285  * We queue the work to a "random" CPU within a given NUMA node. The basic
2286  * idea here is to provide a way to somehow associate work with a given
2287  * NUMA node.
2288  *
2289  * This function will only make a best effort attempt at getting this onto
2290  * the right NUMA node. If no node is requested or the requested node is
2291  * offline then we just fall back to standard queue_work behavior.
2292  *
2293  * Currently the "random" CPU ends up being the first available CPU in the
2294  * intersection of cpu_online_mask and the cpumask of the node, unless we
2295  * are running on the node. In that case we just use the current CPU.
2296  *
2297  * Return: %false if @work was already on a queue, %true otherwise.
2298  */
2299 bool queue_work_node(int node, struct workqueue_struct *wq,
2300 		     struct work_struct *work)
2301 {
2302 	unsigned long flags;
2303 	bool ret = false;
2304 
2305 	/*
2306 	 * The current implementation is specific to unbound workqueues.
2307 	 * Specifically, we only return the first available CPU for a given
2308 	 * node instead of cycling through individual CPUs within the node.
2309 	 *
2310 	 * If this is used with a per-cpu workqueue then the logic in
2311 	 * select_numa_node_cpu() would need to be updated to allow for
2312 	 * some round-robin type logic.
2313 	 */
2314 	WARN_ON_ONCE(!(wq->flags & WQ_UNBOUND));
2315 
2316 	local_irq_save(flags);
2317 
2318 	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
2319 		int cpu = select_numa_node_cpu(node);
2320 
2321 		__queue_work(cpu, wq, work);
2322 		ret = true;
2323 	}
2324 
2325 	local_irq_restore(flags);
2326 	return ret;
2327 }
2328 EXPORT_SYMBOL_GPL(queue_work_node);
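
/*
 * Example usage (illustrative sketch; example_* is a made-up name): queue
 * near the NUMA node of a device.  @wq must be a WQ_UNBOUND workqueue per
 * the WARN_ON_ONCE() above; a real caller might obtain @node from
 * dev_to_node().
 */
static void example_queue_near_node(struct workqueue_struct *wq, int node,
				    struct work_struct *work)
{
	if (!queue_work_node(node, wq, work))
		pr_debug("example: work already pending\n");
}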
2329 
2330 void delayed_work_timer_fn(struct timer_list *t)
2331 {
2332 	struct delayed_work *dwork = from_timer(dwork, t, timer);
2333 
2334 	/* should have been called from irqsafe timer with irq already off */
2335 	__queue_work(dwork->cpu, dwork->wq, &dwork->work);
2336 }
2337 EXPORT_SYMBOL(delayed_work_timer_fn);
2338 
2339 static void __queue_delayed_work(int cpu, struct workqueue_struct *wq,
2340 				struct delayed_work *dwork, unsigned long delay)
2341 {
2342 	struct timer_list *timer = &dwork->timer;
2343 	struct work_struct *work = &dwork->work;
2344 
2345 	WARN_ON_ONCE(!wq);
2346 	WARN_ON_ONCE(timer->function != delayed_work_timer_fn);
2347 	WARN_ON_ONCE(timer_pending(timer));
2348 	WARN_ON_ONCE(!list_empty(&work->entry));
2349 
2350 	/*
2351 	 * If @delay is 0, queue @dwork->work immediately.  This is for
2352 	 * both optimization and correctness.  The earliest @timer can
2353 	 * expire is on the closest next tick and delayed_work users depend
2354  * on there being no such delay when @delay is 0.
2355 	 */
2356 	if (!delay) {
2357 		__queue_work(cpu, wq, &dwork->work);
2358 		return;
2359 	}
2360 
2361 	dwork->wq = wq;
2362 	dwork->cpu = cpu;
2363 	timer->expires = jiffies + delay;
2364 
2365 	if (unlikely(cpu != WORK_CPU_UNBOUND))
2366 		add_timer_on(timer, cpu);
2367 	else
2368 		add_timer(timer);
2369 }
2370 
2371 /**
2372  * queue_delayed_work_on - queue work on specific CPU after delay
2373  * @cpu: CPU number to execute work on
2374  * @wq: workqueue to use
2375  * @dwork: work to queue
2376  * @delay: number of jiffies to wait before queueing
2377  *
2378  * Return: %false if @work was already on a queue, %true otherwise.  If
2379  * @delay is zero and @dwork is idle, it will be scheduled for immediate
2380  * execution.
2381  */
2382 bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
2383 			   struct delayed_work *dwork, unsigned long delay)
2384 {
2385 	struct work_struct *work = &dwork->work;
2386 	bool ret = false;
2387 	unsigned long flags;
2388 
2389 	/* read the comment in __queue_work() */
2390 	local_irq_save(flags);
2391 
2392 	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
2393 		__queue_delayed_work(cpu, wq, dwork, delay);
2394 		ret = true;
2395 	}
2396 
2397 	local_irq_restore(flags);
2398 	return ret;
2399 }
2400 EXPORT_SYMBOL(queue_delayed_work_on);
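
/*
 * Example usage (illustrative sketch; example_* names are made up): run a
 * handler roughly one second from now.  queue_delayed_work() is the
 * WORK_CPU_UNBOUND shorthand for this function.
 */
static void example_dwork_fn(struct work_struct *work)
{
	pr_info("example: ~1s elapsed since queueing\n");
}

static DECLARE_DELAYED_WORK(example_dwork, example_dwork_fn);

static void example_arm_timeout(void)
{
	queue_delayed_work(system_wq, &example_dwork, msecs_to_jiffies(1000));
}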
2401 
2402 /**
2403  * mod_delayed_work_on - modify delay of or queue a delayed work on specific CPU
2404  * @cpu: CPU number to execute work on
2405  * @wq: workqueue to use
2406  * @dwork: work to queue
2407  * @delay: number of jiffies to wait before queueing
2408  *
2409  * If @dwork is idle, equivalent to queue_delayed_work_on(); otherwise,
2410  * modify @dwork's timer so that it expires after @delay.  If @delay is
2411  * zero, @dwork is guaranteed to be scheduled immediately regardless of its
2412  * current state.
2413  *
2414  * Return: %false if @dwork was idle and queued, %true if @dwork was
2415  * pending and its timer was modified.
2416  *
2417  * This function is safe to call from any context including IRQ handler.
2418  * See try_to_grab_pending() for details.
2419  */
2420 bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
2421 			 struct delayed_work *dwork, unsigned long delay)
2422 {
2423 	unsigned long flags;
2424 	int ret;
2425 
2426 	do {
2427 		ret = try_to_grab_pending(&dwork->work, true, &flags);
2428 	} while (unlikely(ret == -EAGAIN));
2429 
2430 	if (likely(ret >= 0)) {
2431 		__queue_delayed_work(cpu, wq, dwork, delay);
2432 		local_irq_restore(flags);
2433 	}
2434 
2435 	/* -ENOENT from try_to_grab_pending() becomes %true */
2436 	return ret;
2437 }
2438 EXPORT_SYMBOL_GPL(mod_delayed_work_on);
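
/*
 * Example usage (illustrative sketch; example_* is a made-up name): the
 * debounce idiom built on mod_delayed_work(), the WORK_CPU_UNBOUND
 * shorthand for the above.  Every call pushes the deadline out, so the
 * handler runs only once events have been quiet for 100ms.
 */
static void example_debounce_event(struct delayed_work *dwork)
{
	mod_delayed_work(system_wq, dwork, msecs_to_jiffies(100));
}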
2439 
2440 static void rcu_work_rcufn(struct rcu_head *rcu)
2441 {
2442 	struct rcu_work *rwork = container_of(rcu, struct rcu_work, rcu);
2443 
2444 	/* read the comment in __queue_work() */
2445 	local_irq_disable();
2446 	__queue_work(WORK_CPU_UNBOUND, rwork->wq, &rwork->work);
2447 	local_irq_enable();
2448 }
2449 
2450 /**
2451  * queue_rcu_work - queue work after an RCU grace period
2452  * @wq: workqueue to use
2453  * @rwork: work to queue
2454  *
2455  * Return: %false if @rwork was already pending, %true otherwise.  Note
2456  * that a full RCU grace period is guaranteed only after a %true return.
2457  * While @rwork is guaranteed to be executed after a %false return, the
2458  * execution may happen before a full RCU grace period has passed.
2459  */
2460 bool queue_rcu_work(struct workqueue_struct *wq, struct rcu_work *rwork)
2461 {
2462 	struct work_struct *work = &rwork->work;
2463 
2464 	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
2465 		rwork->wq = wq;
2466 		call_rcu_hurry(&rwork->rcu, rcu_work_rcufn);
2467 		return true;
2468 	}
2469 
2470 	return false;
2471 }
2472 EXPORT_SYMBOL(queue_rcu_work);
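
/*
 * Example usage (illustrative sketch; struct example_obj is made up):
 * free an object from process context once an RCU grace period has
 * elapsed, e.g. when the release path must sleep and plain call_rcu()
 * therefore can't be used.
 */
struct example_obj {
	struct rcu_work rwork;
	/* payload that RCU readers may still be looking at */
};

static void example_free_fn(struct work_struct *work)
{
	struct example_obj *obj =
		container_of(to_rcu_work(work), struct example_obj, rwork);

	kfree(obj);
}

static void example_defer_free(struct example_obj *obj)
{
	INIT_RCU_WORK(&obj->rwork, example_free_fn);
	queue_rcu_work(system_wq, &obj->rwork);
}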
2473 
2474 static struct worker *alloc_worker(int node)
2475 {
2476 	struct worker *worker;
2477 
2478 	worker = kzalloc_node(sizeof(*worker), GFP_KERNEL, node);
2479 	if (worker) {
2480 		INIT_LIST_HEAD(&worker->entry);
2481 		INIT_LIST_HEAD(&worker->scheduled);
2482 		INIT_LIST_HEAD(&worker->node);
2483 		/* on creation a worker is in !idle && prep state */
2484 		worker->flags = WORKER_PREP;
2485 	}
2486 	return worker;
2487 }
2488 
2489 static cpumask_t *pool_allowed_cpus(struct worker_pool *pool)
2490 {
2491 	if (pool->cpu < 0 && pool->attrs->affn_strict)
2492 		return pool->attrs->__pod_cpumask;
2493 	else
2494 		return pool->attrs->cpumask;
2495 }
2496 
2497 /**
2498  * worker_attach_to_pool() - attach a worker to a pool
2499  * @worker: worker to be attached
2500  * @pool: the target pool
2501  *
2502  * Attach @worker to @pool.  Once attached, the %WORKER_UNBOUND flag and
2503  * cpu-binding of @worker are kept coordinated with the pool across
2504  * cpu-[un]hotplugs.
2505  */
2506 static void worker_attach_to_pool(struct worker *worker,
2507 				   struct worker_pool *pool)
2508 {
2509 	mutex_lock(&wq_pool_attach_mutex);
2510 
2511 	/*
2512 	 * The wq_pool_attach_mutex ensures %POOL_DISASSOCIATED remains
2513 	 * stable across this function.  See the comments above the flag
2514 	 * definition for details.
2515 	 */
2516 	if (pool->flags & POOL_DISASSOCIATED)
2517 		worker->flags |= WORKER_UNBOUND;
2518 	else
2519 		kthread_set_per_cpu(worker->task, pool->cpu);
2520 
2521 	if (worker->rescue_wq)
2522 		set_cpus_allowed_ptr(worker->task, pool_allowed_cpus(pool));
2523 
2524 	list_add_tail(&worker->node, &pool->workers);
2525 	worker->pool = pool;
2526 
2527 	mutex_unlock(&wq_pool_attach_mutex);
2528 }
2529 
2530 /**
2531  * worker_detach_from_pool() - detach a worker from its pool
2532  * @worker: worker which is attached to its pool
2533  *
2534  * Undo the attaching which had been done in worker_attach_to_pool().  The
2535  * caller worker shouldn't access the pool after detaching unless it has
2536  * another reference to the pool.
2537  */
2538 static void worker_detach_from_pool(struct worker *worker)
2539 {
2540 	struct worker_pool *pool = worker->pool;
2541 	struct completion *detach_completion = NULL;
2542 
2543 	mutex_lock(&wq_pool_attach_mutex);
2544 
2545 	kthread_set_per_cpu(worker->task, -1);
2546 	list_del(&worker->node);
2547 	worker->pool = NULL;
2548 
2549 	if (list_empty(&pool->workers) && list_empty(&pool->dying_workers))
2550 		detach_completion = pool->detach_completion;
2551 	mutex_unlock(&wq_pool_attach_mutex);
2552 
2553 	/* clear leftover flags without pool->lock after it is detached */
2554 	worker->flags &= ~(WORKER_UNBOUND | WORKER_REBOUND);
2555 
2556 	if (detach_completion)
2557 		complete(detach_completion);
2558 }
2559 
2560 /**
2561  * create_worker - create a new workqueue worker
2562  * @pool: pool the new worker will belong to
2563  *
2564  * Create and start a new worker which is attached to @pool.
2565  *
2566  * CONTEXT:
2567  * Might sleep.  Does GFP_KERNEL allocations.
2568  *
2569  * Return:
2570  * Pointer to the newly created worker.
2571  */
2572 static struct worker *create_worker(struct worker_pool *pool)
2573 {
2574 	struct worker *worker;
2575 	int id;
2576 	char id_buf[23];
2577 
2578 	/* ID is needed to determine kthread name */
2579 	id = ida_alloc(&pool->worker_ida, GFP_KERNEL);
2580 	if (id < 0) {
2581 		pr_err_once("workqueue: Failed to allocate a worker ID: %pe\n",
2582 			    ERR_PTR(id));
2583 		return NULL;
2584 	}
2585 
2586 	worker = alloc_worker(pool->node);
2587 	if (!worker) {
2588 		pr_err_once("workqueue: Failed to allocate a worker\n");
2589 		goto fail;
2590 	}
2591 
2592 	worker->id = id;
2593 
2594 	if (pool->cpu >= 0)
2595 		snprintf(id_buf, sizeof(id_buf), "%d:%d%s", pool->cpu, id,
2596 			 pool->attrs->nice < 0  ? "H" : "");
2597 	else
2598 		snprintf(id_buf, sizeof(id_buf), "u%d:%d", pool->id, id);
2599 
2600 	worker->task = kthread_create_on_node(worker_thread, worker, pool->node,
2601 					      "kworker/%s", id_buf);
2602 	if (IS_ERR(worker->task)) {
2603 		if (PTR_ERR(worker->task) == -EINTR) {
2604 			pr_err("workqueue: Interrupted when creating a worker thread \"kworker/%s\"\n",
2605 			       id_buf);
2606 		} else {
2607 			pr_err_once("workqueue: Failed to create a worker thread: %pe\n",
2608 				    worker->task);
2609 		}
2610 		goto fail;
2611 	}
2612 
2613 	set_user_nice(worker->task, pool->attrs->nice);
2614 	kthread_bind_mask(worker->task, pool_allowed_cpus(pool));
2615 
2616 	/* successful, attach the worker to the pool */
2617 	worker_attach_to_pool(worker, pool);
2618 
2619 	/* start the newly created worker */
2620 	raw_spin_lock_irq(&pool->lock);
2621 
2622 	worker->pool->nr_workers++;
2623 	worker_enter_idle(worker);
2624 
2625 	/*
2626 	 * @worker is waiting on a completion in kthread() and will trigger hung
2627 	 * check if not woken up soon. As kick_pool() is noop if @pool is empty,
2628 	 * wake it up explicitly.
2629 	 */
2630 	wake_up_process(worker->task);
2631 
2632 	raw_spin_unlock_irq(&pool->lock);
2633 
2634 	return worker;
2635 
2636 fail:
2637 	ida_free(&pool->worker_ida, id);
2638 	kfree(worker);
2639 	return NULL;
2640 }
2641 
2642 static void unbind_worker(struct worker *worker)
2643 {
2644 	lockdep_assert_held(&wq_pool_attach_mutex);
2645 
2646 	kthread_set_per_cpu(worker->task, -1);
2647 	if (cpumask_intersects(wq_unbound_cpumask, cpu_active_mask))
2648 		WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, wq_unbound_cpumask) < 0);
2649 	else
2650 		WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, cpu_possible_mask) < 0);
2651 }
2652 
2653 static void wake_dying_workers(struct list_head *cull_list)
2654 {
2655 	struct worker *worker, *tmp;
2656 
2657 	list_for_each_entry_safe(worker, tmp, cull_list, entry) {
2658 		list_del_init(&worker->entry);
2659 		unbind_worker(worker);
2660 		/*
2661 		 * If the worker was somehow already running, then it had to be
2662 		 * in pool->idle_list when set_worker_dying() happened or we
2663 		 * wouldn't have gotten here.
2664 		 *
2665 		 * Thus, the worker must either have observed the WORKER_DIE
2666 		 * flag, or have set its state to TASK_IDLE. Either way, the
2667 		 * below will be observed by the worker and is safe to do
2668 		 * outside of pool->lock.
2669 		 */
2670 		wake_up_process(worker->task);
2671 	}
2672 }
2673 
2674 /**
2675  * set_worker_dying - Tag a worker for destruction
2676  * @worker: worker to be destroyed
2677  * @list: transfer worker away from its pool->idle_list and into list
2678  *
2679  * Tag @worker for destruction and adjust @pool stats accordingly.  The worker
2680  * should be idle.
2681  *
2682  * CONTEXT:
2683  * raw_spin_lock_irq(pool->lock).
2684  */
2685 static void set_worker_dying(struct worker *worker, struct list_head *list)
2686 {
2687 	struct worker_pool *pool = worker->pool;
2688 
2689 	lockdep_assert_held(&pool->lock);
2690 	lockdep_assert_held(&wq_pool_attach_mutex);
2691 
2692 	/* sanity check frenzy */
2693 	if (WARN_ON(worker->current_work) ||
2694 	    WARN_ON(!list_empty(&worker->scheduled)) ||
2695 	    WARN_ON(!(worker->flags & WORKER_IDLE)))
2696 		return;
2697 
2698 	pool->nr_workers--;
2699 	pool->nr_idle--;
2700 
2701 	worker->flags |= WORKER_DIE;
2702 
2703 	list_move(&worker->entry, list);
2704 	list_move(&worker->node, &pool->dying_workers);
2705 }
2706 
2707 /**
2708  * idle_worker_timeout - check if some idle workers can now be deleted.
2709  * @t: The pool's idle_timer that just expired
2710  *
2711  * The timer is armed in worker_enter_idle(). Note that it isn't disarmed in
2712  * worker_leave_idle(), as a worker flicking between idle and active while its
2713  * pool is at the too_many_workers() tipping point would cause too much timer
2714  * housekeeping overhead. Since IDLE_WORKER_TIMEOUT is long enough, we just let
2715  * it expire and re-evaluate things from there.
2716  */
2717 static void idle_worker_timeout(struct timer_list *t)
2718 {
2719 	struct worker_pool *pool = from_timer(pool, t, idle_timer);
2720 	bool do_cull = false;
2721 
2722 	if (work_pending(&pool->idle_cull_work))
2723 		return;
2724 
2725 	raw_spin_lock_irq(&pool->lock);
2726 
2727 	if (too_many_workers(pool)) {
2728 		struct worker *worker;
2729 		unsigned long expires;
2730 
2731 		/* idle_list is kept in LIFO order, check the last one */
2732 		worker = list_entry(pool->idle_list.prev, struct worker, entry);
2733 		expires = worker->last_active + IDLE_WORKER_TIMEOUT;
2734 		do_cull = !time_before(jiffies, expires);
2735 
2736 		if (!do_cull)
2737 			mod_timer(&pool->idle_timer, expires);
2738 	}
2739 	raw_spin_unlock_irq(&pool->lock);
2740 
2741 	if (do_cull)
2742 		queue_work(system_unbound_wq, &pool->idle_cull_work);
2743 }
2744 
2745 /**
2746  * idle_cull_fn - cull workers that have been idle for too long.
2747  * @work: the pool's work for handling these idle workers
2748  *
2749  * This goes through a pool's idle workers and gets rid of those that have been
2750  * idle for at least IDLE_WORKER_TIMEOUT seconds.
2751  *
2752  * We don't want to disturb isolated CPUs because of a pcpu kworker being
2753  * culled, so this also resets worker affinity. This requires a sleepable
2754  * context, hence the split between timer callback and work item.
2755  */
2756 static void idle_cull_fn(struct work_struct *work)
2757 {
2758 	struct worker_pool *pool = container_of(work, struct worker_pool, idle_cull_work);
2759 	LIST_HEAD(cull_list);
2760 
2761 	/*
2762 	 * Grabbing wq_pool_attach_mutex here ensures an already-running worker
2763 	 * cannot proceed beyond worker_detach_from_pool() in its self-destruct
2764 	 * path. This is required as a previously-preempted worker could run after
2765 	 * set_worker_dying() has happened but before wake_dying_workers() did.
2766 	 */
2767 	mutex_lock(&wq_pool_attach_mutex);
2768 	raw_spin_lock_irq(&pool->lock);
2769 
2770 	while (too_many_workers(pool)) {
2771 		struct worker *worker;
2772 		unsigned long expires;
2773 
2774 		worker = list_entry(pool->idle_list.prev, struct worker, entry);
2775 		expires = worker->last_active + IDLE_WORKER_TIMEOUT;
2776 
2777 		if (time_before(jiffies, expires)) {
2778 			mod_timer(&pool->idle_timer, expires);
2779 			break;
2780 		}
2781 
2782 		set_worker_dying(worker, &cull_list);
2783 	}
2784 
2785 	raw_spin_unlock_irq(&pool->lock);
2786 	wake_dying_workers(&cull_list);
2787 	mutex_unlock(&wq_pool_attach_mutex);
2788 }
2789 
2790 static void send_mayday(struct work_struct *work)
2791 {
2792 	struct pool_workqueue *pwq = get_work_pwq(work);
2793 	struct workqueue_struct *wq = pwq->wq;
2794 
2795 	lockdep_assert_held(&wq_mayday_lock);
2796 
2797 	if (!wq->rescuer)
2798 		return;
2799 
2800 	/* mayday mayday mayday */
2801 	if (list_empty(&pwq->mayday_node)) {
2802 		/*
2803 		 * If @pwq is for an unbound wq, its base ref may be put at
2804 		 * any time due to an attribute change.  Pin @pwq until the
2805 		 * rescuer is done with it.
2806 		 */
2807 		get_pwq(pwq);
2808 		list_add_tail(&pwq->mayday_node, &wq->maydays);
2809 		wake_up_process(wq->rescuer->task);
2810 		pwq->stats[PWQ_STAT_MAYDAY]++;
2811 	}
2812 }
2813 
2814 static void pool_mayday_timeout(struct timer_list *t)
2815 {
2816 	struct worker_pool *pool = from_timer(pool, t, mayday_timer);
2817 	struct work_struct *work;
2818 
2819 	raw_spin_lock_irq(&pool->lock);
2820 	raw_spin_lock(&wq_mayday_lock);		/* for wq->maydays */
2821 
2822 	if (need_to_create_worker(pool)) {
2823 		/*
2824 		 * We've been trying to create a new worker but
2825 		 * haven't been successful.  We might be hitting an
2826 		 * allocation deadlock.  Send distress signals to
2827 		 * rescuers.
2828 		 */
2829 		list_for_each_entry(work, &pool->worklist, entry)
2830 			send_mayday(work);
2831 	}
2832 
2833 	raw_spin_unlock(&wq_mayday_lock);
2834 	raw_spin_unlock_irq(&pool->lock);
2835 
2836 	mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INTERVAL);
2837 }
2838 
2839 /**
2840  * maybe_create_worker - create a new worker if necessary
2841  * @pool: pool to create a new worker for
2842  *
2843  * Create a new worker for @pool if necessary.  @pool is guaranteed to
2844  * have at least one idle worker on return from this function.  If
2845  * creating a new worker takes longer than MAYDAY_INTERVAL, mayday is
2846  * sent to all rescuers with works scheduled on @pool to resolve
2847  * possible allocation deadlock.
2848  *
2849  * On return, need_to_create_worker() is guaranteed to be %false and
2850  * may_start_working() %true.
2851  *
2852  * LOCKING:
2853  * raw_spin_lock_irq(pool->lock) which may be released and regrabbed
2854  * multiple times.  Does GFP_KERNEL allocations.  Called only from
2855  * manager.
2856  */
2857 static void maybe_create_worker(struct worker_pool *pool)
2858 __releases(&pool->lock)
2859 __acquires(&pool->lock)
2860 {
2861 restart:
2862 	raw_spin_unlock_irq(&pool->lock);
2863 
2864 	/* if we don't make progress in MAYDAY_INITIAL_TIMEOUT, call for help */
2865 	mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INITIAL_TIMEOUT);
2866 
2867 	while (true) {
2868 		if (create_worker(pool) || !need_to_create_worker(pool))
2869 			break;
2870 
2871 		schedule_timeout_interruptible(CREATE_COOLDOWN);
2872 
2873 		if (!need_to_create_worker(pool))
2874 			break;
2875 	}
2876 
2877 	del_timer_sync(&pool->mayday_timer);
2878 	raw_spin_lock_irq(&pool->lock);
2879 	/*
2880 	 * This is necessary even after a new worker was just successfully
2881 	 * created as @pool->lock was dropped and the new worker might have
2882 	 * already become busy.
2883 	 */
2884 	if (need_to_create_worker(pool))
2885 		goto restart;
2886 }
2887 
2888 /**
2889  * manage_workers - manage worker pool
2890  * @worker: self
2891  *
2892  * Assume the manager role and manage the worker pool @worker belongs
2893  * to.  At any given time, there can be only zero or one manager per
2894  * pool.  The exclusion is handled automatically by this function.
2895  *
2896  * The caller can safely start processing works on false return.  On
2897  * true return, it's guaranteed that need_to_create_worker() is false
2898  * and may_start_working() is true.
2899  *
2900  * CONTEXT:
2901  * raw_spin_lock_irq(pool->lock) which may be released and regrabbed
2902  * multiple times.  Does GFP_KERNEL allocations.
2903  *
2904  * Return:
2905  * %false if the pool doesn't need management and the caller can safely
2906  * start processing works, %true if management function was performed and
2907  * the conditions that the caller verified before calling the function may
2908  * no longer be true.
2909  */
2910 static bool manage_workers(struct worker *worker)
2911 {
2912 	struct worker_pool *pool = worker->pool;
2913 
2914 	if (pool->flags & POOL_MANAGER_ACTIVE)
2915 		return false;
2916 
2917 	pool->flags |= POOL_MANAGER_ACTIVE;
2918 	pool->manager = worker;
2919 
2920 	maybe_create_worker(pool);
2921 
2922 	pool->manager = NULL;
2923 	pool->flags &= ~POOL_MANAGER_ACTIVE;
2924 	rcuwait_wake_up(&manager_wait);
2925 	return true;
2926 }
2927 
2928 /**
2929  * process_one_work - process single work
2930  * @worker: self
2931  * @work: work to process
2932  *
2933  * Process @work.  This function contains all the logic necessary to
2934  * process a single work item, including synchronization against and
2935  * interaction with other workers on the same cpu, queueing and
2936  * flushing.  As long as the context requirement is met, any worker can
2937  * call this function to process a work item.
2938  *
2939  * CONTEXT:
2940  * raw_spin_lock_irq(pool->lock) which is released and regrabbed.
2941  */
2942 static void process_one_work(struct worker *worker, struct work_struct *work)
2943 __releases(&pool->lock)
2944 __acquires(&pool->lock)
2945 {
2946 	struct pool_workqueue *pwq = get_work_pwq(work);
2947 	struct worker_pool *pool = worker->pool;
2948 	unsigned long work_data;
2949 #ifdef CONFIG_LOCKDEP
2950 	/*
2951 	 * It is permissible to free the struct work_struct from
2952 	 * inside the function that is called from it, this we need to
2953 	 * take into account for lockdep too.  To avoid bogus "held
2954 	 * lock freed" warnings as well as problems when looking into
2955 	 * work->lockdep_map, make a copy and use that here.
2956 	 */
2957 	struct lockdep_map lockdep_map;
2958 
2959 	lockdep_copy_map(&lockdep_map, &work->lockdep_map);
2960 #endif
2961 	/* ensure we're on the correct CPU */
2962 	WARN_ON_ONCE(!(pool->flags & POOL_DISASSOCIATED) &&
2963 		     raw_smp_processor_id() != pool->cpu);
2964 
2965 	/* claim and dequeue */
2966 	debug_work_deactivate(work);
2967 	hash_add(pool->busy_hash, &worker->hentry, (unsigned long)work);
2968 	worker->current_work = work;
2969 	worker->current_func = work->func;
2970 	worker->current_pwq = pwq;
2971 	worker->current_at = worker->task->se.sum_exec_runtime;
2972 	work_data = *work_data_bits(work);
2973 	worker->current_color = get_work_color(work_data);
2974 
2975 	/*
2976 	 * Record wq name for cmdline and debug reporting, may get
2977 	 * overridden through set_worker_desc().
2978 	 */
2979 	strscpy(worker->desc, pwq->wq->name, WORKER_DESC_LEN);
2980 
2981 	list_del_init(&work->entry);
2982 
2983 	/*
2984 	 * CPU intensive works don't participate in concurrency management.
2985 	 * They're the scheduler's responsibility.  This takes @worker out
2986 	 * of concurrency management and the next code block will chain
2987 	 * execution of the pending work items.
2988 	 */
2989 	if (unlikely(pwq->wq->flags & WQ_CPU_INTENSIVE))
2990 		worker_set_flags(worker, WORKER_CPU_INTENSIVE);
2991 
2992 	/*
2993 	 * Kick @pool if necessary. It's always noop for per-cpu worker pools
2994 	 * since nr_running would always be >= 1 at this point. This is used to
2995 	 * chain execution of the pending work items for WORKER_NOT_RUNNING
2996 	 * workers such as the UNBOUND and CPU_INTENSIVE ones.
2997 	 */
2998 	kick_pool(pool);
2999 
3000 	/*
3001 	 * Record the last pool and clear PENDING which should be the last
3002 	 * update to @work.  Also, do this inside @pool->lock so that
3003 	 * PENDING and queued state changes happen together while IRQ is
3004 	 * disabled.
3005 	 */
3006 	set_work_pool_and_clear_pending(work, pool->id);
3007 
3008 	pwq->stats[PWQ_STAT_STARTED]++;
3009 	raw_spin_unlock_irq(&pool->lock);
3010 
3011 	lock_map_acquire(&pwq->wq->lockdep_map);
3012 	lock_map_acquire(&lockdep_map);
3013 	/*
3014 	 * Strictly speaking we should mark the invariant state without holding
3015 	 * any locks, that is, before these two lock_map_acquire()'s.
3016 	 *
3017 	 * However, that would result in:
3018 	 *
3019 	 *   A(W1)
3020 	 *   WFC(C)
3021 	 *		A(W1)
3022 	 *		C(C)
3023 	 *
3024 	 * Which would create W1->C->W1 dependencies, even though there is no
3025 	 * actual deadlock possible. There are two solutions, using a
3026 	 * read-recursive acquire on the work(queue) 'locks', but this will then
3027 	 * hit the lockdep limitation on recursive locks, or simply discard
3028 	 * these locks.
3029 	 *
3030 	 * AFAICT there is no possible deadlock scenario between the
3031 	 * flush_work() and complete() primitives (except for single-threaded
3032 	 * workqueues), so hiding them isn't a problem.
3033 	 */
3034 	lockdep_invariant_state(true);
3035 	trace_workqueue_execute_start(work);
3036 	worker->current_func(work);
3037 	/*
3038 	 * While we must be careful to not use "work" after this, the trace
3039 	 * point will only record its address.
3040 	 */
3041 	trace_workqueue_execute_end(work, worker->current_func);
3042 	pwq->stats[PWQ_STAT_COMPLETED]++;
3043 	lock_map_release(&lockdep_map);
3044 	lock_map_release(&pwq->wq->lockdep_map);
3045 
3046 	if (unlikely(in_atomic() || lockdep_depth(current) > 0 ||
3047 		     rcu_preempt_depth() > 0)) {
3048 		pr_err("BUG: workqueue leaked lock or atomic: %s/0x%08x/%d/%d\n"
3049 		       "     last function: %ps\n",
3050 		       current->comm, preempt_count(), rcu_preempt_depth(),
3051 		       task_pid_nr(current), worker->current_func);
3052 		debug_show_held_locks(current);
3053 		dump_stack();
3054 	}
3055 
3056 	/*
3057 	 * The following prevents a kworker from hogging CPU on !PREEMPTION
3058 	 * kernels, where a requeueing work item waiting for something to
3059 	 * happen could deadlock with stop_machine as such work item could
3060 	 * indefinitely requeue itself while all other CPUs are trapped in
3061 	 * stop_machine. At the same time, report a quiescent RCU state so
3062 	 * the same condition doesn't freeze RCU.
3063 	 */
3064 	cond_resched();
3065 
3066 	raw_spin_lock_irq(&pool->lock);
3067 
3068 	/*
3069 	 * In addition to %WQ_CPU_INTENSIVE, @worker may also have been marked
3070 	 * CPU intensive by wq_worker_tick() if @work hogged CPU longer than
3071 	 * wq_cpu_intensive_thresh_us. Clear it.
3072 	 */
3073 	worker_clr_flags(worker, WORKER_CPU_INTENSIVE);
3074 
3075 	/* tag the worker for identification in schedule() */
3076 	worker->last_func = worker->current_func;
3077 
3078 	/* we're done with it, release */
3079 	hash_del(&worker->hentry);
3080 	worker->current_work = NULL;
3081 	worker->current_func = NULL;
3082 	worker->current_pwq = NULL;
3083 	worker->current_color = INT_MAX;
3084 
3085 	/* must be the last step, see the function comment */
3086 	pwq_dec_nr_in_flight(pwq, work_data);
3087 }
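
/*
 * Example (illustrative sketch; example_* names are made up): work known
 * to burn CPU can opt out of concurrency management up front with
 * WQ_CPU_INTENSIVE instead of waiting for wq_worker_tick() to flag it.
 */
static struct workqueue_struct *example_cpu_wq;

static int __init example_cpu_wq_init(void)
{
	example_cpu_wq = alloc_workqueue("example_intensive",
					 WQ_CPU_INTENSIVE, 0);
	return example_cpu_wq ? 0 : -ENOMEM;
}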
3088 
3089 /**
3090  * process_scheduled_works - process scheduled works
3091  * @worker: self
3092  *
3093  * Process all scheduled works.  Please note that the scheduled list
3094  * may change while processing a work, so this function repeatedly
3095  * fetches a work from the top and executes it.
3096  *
3097  * CONTEXT:
3098  * raw_spin_lock_irq(pool->lock) which may be released and regrabbed
3099  * multiple times.
3100  */
3101 static void process_scheduled_works(struct worker *worker)
3102 {
3103 	struct work_struct *work;
3104 	bool first = true;
3105 
3106 	while ((work = list_first_entry_or_null(&worker->scheduled,
3107 						struct work_struct, entry))) {
3108 		if (first) {
3109 			worker->pool->watchdog_ts = jiffies;
3110 			first = false;
3111 		}
3112 		process_one_work(worker, work);
3113 	}
3114 }
3115 
3116 static void set_pf_worker(bool val)
3117 {
3118 	mutex_lock(&wq_pool_attach_mutex);
3119 	if (val)
3120 		current->flags |= PF_WQ_WORKER;
3121 	else
3122 		current->flags &= ~PF_WQ_WORKER;
3123 	mutex_unlock(&wq_pool_attach_mutex);
3124 }
3125 
3126 /**
3127  * worker_thread - the worker thread function
3128  * @__worker: self
3129  *
3130  * The worker thread function.  All workers belong to a worker_pool -
3131  * either a per-cpu one or a dynamic unbound one.  These workers process all
3132  * work items regardless of their specific target workqueue.  The only
3133  * exception is work items which belong to workqueues with a rescuer which
3134  * will be explained in rescuer_thread().
3135  *
3136  * Return: 0
3137  */
3138 static int worker_thread(void *__worker)
3139 {
3140 	struct worker *worker = __worker;
3141 	struct worker_pool *pool = worker->pool;
3142 
3143 	/* tell the scheduler that this is a workqueue worker */
3144 	set_pf_worker(true);
3145 woke_up:
3146 	raw_spin_lock_irq(&pool->lock);
3147 
3148 	/* am I supposed to die? */
3149 	if (unlikely(worker->flags & WORKER_DIE)) {
3150 		raw_spin_unlock_irq(&pool->lock);
3151 		set_pf_worker(false);
3152 
3153 		set_task_comm(worker->task, "kworker/dying");
3154 		ida_free(&pool->worker_ida, worker->id);
3155 		worker_detach_from_pool(worker);
3156 		WARN_ON_ONCE(!list_empty(&worker->entry));
3157 		kfree(worker);
3158 		return 0;
3159 	}
3160 
3161 	worker_leave_idle(worker);
3162 recheck:
3163 	/* no more worker necessary? */
3164 	if (!need_more_worker(pool))
3165 		goto sleep;
3166 
3167 	/* do we need to manage? */
3168 	if (unlikely(!may_start_working(pool)) && manage_workers(worker))
3169 		goto recheck;
3170 
3171 	/*
3172 	 * ->scheduled list can only be filled while a worker is
3173 	 * preparing to process a work or actually processing it.
3174 	 * Make sure nobody diddled with it while I was sleeping.
3175 	 */
3176 	WARN_ON_ONCE(!list_empty(&worker->scheduled));
3177 
3178 	/*
3179 	 * Finish PREP stage.  We're guaranteed to have at least one idle
3180 	 * worker or that someone else has already assumed the manager
3181 	 * role.  This is where @worker starts participating in concurrency
3182 	 * management if applicable and concurrency management is restored
3183 	 * after being rebound.  See rebind_workers() for details.
3184 	 */
3185 	worker_clr_flags(worker, WORKER_PREP | WORKER_REBOUND);
3186 
3187 	do {
3188 		struct work_struct *work =
3189 			list_first_entry(&pool->worklist,
3190 					 struct work_struct, entry);
3191 
3192 		if (assign_work(work, worker, NULL))
3193 			process_scheduled_works(worker);
3194 	} while (keep_working(pool));
3195 
3196 	worker_set_flags(worker, WORKER_PREP);
3197 sleep:
3198 	/*
3199 	 * pool->lock is held and there's no work to process and no need to
3200 	 * manage, sleep.  Workers are woken up only while holding
3201 	 * pool->lock or from local cpu, so setting the current state
3202 	 * before releasing pool->lock is enough to prevent losing any
3203 	 * event.
3204 	 */
3205 	worker_enter_idle(worker);
3206 	__set_current_state(TASK_IDLE);
3207 	raw_spin_unlock_irq(&pool->lock);
3208 	schedule();
3209 	goto woke_up;
3210 }
3211 
3212 /**
3213  * rescuer_thread - the rescuer thread function
3214  * @__rescuer: self
3215  *
3216  * Workqueue rescuer thread function.  There's one rescuer for each
3217  * workqueue which has WQ_MEM_RECLAIM set.
3218  *
3219  * Regular work processing on a pool may block trying to create a new
3220  * worker, which uses a GFP_KERNEL allocation that has a slight chance of
3221  * developing into a deadlock if some works currently on the same queue
3222  * need to be processed to satisfy the GFP_KERNEL allocation.  This is
3223  * the problem rescuer solves.
3224  *
3225  * When such a condition is possible, the pool summons the rescuers of
3226  * all workqueues which have work items queued on the pool and lets them
3227  * process those work items so that forward progress can be guaranteed.
3228  *
3229  * This should happen rarely.
3230  *
3231  * Return: 0
3232  */
3233 static int rescuer_thread(void *__rescuer)
3234 {
3235 	struct worker *rescuer = __rescuer;
3236 	struct workqueue_struct *wq = rescuer->rescue_wq;
3237 	bool should_stop;
3238 
3239 	set_user_nice(current, RESCUER_NICE_LEVEL);
3240 
3241 	/*
3242 	 * Mark rescuer as worker too.  As WORKER_PREP is never cleared, it
3243 	 * doesn't participate in concurrency management.
3244 	 */
3245 	set_pf_worker(true);
3246 repeat:
3247 	set_current_state(TASK_IDLE);
3248 
3249 	/*
3250 	 * By the time the rescuer is requested to stop, the workqueue
3251 	 * shouldn't have any work pending, but @wq->maydays may still have
3252 	 * pwq(s) queued.  This can happen if non-rescuer workers consume
3253 	 * all the work items before the rescuer gets to them.  Go through
3254 	 * @wq->maydays processing before acting on should_stop so that the
3255 	 * list is always empty on exit.
3256 	 */
3257 	should_stop = kthread_should_stop();
3258 
3259 	/* see whether any pwq is asking for help */
3260 	raw_spin_lock_irq(&wq_mayday_lock);
3261 
3262 	while (!list_empty(&wq->maydays)) {
3263 		struct pool_workqueue *pwq = list_first_entry(&wq->maydays,
3264 					struct pool_workqueue, mayday_node);
3265 		struct worker_pool *pool = pwq->pool;
3266 		struct work_struct *work, *n;
3267 
3268 		__set_current_state(TASK_RUNNING);
3269 		list_del_init(&pwq->mayday_node);
3270 
3271 		raw_spin_unlock_irq(&wq_mayday_lock);
3272 
3273 		worker_attach_to_pool(rescuer, pool);
3274 
3275 		raw_spin_lock_irq(&pool->lock);
3276 
3277 		/*
3278 		 * Slurp in all works issued via this workqueue and
3279 		 * process'em.
3280 		 */
3281 		WARN_ON_ONCE(!list_empty(&rescuer->scheduled));
3282 		list_for_each_entry_safe(work, n, &pool->worklist, entry) {
3283 			if (get_work_pwq(work) == pwq &&
3284 			    assign_work(work, rescuer, &n))
3285 				pwq->stats[PWQ_STAT_RESCUED]++;
3286 		}
3287 
3288 		if (!list_empty(&rescuer->scheduled)) {
3289 			process_scheduled_works(rescuer);
3290 
3291 			/*
3292 			 * The above execution of rescued work items could
3293 			 * have created more to rescue through
3294 			 * pwq_activate_first_inactive() or chained
3295 			 * queueing.  Let's put @pwq back on mayday list so
3296 			 * that such back-to-back work items, which may be
3297 			 * being used to relieve memory pressure, don't
3298 			 * incur MAYDAY_INTERVAL delay in between.
3299 			 */
3300 			if (pwq->nr_active && need_to_create_worker(pool)) {
3301 				raw_spin_lock(&wq_mayday_lock);
3302 				/*
3303 				 * Queue iff we aren't racing destruction
3304 				 * and somebody else hasn't queued it already.
3305 				 */
3306 				if (wq->rescuer && list_empty(&pwq->mayday_node)) {
3307 					get_pwq(pwq);
3308 					list_add_tail(&pwq->mayday_node, &wq->maydays);
3309 				}
3310 				raw_spin_unlock(&wq_mayday_lock);
3311 			}
3312 		}
3313 
3314 		/*
3315 		 * Put the reference grabbed by send_mayday().  @pool won't
3316 		 * go away while we're still attached to it.
3317 		 */
3318 		put_pwq(pwq);
3319 
3320 		/*
3321 		 * Leave this pool. Notify regular workers; otherwise, we end up
3322 		 * with 0 concurrency and stalling the execution.
3323 		 */
3324 		kick_pool(pool);
3325 
3326 		raw_spin_unlock_irq(&pool->lock);
3327 
3328 		worker_detach_from_pool(rescuer);
3329 
3330 		raw_spin_lock_irq(&wq_mayday_lock);
3331 	}
3332 
3333 	raw_spin_unlock_irq(&wq_mayday_lock);
3334 
3335 	if (should_stop) {
3336 		__set_current_state(TASK_RUNNING);
3337 		set_pf_worker(false);
3338 		return 0;
3339 	}
3340 
3341 	/* rescuers should never participate in concurrency management */
3342 	WARN_ON_ONCE(!(rescuer->flags & WORKER_NOT_RUNNING));
3343 	schedule();
3344 	goto repeat;
3345 }
3346 
3347 /**
3348  * check_flush_dependency - check for flush dependency sanity
3349  * @target_wq: workqueue being flushed
3350  * @target_work: work item being flushed (NULL for workqueue flushes)
3351  *
3352  * %current is trying to flush the whole @target_wq or @target_work on it.
3353  * If @target_wq doesn't have %WQ_MEM_RECLAIM, verify that %current is not
3354  * reclaiming memory or running on a workqueue which doesn't have
3355  * %WQ_MEM_RECLAIM as that can break forward-progress guarantee leading to
3356  * a deadlock.
3357  */
3358 static void check_flush_dependency(struct workqueue_struct *target_wq,
3359 				   struct work_struct *target_work)
3360 {
3361 	work_func_t target_func = target_work ? target_work->func : NULL;
3362 	struct worker *worker;
3363 
3364 	if (target_wq->flags & WQ_MEM_RECLAIM)
3365 		return;
3366 
3367 	worker = current_wq_worker();
3368 
3369 	WARN_ONCE(current->flags & PF_MEMALLOC,
3370 		  "workqueue: PF_MEMALLOC task %d(%s) is flushing !WQ_MEM_RECLAIM %s:%ps",
3371 		  current->pid, current->comm, target_wq->name, target_func);
3372 	WARN_ONCE(worker && ((worker->current_pwq->wq->flags &
3373 			      (WQ_MEM_RECLAIM | __WQ_LEGACY)) == WQ_MEM_RECLAIM),
3374 		  "workqueue: WQ_MEM_RECLAIM %s:%ps is flushing !WQ_MEM_RECLAIM %s:%ps",
3375 		  worker->current_pwq->wq->name, worker->current_func,
3376 		  target_wq->name, target_func);
3377 }
3378 
3379 struct wq_barrier {
3380 	struct work_struct	work;
3381 	struct completion	done;
3382 	struct task_struct	*task;	/* purely informational */
3383 };
3384 
3385 static void wq_barrier_func(struct work_struct *work)
3386 {
3387 	struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
3388 	complete(&barr->done);
3389 }
3390 
3391 /**
3392  * insert_wq_barrier - insert a barrier work
3393  * @pwq: pwq to insert barrier into
3394  * @barr: wq_barrier to insert
3395  * @target: target work to attach @barr to
3396  * @worker: worker currently executing @target, NULL if @target is not executing
3397  *
3398  * @barr is linked to @target such that @barr is completed only after
3399  * @target finishes execution.  Please note that the ordering
3400  * guarantee is observed only with respect to @target and on the local
3401  * cpu.
3402  *
3403  * Currently, a queued barrier can't be canceled.  This is because
3404  * try_to_grab_pending() can't determine whether the work to be
3405  * grabbed is at the head of the queue and thus can't clear the LINKED
3406  * flag of the previous work, while a work with the LINKED flag set
3407  * must be followed by a valid next work.
3408  *
3409  * Note that when @worker is non-NULL, @target may be modified
3410  * underneath us, so we can't reliably determine pwq from @target.
3411  *
3412  * CONTEXT:
3413  * raw_spin_lock_irq(pool->lock).
3414  */
3415 static void insert_wq_barrier(struct pool_workqueue *pwq,
3416 			      struct wq_barrier *barr,
3417 			      struct work_struct *target, struct worker *worker)
3418 {
3419 	unsigned int work_flags = 0;
3420 	unsigned int work_color;
3421 	struct list_head *head;
3422 
3423 	/*
3424 	 * debugobject calls are safe here even with pool->lock locked
3425 	 * as we know for sure that this will not trigger any of the
3426 	 * checks and call back into the fixup functions where we
3427 	 * might deadlock.
3428 	 */
3429 	INIT_WORK_ONSTACK(&barr->work, wq_barrier_func);
3430 	__set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work));
3431 
3432 	init_completion_map(&barr->done, &target->lockdep_map);
3433 
3434 	barr->task = current;
3435 
3436 	/* The barrier work item does not participate in nr_active. */
3437 	work_flags |= WORK_STRUCT_INACTIVE;
3438 
3439 	/*
3440 	 * If @target is currently being executed, schedule the
3441 	 * barrier to the worker; otherwise, put it after @target.
3442 	 */
3443 	if (worker) {
3444 		head = worker->scheduled.next;
3445 		work_color = worker->current_color;
3446 	} else {
3447 		unsigned long *bits = work_data_bits(target);
3448 
3449 		head = target->entry.next;
3450 		/* there can already be other linked works, inherit and set */
3451 		work_flags |= *bits & WORK_STRUCT_LINKED;
3452 		work_color = get_work_color(*bits);
3453 		__set_bit(WORK_STRUCT_LINKED_BIT, bits);
3454 	}
3455 
3456 	pwq->nr_in_flight[work_color]++;
3457 	work_flags |= work_color_to_flags(work_color);
3458 
3459 	insert_work(pwq, &barr->work, head, work_flags);
3460 }
3461 
3462 /**
3463  * flush_workqueue_prep_pwqs - prepare pwqs for workqueue flushing
3464  * @wq: workqueue being flushed
3465  * @flush_color: new flush color, < 0 for no-op
3466  * @work_color: new work color, < 0 for no-op
3467  *
3468  * Prepare pwqs for workqueue flushing.
3469  *
3470  * If @flush_color is non-negative, flush_color on all pwqs should be
3471  * -1.  If no pwq has in-flight commands at the specified color, all
3472  * -1.  If no pwq has in-flight work items at the specified color, all
3473  * pwq->flush_colors stay at -1 and %false is returned.  If any pwq
3474  * has in-flight work items, its pwq->flush_color is set to
3475  * wakeup logic is armed and %true is returned.
3476  *
3477  * The caller should have initialized @wq->first_flusher prior to
3478  * calling this function with non-negative @flush_color.  If
3479  * @flush_color is negative, no flush color update is done and %false
3480  * is returned.
3481  *
3482  * If @work_color is non-negative, all pwqs should have the same
3483  * work_color which is previous to @work_color and all will be
3484  * advanced to @work_color.
3485  *
3486  * CONTEXT:
3487  * mutex_lock(wq->mutex).
3488  *
3489  * Return:
3490  * %true if @flush_color >= 0 and there's something to flush.  %false
3491  * otherwise.
3492  */
3493 static bool flush_workqueue_prep_pwqs(struct workqueue_struct *wq,
3494 				      int flush_color, int work_color)
3495 {
3496 	bool wait = false;
3497 	struct pool_workqueue *pwq;
3498 
3499 	if (flush_color >= 0) {
3500 		WARN_ON_ONCE(atomic_read(&wq->nr_pwqs_to_flush));
3501 		atomic_set(&wq->nr_pwqs_to_flush, 1);
3502 	}
3503 
3504 	for_each_pwq(pwq, wq) {
3505 		struct worker_pool *pool = pwq->pool;
3506 
3507 		raw_spin_lock_irq(&pool->lock);
3508 
3509 		if (flush_color >= 0) {
3510 			WARN_ON_ONCE(pwq->flush_color != -1);
3511 
3512 			if (pwq->nr_in_flight[flush_color]) {
3513 				pwq->flush_color = flush_color;
3514 				atomic_inc(&wq->nr_pwqs_to_flush);
3515 				wait = true;
3516 			}
3517 		}
3518 
3519 		if (work_color >= 0) {
3520 			WARN_ON_ONCE(work_color != work_next_color(pwq->work_color));
3521 			pwq->work_color = work_color;
3522 		}
3523 
3524 		raw_spin_unlock_irq(&pool->lock);
3525 	}
3526 
3527 	if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_pwqs_to_flush))
3528 		complete(&wq->first_flusher->done);
3529 
3530 	return wait;
3531 }
3532 
3533 /**
3534  * __flush_workqueue - ensure that any scheduled work has run to completion.
3535  * @wq: workqueue to flush
3536  *
3537  * This function sleeps until all work items which were queued on entry
3538  * have finished execution, but it is not livelocked by new incoming ones.
3539  */
3540 void __flush_workqueue(struct workqueue_struct *wq)
3541 {
3542 	struct wq_flusher this_flusher = {
3543 		.list = LIST_HEAD_INIT(this_flusher.list),
3544 		.flush_color = -1,
3545 		.done = COMPLETION_INITIALIZER_ONSTACK_MAP(this_flusher.done, wq->lockdep_map),
3546 	};
3547 	int next_color;
3548 
3549 	if (WARN_ON(!wq_online))
3550 		return;
3551 
3552 	lock_map_acquire(&wq->lockdep_map);
3553 	lock_map_release(&wq->lockdep_map);
3554 
3555 	mutex_lock(&wq->mutex);
3556 
3557 	/*
3558 	 * Start-to-wait phase
3559 	 */
3560 	next_color = work_next_color(wq->work_color);
3561 
3562 	if (next_color != wq->flush_color) {
3563 		/*
3564 		 * Color space is not full.  The current work_color
3565 		 * becomes our flush_color and work_color is advanced
3566 		 * by one.
3567 		 */
3568 		WARN_ON_ONCE(!list_empty(&wq->flusher_overflow));
3569 		this_flusher.flush_color = wq->work_color;
3570 		wq->work_color = next_color;
3571 
3572 		if (!wq->first_flusher) {
3573 			/* no flush in progress, become the first flusher */
3574 			WARN_ON_ONCE(wq->flush_color != this_flusher.flush_color);
3575 
3576 			wq->first_flusher = &this_flusher;
3577 
3578 			if (!flush_workqueue_prep_pwqs(wq, wq->flush_color,
3579 						       wq->work_color)) {
3580 				/* nothing to flush, done */
3581 				wq->flush_color = next_color;
3582 				wq->first_flusher = NULL;
3583 				goto out_unlock;
3584 			}
3585 		} else {
3586 			/* wait in queue */
3587 			WARN_ON_ONCE(wq->flush_color == this_flusher.flush_color);
3588 			list_add_tail(&this_flusher.list, &wq->flusher_queue);
3589 			flush_workqueue_prep_pwqs(wq, -1, wq->work_color);
3590 		}
3591 	} else {
3592 		/*
3593 		 * Oops, color space is full, wait on overflow queue.
3594 		 * The next flush completion will assign us
3595 		 * flush_color and transfer to flusher_queue.
3596 		 */
3597 		list_add_tail(&this_flusher.list, &wq->flusher_overflow);
3598 	}
3599 
3600 	check_flush_dependency(wq, NULL);
3601 
3602 	mutex_unlock(&wq->mutex);
3603 
3604 	wait_for_completion(&this_flusher.done);
3605 
3606 	/*
3607 	 * Wake-up-and-cascade phase
3608 	 *
3609 	 * First flushers are responsible for cascading flushes and
3610 	 * handling overflow.  Non-first flushers can simply return.
3611 	 */
3612 	if (READ_ONCE(wq->first_flusher) != &this_flusher)
3613 		return;
3614 
3615 	mutex_lock(&wq->mutex);
3616 
3617 	/* we might have raced, check again with mutex held */
3618 	if (wq->first_flusher != &this_flusher)
3619 		goto out_unlock;
3620 
3621 	WRITE_ONCE(wq->first_flusher, NULL);
3622 
3623 	WARN_ON_ONCE(!list_empty(&this_flusher.list));
3624 	WARN_ON_ONCE(wq->flush_color != this_flusher.flush_color);
3625 
3626 	while (true) {
3627 		struct wq_flusher *next, *tmp;
3628 
3629 		/* complete all the flushers sharing the current flush color */
3630 		list_for_each_entry_safe(next, tmp, &wq->flusher_queue, list) {
3631 			if (next->flush_color != wq->flush_color)
3632 				break;
3633 			list_del_init(&next->list);
3634 			complete(&next->done);
3635 		}
3636 
3637 		WARN_ON_ONCE(!list_empty(&wq->flusher_overflow) &&
3638 			     wq->flush_color != work_next_color(wq->work_color));
3639 
3640 		/* this flush_color is finished, advance by one */
3641 		wq->flush_color = work_next_color(wq->flush_color);
3642 
3643 		/* one color has been freed, handle overflow queue */
3644 		if (!list_empty(&wq->flusher_overflow)) {
3645 			/*
3646 			 * Assign the same color to all overflowed
3647 			 * flushers, advance work_color and append to
3648 			 * flusher_queue.  This is the start-to-wait
3649 			 * phase for these overflowed flushers.
3650 			 */
3651 			list_for_each_entry(tmp, &wq->flusher_overflow, list)
3652 				tmp->flush_color = wq->work_color;
3653 
3654 			wq->work_color = work_next_color(wq->work_color);
3655 
3656 			list_splice_tail_init(&wq->flusher_overflow,
3657 					      &wq->flusher_queue);
3658 			flush_workqueue_prep_pwqs(wq, -1, wq->work_color);
3659 		}
3660 
3661 		if (list_empty(&wq->flusher_queue)) {
3662 			WARN_ON_ONCE(wq->flush_color != wq->work_color);
3663 			break;
3664 		}
3665 
3666 		/*
3667 		 * Need to flush more colors.  Make the next flusher
3668 		 * the new first flusher and arm pwqs.
3669 		 */
3670 		WARN_ON_ONCE(wq->flush_color == wq->work_color);
3671 		WARN_ON_ONCE(wq->flush_color != next->flush_color);
3672 
3673 		list_del_init(&next->list);
3674 		wq->first_flusher = next;
3675 
3676 		if (flush_workqueue_prep_pwqs(wq, wq->flush_color, -1))
3677 			break;
3678 
3679 		/*
3680 		 * Meh... this color is already done, clear first
3681 		 * flusher and repeat cascading.
3682 		 */
3683 		wq->first_flusher = NULL;
3684 	}
3685 
3686 out_unlock:
3687 	mutex_unlock(&wq->mutex);
3688 }
3689 EXPORT_SYMBOL(__flush_workqueue);
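
/*
 * Illustrative usage sketch; my_wq, work_a and work_b below are made-up
 * names, and callers normally go through the flush_workqueue() wrapper
 * rather than __flush_workqueue() directly:
 *
 *	queue_work(my_wq, &work_a);
 *	queue_work(my_wq, &work_b);
 *	flush_workqueue(my_wq);
 *
 * On return, work_a, work_b and everything else queued on my_wq before
 * the flush started have finished executing; work items queued after the
 * flush began may still be pending.
 */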
3690 
3691 /**
3692  * drain_workqueue - drain a workqueue
3693  * @wq: workqueue to drain
3694  *
3695  * Wait until the workqueue becomes empty.  While draining is in progress,
3696  * only chain queueing is allowed.  IOW, only currently pending or running
3697  * work items on @wq can queue further work items on it.  @wq is flushed
3698  * repeatedly until it becomes empty.  The number of flushes is determined
3699  * by the depth of chaining and should be relatively small.  Whine if it
3700  * takes too long.
3701  */
3702 void drain_workqueue(struct workqueue_struct *wq)
3703 {
3704 	unsigned int flush_cnt = 0;
3705 	struct pool_workqueue *pwq;
3706 
3707 	/*
3708 	 * __queue_work() needs to test whether there are drainers.  It is
3709 	 * much hotter than drain_workqueue() and already looks at @wq->flags.
3710 	 * Use __WQ_DRAINING so that queueing doesn't have to check nr_drainers.
3711 	 */
3712 	mutex_lock(&wq->mutex);
3713 	if (!wq->nr_drainers++)
3714 		wq->flags |= __WQ_DRAINING;
3715 	mutex_unlock(&wq->mutex);
3716 reflush:
3717 	__flush_workqueue(wq);
3718 
3719 	mutex_lock(&wq->mutex);
3720 
3721 	for_each_pwq(pwq, wq) {
3722 		bool drained;
3723 
3724 		raw_spin_lock_irq(&pwq->pool->lock);
3725 		drained = pwq_is_empty(pwq);
3726 		raw_spin_unlock_irq(&pwq->pool->lock);
3727 
3728 		if (drained)
3729 			continue;
3730 
3731 		if (++flush_cnt == 10 ||
3732 		    (flush_cnt % 100 == 0 && flush_cnt <= 1000))
3733 			pr_warn("workqueue %s: %s() isn't complete after %u tries\n",
3734 				wq->name, __func__, flush_cnt);
3735 
3736 		mutex_unlock(&wq->mutex);
3737 		goto reflush;
3738 	}
3739 
3740 	if (!--wq->nr_drainers)
3741 		wq->flags &= ~__WQ_DRAINING;
3742 	mutex_unlock(&wq->mutex);
3743 }
3744 EXPORT_SYMBOL_GPL(drain_workqueue);
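
/*
 * Illustrative sketch with made-up names: draining is typically used when
 * a workqueue whose work items may requeue themselves has to be emptied.
 * Note that destroy_workqueue() already drains internally, so an explicit
 * drain_workqueue() is only needed when the queue must be empty but kept
 * alive:
 *
 *	wq = alloc_workqueue("my_wq", 0, 0);
 *	queue_work(wq, &self_requeueing_work);
 *	...
 *	drain_workqueue(wq);	(waits out chained requeueing)
 *	destroy_workqueue(wq);
 */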
3745 
3746 static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
3747 			     bool from_cancel)
3748 {
3749 	struct worker *worker = NULL;
3750 	struct worker_pool *pool;
3751 	struct pool_workqueue *pwq;
3752 
3753 	might_sleep();
3754 
3755 	rcu_read_lock();
3756 	pool = get_work_pool(work);
3757 	if (!pool) {
3758 		rcu_read_unlock();
3759 		return false;
3760 	}
3761 
3762 	raw_spin_lock_irq(&pool->lock);
3763 	/* see the comment in try_to_grab_pending() with the same code */
3764 	pwq = get_work_pwq(work);
3765 	if (pwq) {
3766 		if (unlikely(pwq->pool != pool))
3767 			goto already_gone;
3768 	} else {
3769 		worker = find_worker_executing_work(pool, work);
3770 		if (!worker)
3771 			goto already_gone;
3772 		pwq = worker->current_pwq;
3773 	}
3774 
3775 	check_flush_dependency(pwq->wq, work);
3776 
3777 	insert_wq_barrier(pwq, barr, work, worker);
3778 	raw_spin_unlock_irq(&pool->lock);
3779 
3780 	/*
3781 	 * Force a lock recursion deadlock when using flush_work() inside a
3782 	 * single-threaded or rescuer equipped workqueue.
3783 	 *
3784 	 * For single-threaded workqueues the deadlock happens when the flushed
3785 	 * work is queued after the work issuing the flush_work().  For
3786 	 * rescuer-equipped workqueues the deadlock happens when the rescuer
3787 	 * stalls, blocking forward progress.
3788 	 */
3789 	if (!from_cancel &&
3790 	    (pwq->wq->saved_max_active == 1 || pwq->wq->rescuer)) {
3791 		lock_map_acquire(&pwq->wq->lockdep_map);
3792 		lock_map_release(&pwq->wq->lockdep_map);
3793 	}
3794 	rcu_read_unlock();
3795 	return true;
3796 already_gone:
3797 	raw_spin_unlock_irq(&pool->lock);
3798 	rcu_read_unlock();
3799 	return false;
3800 }
3801 
3802 static bool __flush_work(struct work_struct *work, bool from_cancel)
3803 {
3804 	struct wq_barrier barr;
3805 
3806 	if (WARN_ON(!wq_online))
3807 		return false;
3808 
3809 	if (WARN_ON(!work->func))
3810 		return false;
3811 
3812 	lock_map_acquire(&work->lockdep_map);
3813 	lock_map_release(&work->lockdep_map);
3814 
3815 	if (start_flush_work(work, &barr, from_cancel)) {
3816 		wait_for_completion(&barr.done);
3817 		destroy_work_on_stack(&barr.work);
3818 		return true;
3819 	} else {
3820 		return false;
3821 	}
3822 }
3823 
3824 /**
3825  * flush_work - wait for a work to finish executing the last queueing instance
3826  * @work: the work to flush
3827  *
3828  * Wait until @work has finished execution.  @work is guaranteed to be idle
3829  * on return if it hasn't been requeued since flush started.
3830  *
3831  * Return:
3832  * %true if flush_work() waited for the work to finish execution,
3833  * %false if it was already idle.
3834  */
3835 bool flush_work(struct work_struct *work)
3836 {
3837 	return __flush_work(work, false);
3838 }
3839 EXPORT_SYMBOL_GPL(flush_work);
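
/*
 * Illustrative usage sketch; my_data and my_work_fn are made-up names.
 * A typical caller pairs flush_work() with a work item initialized via
 * INIT_WORK() and queued with schedule_work():
 *
 *	struct my_data {
 *		struct work_struct work;
 *		int value;
 *	};
 *
 *	static void my_work_fn(struct work_struct *work)
 *	{
 *		struct my_data *d = container_of(work, struct my_data, work);
 *
 *		pr_info("processing %d\n", d->value);
 *	}
 *
 *	INIT_WORK(&d->work, my_work_fn);
 *	schedule_work(&d->work);
 *	...
 *	flush_work(&d->work);
 *
 * After flush_work() returns, the work item is idle unless something
 * requeued it after the flush started.
 */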
3840 
3841 struct cwt_wait {
3842 	wait_queue_entry_t		wait;
3843 	struct work_struct	*work;
3844 };
3845 
3846 static int cwt_wakefn(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
3847 {
3848 	struct cwt_wait *cwait = container_of(wait, struct cwt_wait, wait);
3849 
3850 	if (cwait->work != key)
3851 		return 0;
3852 	return autoremove_wake_function(wait, mode, sync, key);
3853 }
3854 
3855 static bool __cancel_work_timer(struct work_struct *work, bool is_dwork)
3856 {
3857 	static DECLARE_WAIT_QUEUE_HEAD(cancel_waitq);
3858 	unsigned long flags;
3859 	int ret;
3860 
3861 	do {
3862 		ret = try_to_grab_pending(work, is_dwork, &flags);
3863 		/*
3864 		 * If someone else is already canceling, wait for it to
3865 		 * finish.  flush_work() doesn't work for PREEMPT_NONE
3866 		 * because we may get scheduled between @work's completion
3867 		 * and the other canceling task resuming and clearing
3868 		 * CANCELING - flush_work() will return false immediately
3869 		 * as @work is no longer busy, try_to_grab_pending() will
3870 		 * return -ENOENT as @work is still being canceled and the
3871 		 * other canceling task won't be able to clear CANCELING as
3872 		 * we're hogging the CPU.
3873 		 *
3874 		 * Let's wait for completion using a waitqueue.  As this
3875 		 * may lead to the thundering herd problem, use a custom
3876 		 * wake function which matches @work along with exclusive
3877 		 * wait and wakeup.
3878 		 */
3879 		if (unlikely(ret == -ENOENT)) {
3880 			struct cwt_wait cwait;
3881 
3882 			init_wait(&cwait.wait);
3883 			cwait.wait.func = cwt_wakefn;
3884 			cwait.work = work;
3885 
3886 			prepare_to_wait_exclusive(&cancel_waitq, &cwait.wait,
3887 						  TASK_UNINTERRUPTIBLE);
3888 			if (work_is_canceling(work))
3889 				schedule();
3890 			finish_wait(&cancel_waitq, &cwait.wait);
3891 		}
3892 	} while (unlikely(ret < 0));
3893 
3894 	/* tell other tasks trying to grab @work to back off */
3895 	mark_work_canceling(work);
3896 	local_irq_restore(flags);
3897 
3898 	/*
3899 	 * This allows canceling during early boot.  We know that @work
3900 	 * isn't executing.
3901 	 */
3902 	if (wq_online)
3903 		__flush_work(work, true);
3904 
3905 	clear_work_data(work);
3906 
3907 	/*
3908 	 * Paired with prepare_to_wait() above so that either
3909 	 * waitqueue_active() is visible here or !work_is_canceling() is
3910 	 * visible there.
3911 	 */
3912 	smp_mb();
3913 	if (waitqueue_active(&cancel_waitq))
3914 		__wake_up(&cancel_waitq, TASK_NORMAL, 1, work);
3915 
3916 	return ret;
3917 }
3918 
3919 /**
3920  * cancel_work_sync - cancel a work and wait for it to finish
3921  * @work: the work to cancel
3922  *
3923  * Cancel @work and wait for its execution to finish.  This function
3924  * can be used even if the work re-queues itself or migrates to
3925  * another workqueue.  On return from this function, @work is
3926  * guaranteed to be not pending or executing on any CPU.
3927  *
3928  * cancel_work_sync(&delayed_work->work) must not be used for
3929  * delayed_works.  Use cancel_delayed_work_sync() instead.
3930  *
3931  * The caller must ensure that the workqueue on which @work was last
3932  * queued can't be destroyed before this function returns.
3933  *
3934  * Return:
3935  * %true if @work was pending, %false otherwise.
3936  */
3937 bool cancel_work_sync(struct work_struct *work)
3938 {
3939 	return __cancel_work_timer(work, false);
3940 }
3941 EXPORT_SYMBOL_GPL(cancel_work_sync);
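
/*
 * Illustrative teardown sketch with made-up names: cancel_work_sync() is
 * commonly the last thing a driver does with a work item before freeing
 * the object embedding it:
 *
 *	static void my_device_remove(struct my_device *dev)
 *	{
 *		cancel_work_sync(&dev->event_work);
 *		kfree(dev);	(no instance of the handler runs past here)
 *	}
 */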
3942 
3943 /**
3944  * flush_delayed_work - wait for a dwork to finish executing the last queueing
3945  * @dwork: the delayed work to flush
3946  *
3947  * Delayed timer is cancelled and the pending work is queued for
3948  * immediate execution.  Like flush_work(), this function only
3949  * considers the last queueing instance of @dwork.
3950  *
3951  * Return:
3952  * %true if flush_work() waited for the work to finish execution,
3953  * %false if it was already idle.
3954  */
3955 bool flush_delayed_work(struct delayed_work *dwork)
3956 {
3957 	local_irq_disable();
3958 	if (del_timer_sync(&dwork->timer))
3959 		__queue_work(dwork->cpu, dwork->wq, &dwork->work);
3960 	local_irq_enable();
3961 	return flush_work(&dwork->work);
3962 }
3963 EXPORT_SYMBOL(flush_delayed_work);
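
/*
 * Illustrative sketch with made-up names: flush_delayed_work() pulls a
 * still-pending timer forward so the caller doesn't have to wait out the
 * remaining delay:
 *
 *	INIT_DELAYED_WORK(&dev->poll_work, my_poll_fn);
 *	schedule_delayed_work(&dev->poll_work, HZ);
 *	...
 *	flush_delayed_work(&dev->poll_work);
 *
 * If the timer hadn't fired yet, the work is queued immediately and then
 * flushed; otherwise this behaves exactly like flush_work().
 */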
3964 
3965 /**
3966  * flush_rcu_work - wait for a rwork to finish executing the last queueing
3967  * @rwork: the rcu work to flush
3968  *
3969  * Return:
3970  * %true if flush_rcu_work() waited for the work to finish execution,
3971  * %false if it was already idle.
3972  */
3973 bool flush_rcu_work(struct rcu_work *rwork)
3974 {
3975 	if (test_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&rwork->work))) {
3976 		rcu_barrier();
3977 		flush_work(&rwork->work);
3978 		return true;
3979 	} else {
3980 		return flush_work(&rwork->work);
3981 	}
3982 }
3983 EXPORT_SYMBOL(flush_rcu_work);
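
/*
 * Illustrative sketch with made-up names: an rcu_work is queued with
 * queue_rcu_work(), which defers the actual queueing until after an RCU
 * grace period:
 *
 *	INIT_RCU_WORK(&obj->free_rwork, my_free_fn);
 *	queue_rcu_work(system_wq, &obj->free_rwork);
 *	...
 *	flush_rcu_work(&obj->free_rwork);
 *
 * The rcu_barrier() above is what lets the flush cover the pending
 * grace-period wait in addition to the work execution itself.
 */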
3984 
3985 static bool __cancel_work(struct work_struct *work, bool is_dwork)
3986 {
3987 	unsigned long flags;
3988 	int ret;
3989 
3990 	do {
3991 		ret = try_to_grab_pending(work, is_dwork, &flags);
3992 	} while (unlikely(ret == -EAGAIN));
3993 
3994 	if (unlikely(ret < 0))
3995 		return false;
3996 
3997 	set_work_pool_and_clear_pending(work, get_work_pool_id(work));
3998 	local_irq_restore(flags);
3999 	return ret;
4000 }
4001 
4002 /*
4003  * See cancel_delayed_work()
4004  */
4005 bool cancel_work(struct work_struct *work)
4006 {
4007 	return __cancel_work(work, false);
4008 }
4009 EXPORT_SYMBOL(cancel_work);
4010 
4011 /**
4012  * cancel_delayed_work - cancel a delayed work
4013  * @dwork: delayed_work to cancel
4014  *
4015  * Kill off a pending delayed_work.
4016  *
4017  * Return: %true if @dwork was pending and canceled; %false if it wasn't
4018  * pending.
4019  *
4020  * Note:
4021  * The work callback function may still be running on return, unless
4022  * it returns %true and the work doesn't re-arm itself.  Explicitly flush or
4023  * use cancel_delayed_work_sync() to wait on it.
4024  *
4025  * This function is safe to call from any context including IRQ handler.
4026  */
4027 bool cancel_delayed_work(struct delayed_work *dwork)
4028 {
4029 	return __cancel_work(&dwork->work, true);
4030 }
4031 EXPORT_SYMBOL(cancel_delayed_work);
4032 
4033 /**
4034  * cancel_delayed_work_sync - cancel a delayed work and wait for it to finish
4035  * @dwork: the delayed work to cancel
4036  *
4037  * This is cancel_work_sync() for delayed works.
4038  *
4039  * Return:
4040  * %true if @dwork was pending, %false otherwise.
4041  */
4042 bool cancel_delayed_work_sync(struct delayed_work *dwork)
4043 {
4044 	return __cancel_work_timer(&dwork->work, true);
4045 }
4046 EXPORT_SYMBOL(cancel_delayed_work_sync);
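
/*
 * Illustrative sketch with made-up names: the _sync variant is the one to
 * use on teardown paths where the handler must not run afterwards:
 *
 *	cancel_delayed_work_sync(&dev->poll_work);
 *	kfree(dev);
 *
 * Plain cancel_delayed_work() would be insufficient here as the handler
 * could still be running on another CPU when it returns.
 */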
4047 
4048 /**
4049  * schedule_on_each_cpu - execute a function synchronously on each online CPU
4050  * @func: the function to call
4051  *
4052  * schedule_on_each_cpu() executes @func on each online CPU using the
4053  * system workqueue and blocks until all CPUs have completed.
4054  * schedule_on_each_cpu() is very slow.
4055  *
4056  * Return:
4057  * 0 on success, -errno on failure.
4058  */
4059 int schedule_on_each_cpu(work_func_t func)
4060 {
4061 	int cpu;
4062 	struct work_struct __percpu *works;
4063 
4064 	works = alloc_percpu(struct work_struct);
4065 	if (!works)
4066 		return -ENOMEM;
4067 
4068 	cpus_read_lock();
4069 
4070 	for_each_online_cpu(cpu) {
4071 		struct work_struct *work = per_cpu_ptr(works, cpu);
4072 
4073 		INIT_WORK(work, func);
4074 		schedule_work_on(cpu, work);
4075 	}
4076 
4077 	for_each_online_cpu(cpu)
4078 		flush_work(per_cpu_ptr(works, cpu));
4079 
4080 	cpus_read_unlock();
4081 	free_percpu(works);
4082 	return 0;
4083 }
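
/*
 * Illustrative sketch; my_sample_fn is a made-up callback.  Run a short
 * function once on every online CPU and wait for all of them:
 *
 *	static void my_sample_fn(struct work_struct *work)
 *	{
 *		pr_info("sampled on CPU%d\n", smp_processor_id());
 *	}
 *
 *	ret = schedule_on_each_cpu(my_sample_fn);
 *
 * As every online CPU has to schedule a kworker, this is only suitable
 * for rare, setup-style operations.
 */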
4084 
4085 /**
4086  * execute_in_process_context - reliably execute the routine with user context
4087  * @fn:		the function to execute
4088  * @ew:		guaranteed storage for the execute work structure (must
4089  *		be available when the work executes)
4090  *
4091  * Executes the function immediately if process context is available,
4092  * otherwise schedules the function for delayed execution.
4093  *
4094  * Return:	0 - function was executed
4095  *		1 - function was scheduled for execution
4096  */
4097 int execute_in_process_context(work_func_t fn, struct execute_work *ew)
4098 {
4099 	if (!in_interrupt()) {
4100 		fn(&ew->work);
4101 		return 0;
4102 	}
4103 
4104 	INIT_WORK(&ew->work, fn);
4105 	schedule_work(&ew->work);
4106 
4107 	return 1;
4108 }
4109 EXPORT_SYMBOL_GPL(execute_in_process_context);
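
/*
 * Illustrative sketch with made-up names: @ew must stay valid until the
 * deferred case has executed, so it is typically embedded in a
 * longer-lived object rather than placed on the stack:
 *
 *	struct my_ctx {
 *		struct execute_work ew;
 *		...
 *	};
 *
 *	if (execute_in_process_context(my_cleanup_fn, &ctx->ew))
 *		pr_debug("cleanup deferred to a workqueue\n");
 */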
4110 
4111 /**
4112  * free_workqueue_attrs - free a workqueue_attrs
4113  * @attrs: workqueue_attrs to free
4114  *
4115  * Undo alloc_workqueue_attrs().
4116  */
4117 void free_workqueue_attrs(struct workqueue_attrs *attrs)
4118 {
4119 	if (attrs) {
4120 		free_cpumask_var(attrs->cpumask);
4121 		free_cpumask_var(attrs->__pod_cpumask);
4122 		kfree(attrs);
4123 	}
4124 }
4125 
4126 /**
4127  * alloc_workqueue_attrs - allocate a workqueue_attrs
4128  *
4129  * Allocate a new workqueue_attrs, initialize with default settings and
4130  * return it.
4131  *
4132  * Return: The allocated new workqueue_attrs on success. %NULL on failure.
4133  */
4134 struct workqueue_attrs *alloc_workqueue_attrs(void)
4135 {
4136 	struct workqueue_attrs *attrs;
4137 
4138 	attrs = kzalloc(sizeof(*attrs), GFP_KERNEL);
4139 	if (!attrs)
4140 		goto fail;
4141 	if (!alloc_cpumask_var(&attrs->cpumask, GFP_KERNEL))
4142 		goto fail;
4143 	if (!alloc_cpumask_var(&attrs->__pod_cpumask, GFP_KERNEL))
4144 		goto fail;
4145 
4146 	cpumask_copy(attrs->cpumask, cpu_possible_mask);
4147 	attrs->affn_scope = WQ_AFFN_DFL;
4148 	return attrs;
4149 fail:
4150 	free_workqueue_attrs(attrs);
4151 	return NULL;
4152 }
4153 
4154 static void copy_workqueue_attrs(struct workqueue_attrs *to,
4155 				 const struct workqueue_attrs *from)
4156 {
4157 	to->nice = from->nice;
4158 	cpumask_copy(to->cpumask, from->cpumask);
4159 	cpumask_copy(to->__pod_cpumask, from->__pod_cpumask);
4160 	to->affn_strict = from->affn_strict;
4161 
4162 	/*
4163 	 * Unlike hash and equality test, copying shouldn't ignore wq-only
4164 	 * fields as copying is used for both pool and wq attrs. Instead,
4165 	 * get_unbound_pool() explicitly clears the fields.
4166 	 */
4167 	to->affn_scope = from->affn_scope;
4168 	to->ordered = from->ordered;
4169 }
4170 
4171 /*
4172  * Some attrs fields are workqueue-only. Clear them for worker_pools. See the
4173  * comments in the 'struct workqueue_attrs' definition.
4174  */
4175 static void wqattrs_clear_for_pool(struct workqueue_attrs *attrs)
4176 {
4177 	attrs->affn_scope = WQ_AFFN_NR_TYPES;
4178 	attrs->ordered = false;
4179 }
4180 
4181 /* hash value of the content of @attr */
4182 static u32 wqattrs_hash(const struct workqueue_attrs *attrs)
4183 {
4184 	u32 hash = 0;
4185 
4186 	hash = jhash_1word(attrs->nice, hash);
4187 	hash = jhash(cpumask_bits(attrs->cpumask),
4188 		     BITS_TO_LONGS(nr_cpumask_bits) * sizeof(long), hash);
4189 	hash = jhash(cpumask_bits(attrs->__pod_cpumask),
4190 		     BITS_TO_LONGS(nr_cpumask_bits) * sizeof(long), hash);
4191 	hash = jhash_1word(attrs->affn_strict, hash);
4192 	return hash;
4193 }
4194 
4195 /* content equality test */
4196 static bool wqattrs_equal(const struct workqueue_attrs *a,
4197 			  const struct workqueue_attrs *b)
4198 {
4199 	if (a->nice != b->nice)
4200 		return false;
4201 	if (!cpumask_equal(a->cpumask, b->cpumask))
4202 		return false;
4203 	if (!cpumask_equal(a->__pod_cpumask, b->__pod_cpumask))
4204 		return false;
4205 	if (a->affn_strict != b->affn_strict)
4206 		return false;
4207 	return true;
4208 }
4209 
4210 /* Update @attrs with actually available CPUs */
4211 static void wqattrs_actualize_cpumask(struct workqueue_attrs *attrs,
4212 				      const cpumask_t *unbound_cpumask)
4213 {
4214 	/*
4215 	 * Calculate the effective CPU mask of @attrs given @unbound_cpumask. If
4216 	 * @attrs->cpumask doesn't overlap with @unbound_cpumask, we fallback to
4217 	 * @attrs->cpumask doesn't overlap with @unbound_cpumask, we fall back to
4218 	 */
4219 	cpumask_and(attrs->cpumask, attrs->cpumask, unbound_cpumask);
4220 	if (unlikely(cpumask_empty(attrs->cpumask)))
4221 		cpumask_copy(attrs->cpumask, unbound_cpumask);
4222 }
4223 
4224 /* find wq_pod_type to use for @attrs */
4225 static const struct wq_pod_type *
4226 wqattrs_pod_type(const struct workqueue_attrs *attrs)
4227 {
4228 	enum wq_affn_scope scope;
4229 	struct wq_pod_type *pt;
4230 
4231 	/* to synchronize access to wq_affn_dfl */
4232 	lockdep_assert_held(&wq_pool_mutex);
4233 
4234 	if (attrs->affn_scope == WQ_AFFN_DFL)
4235 		scope = wq_affn_dfl;
4236 	else
4237 		scope = attrs->affn_scope;
4238 
4239 	pt = &wq_pod_types[scope];
4240 
4241 	if (!WARN_ON_ONCE(attrs->affn_scope == WQ_AFFN_NR_TYPES) &&
4242 	    likely(pt->nr_pods))
4243 		return pt;
4244 
4245 	/*
4246 	 * Before workqueue_init_topology(), only SYSTEM is available, which
4247 	 * is initialized in workqueue_init_early().
4248 	 */
4249 	pt = &wq_pod_types[WQ_AFFN_SYSTEM];
4250 	BUG_ON(!pt->nr_pods);
4251 	return pt;
4252 }
4253 
4254 /**
4255  * init_worker_pool - initialize a newly zalloc'd worker_pool
4256  * @pool: worker_pool to initialize
4257  *
4258  * Initialize a newly zalloc'd @pool.  It also allocates @pool->attrs.
4259  *
4260  * Return: 0 on success, -errno on failure.  Even on failure, all fields
4261  * inside @pool proper are initialized and put_unbound_pool() can be called
4262  * on @pool safely to release it.
4263  */
4264 static int init_worker_pool(struct worker_pool *pool)
4265 {
4266 	raw_spin_lock_init(&pool->lock);
4267 	pool->id = -1;
4268 	pool->cpu = -1;
4269 	pool->node = NUMA_NO_NODE;
4270 	pool->flags |= POOL_DISASSOCIATED;
4271 	pool->watchdog_ts = jiffies;
4272 	INIT_LIST_HEAD(&pool->worklist);
4273 	INIT_LIST_HEAD(&pool->idle_list);
4274 	hash_init(pool->busy_hash);
4275 
4276 	timer_setup(&pool->idle_timer, idle_worker_timeout, TIMER_DEFERRABLE);
4277 	INIT_WORK(&pool->idle_cull_work, idle_cull_fn);
4278 
4279 	timer_setup(&pool->mayday_timer, pool_mayday_timeout, 0);
4280 
4281 	INIT_LIST_HEAD(&pool->workers);
4282 	INIT_LIST_HEAD(&pool->dying_workers);
4283 
4284 	ida_init(&pool->worker_ida);
4285 	INIT_HLIST_NODE(&pool->hash_node);
4286 	pool->refcnt = 1;
4287 
4288 	/* shouldn't fail above this point */
4289 	pool->attrs = alloc_workqueue_attrs();
4290 	if (!pool->attrs)
4291 		return -ENOMEM;
4292 
4293 	wqattrs_clear_for_pool(pool->attrs);
4294 
4295 	return 0;
4296 }
4297 
4298 #ifdef CONFIG_LOCKDEP
4299 static void wq_init_lockdep(struct workqueue_struct *wq)
4300 {
4301 	char *lock_name;
4302 
4303 	lockdep_register_key(&wq->key);
4304 	lock_name = kasprintf(GFP_KERNEL, "%s%s", "(wq_completion)", wq->name);
4305 	if (!lock_name)
4306 		lock_name = wq->name;
4307 
4308 	wq->lock_name = lock_name;
4309 	lockdep_init_map(&wq->lockdep_map, lock_name, &wq->key, 0);
4310 }
4311 
4312 static void wq_unregister_lockdep(struct workqueue_struct *wq)
4313 {
4314 	lockdep_unregister_key(&wq->key);
4315 }
4316 
4317 static void wq_free_lockdep(struct workqueue_struct *wq)
4318 {
4319 	if (wq->lock_name != wq->name)
4320 		kfree(wq->lock_name);
4321 }
4322 #else
4323 static void wq_init_lockdep(struct workqueue_struct *wq)
4324 {
4325 }
4326 
4327 static void wq_unregister_lockdep(struct workqueue_struct *wq)
4328 {
4329 }
4330 
4331 static void wq_free_lockdep(struct workqueue_struct *wq)
4332 {
4333 }
4334 #endif
4335 
4336 static void free_node_nr_active(struct wq_node_nr_active **nna_ar)
4337 {
4338 	int node;
4339 
4340 	for_each_node(node) {
4341 		kfree(nna_ar[node]);
4342 		nna_ar[node] = NULL;
4343 	}
4344 
4345 	kfree(nna_ar[nr_node_ids]);
4346 	nna_ar[nr_node_ids] = NULL;
4347 }
4348 
4349 static void init_node_nr_active(struct wq_node_nr_active *nna)
4350 {
4351 	atomic_set(&nna->nr, 0);
4352 	raw_spin_lock_init(&nna->lock);
4353 	INIT_LIST_HEAD(&nna->pending_pwqs);
4354 }
4355 
4356 /*
4357  * Each node's nr_active counter will be accessed mostly from its own node and
4358  * should be allocated in the node.
4359  */
4360 static int alloc_node_nr_active(struct wq_node_nr_active **nna_ar)
4361 {
4362 	struct wq_node_nr_active *nna;
4363 	int node;
4364 
4365 	for_each_node(node) {
4366 		nna = kzalloc_node(sizeof(*nna), GFP_KERNEL, node);
4367 		if (!nna)
4368 			goto err_free;
4369 		init_node_nr_active(nna);
4370 		nna_ar[node] = nna;
4371 	}
4372 
4373 	/* [nr_node_ids] is used as the fallback */
4374 	nna = kzalloc_node(sizeof(*nna), GFP_KERNEL, NUMA_NO_NODE);
4375 	if (!nna)
4376 		goto err_free;
4377 	init_node_nr_active(nna);
4378 	nna_ar[nr_node_ids] = nna;
4379 
4380 	return 0;
4381 
4382 err_free:
4383 	free_node_nr_active(nna_ar);
4384 	return -ENOMEM;
4385 }
4386 
4387 static void rcu_free_wq(struct rcu_head *rcu)
4388 {
4389 	struct workqueue_struct *wq =
4390 		container_of(rcu, struct workqueue_struct, rcu);
4391 
4392 	if (wq->flags & WQ_UNBOUND)
4393 		free_node_nr_active(wq->node_nr_active);
4394 
4395 	wq_free_lockdep(wq);
4396 	free_percpu(wq->cpu_pwq);
4397 	free_workqueue_attrs(wq->unbound_attrs);
4398 	kfree(wq);
4399 }
4400 
4401 static void rcu_free_pool(struct rcu_head *rcu)
4402 {
4403 	struct worker_pool *pool = container_of(rcu, struct worker_pool, rcu);
4404 
4405 	ida_destroy(&pool->worker_ida);
4406 	free_workqueue_attrs(pool->attrs);
4407 	kfree(pool);
4408 }
4409 
4410 /**
4411  * put_unbound_pool - put a worker_pool
4412  * @pool: worker_pool to put
4413  *
4414  * Put @pool.  If its refcnt reaches zero, it gets destroyed in RCU
4415  * safe manner.  get_unbound_pool() calls this function on its failure path
4416  * and this function should be able to release pools which went through,
4417  * successfully or not, init_worker_pool().
4418  *
4419  * Should be called with wq_pool_mutex held.
4420  */
4421 static void put_unbound_pool(struct worker_pool *pool)
4422 {
4423 	DECLARE_COMPLETION_ONSTACK(detach_completion);
4424 	struct worker *worker;
4425 	LIST_HEAD(cull_list);
4426 
4427 	lockdep_assert_held(&wq_pool_mutex);
4428 
4429 	if (--pool->refcnt)
4430 		return;
4431 
4432 	/* sanity checks */
4433 	if (WARN_ON(!(pool->cpu < 0)) ||
4434 	    WARN_ON(!list_empty(&pool->worklist)))
4435 		return;
4436 
4437 	/* release id and unhash */
4438 	if (pool->id >= 0)
4439 		idr_remove(&worker_pool_idr, pool->id);
4440 	hash_del(&pool->hash_node);
4441 
4442 	/*
4443 	 * Become the manager and destroy all workers.  This prevents
4444 	 * @pool's workers from blocking on wq_pool_attach_mutex.  We're the last
4445 	 * manager and @pool gets freed with the flag set.
4446 	 *
4447 	 * Having a concurrent manager is quite unlikely to happen as we can
4448 	 * only get here with
4449 	 *   pwq->refcnt == pool->refcnt == 0
4450 	 * which implies no work queued to the pool, which implies no worker can
4451 	 * become the manager. However a worker could have taken the role of
4452 	 * manager before the refcnts dropped to 0, since maybe_create_worker()
4453 	 * drops pool->lock.
4454 	 */
4455 	while (true) {
4456 		rcuwait_wait_event(&manager_wait,
4457 				   !(pool->flags & POOL_MANAGER_ACTIVE),
4458 				   TASK_UNINTERRUPTIBLE);
4459 
4460 		mutex_lock(&wq_pool_attach_mutex);
4461 		raw_spin_lock_irq(&pool->lock);
4462 		if (!(pool->flags & POOL_MANAGER_ACTIVE)) {
4463 			pool->flags |= POOL_MANAGER_ACTIVE;
4464 			break;
4465 		}
4466 		raw_spin_unlock_irq(&pool->lock);
4467 		mutex_unlock(&wq_pool_attach_mutex);
4468 	}
4469 
4470 	while ((worker = first_idle_worker(pool)))
4471 		set_worker_dying(worker, &cull_list);
4472 	WARN_ON(pool->nr_workers || pool->nr_idle);
4473 	raw_spin_unlock_irq(&pool->lock);
4474 
4475 	wake_dying_workers(&cull_list);
4476 
4477 	if (!list_empty(&pool->workers) || !list_empty(&pool->dying_workers))
4478 		pool->detach_completion = &detach_completion;
4479 	mutex_unlock(&wq_pool_attach_mutex);
4480 
4481 	if (pool->detach_completion)
4482 		wait_for_completion(pool->detach_completion);
4483 
4484 	/* shut down the timers */
4485 	del_timer_sync(&pool->idle_timer);
4486 	cancel_work_sync(&pool->idle_cull_work);
4487 	del_timer_sync(&pool->mayday_timer);
4488 
4489 	/* RCU protected to allow dereferences from get_work_pool() */
4490 	call_rcu(&pool->rcu, rcu_free_pool);
4491 }
4492 
4493 /**
4494  * get_unbound_pool - get a worker_pool with the specified attributes
4495  * @attrs: the attributes of the worker_pool to get
4496  *
4497  * Obtain a worker_pool which has the same attributes as @attrs, bump the
4498  * reference count and return it.  If there already is a matching
4499  * worker_pool, it will be used; otherwise, this function attempts to
4500  * create a new one.
4501  *
4502  * Should be called with wq_pool_mutex held.
4503  *
4504  * Return: On success, a worker_pool with the same attributes as @attrs.
4505  * On failure, %NULL.
4506  */
4507 static struct worker_pool *get_unbound_pool(const struct workqueue_attrs *attrs)
4508 {
4509 	struct wq_pod_type *pt = &wq_pod_types[WQ_AFFN_NUMA];
4510 	u32 hash = wqattrs_hash(attrs);
4511 	struct worker_pool *pool;
4512 	int pod, node = NUMA_NO_NODE;
4513 
4514 	lockdep_assert_held(&wq_pool_mutex);
4515 
4516 	/* do we already have a matching pool? */
4517 	hash_for_each_possible(unbound_pool_hash, pool, hash_node, hash) {
4518 		if (wqattrs_equal(pool->attrs, attrs)) {
4519 			pool->refcnt++;
4520 			return pool;
4521 		}
4522 	}
4523 
4524 	/* If __pod_cpumask is contained inside a NUMA pod, that's our node */
4525 	for (pod = 0; pod < pt->nr_pods; pod++) {
4526 		if (cpumask_subset(attrs->__pod_cpumask, pt->pod_cpus[pod])) {
4527 			node = pt->pod_node[pod];
4528 			break;
4529 		}
4530 	}
4531 
4532 	/* nope, create a new one */
4533 	pool = kzalloc_node(sizeof(*pool), GFP_KERNEL, node);
4534 	if (!pool || init_worker_pool(pool) < 0)
4535 		goto fail;
4536 
4537 	pool->node = node;
4538 	copy_workqueue_attrs(pool->attrs, attrs);
4539 	wqattrs_clear_for_pool(pool->attrs);
4540 
4541 	if (worker_pool_assign_id(pool) < 0)
4542 		goto fail;
4543 
4544 	/* create and start the initial worker */
4545 	if (wq_online && !create_worker(pool))
4546 		goto fail;
4547 
4548 	/* install */
4549 	hash_add(unbound_pool_hash, &pool->hash_node, hash);
4550 
4551 	return pool;
4552 fail:
4553 	if (pool)
4554 		put_unbound_pool(pool);
4555 	return NULL;
4556 }
4557 
4558 static void rcu_free_pwq(struct rcu_head *rcu)
4559 {
4560 	kmem_cache_free(pwq_cache,
4561 			container_of(rcu, struct pool_workqueue, rcu));
4562 }
4563 
4564 /*
4565  * Scheduled on pwq_release_worker by put_pwq() when an unbound pwq hits zero
4566  * refcnt and needs to be destroyed.
4567  */
4568 static void pwq_release_workfn(struct kthread_work *work)
4569 {
4570 	struct pool_workqueue *pwq = container_of(work, struct pool_workqueue,
4571 						  release_work);
4572 	struct workqueue_struct *wq = pwq->wq;
4573 	struct worker_pool *pool = pwq->pool;
4574 	bool is_last = false;
4575 
4576 	/*
4577 	 * When @pwq is not linked, it doesn't hold any reference to the
4578 	 * @wq, and @wq is invalid to access.
4579 	 */
4580 	if (!list_empty(&pwq->pwqs_node)) {
4581 		mutex_lock(&wq->mutex);
4582 		list_del_rcu(&pwq->pwqs_node);
4583 		is_last = list_empty(&wq->pwqs);
4584 		mutex_unlock(&wq->mutex);
4585 	}
4586 
4587 	if (wq->flags & WQ_UNBOUND) {
4588 		mutex_lock(&wq_pool_mutex);
4589 		put_unbound_pool(pool);
4590 		mutex_unlock(&wq_pool_mutex);
4591 	}
4592 
4593 	if (!list_empty(&pwq->pending_node)) {
4594 		struct wq_node_nr_active *nna =
4595 			wq_node_nr_active(pwq->wq, pwq->pool->node);
4596 
4597 		raw_spin_lock_irq(&nna->lock);
4598 		list_del_init(&pwq->pending_node);
4599 		raw_spin_unlock_irq(&nna->lock);
4600 	}
4601 
4602 	call_rcu(&pwq->rcu, rcu_free_pwq);
4603 
4604 	/*
4605 	 * If we're the last pwq going away, @wq is already dead and no one
4606 	 * is gonna access it anymore.  Schedule RCU free.
4607 	 */
4608 	if (is_last) {
4609 		wq_unregister_lockdep(wq);
4610 		call_rcu(&wq->rcu, rcu_free_wq);
4611 	}
4612 }
4613 
4614 /* initialize newly allocated @pwq which is associated with @wq and @pool */
4615 static void init_pwq(struct pool_workqueue *pwq, struct workqueue_struct *wq,
4616 		     struct worker_pool *pool)
4617 {
4618 	BUG_ON((unsigned long)pwq & WORK_STRUCT_FLAG_MASK);
4619 
4620 	memset(pwq, 0, sizeof(*pwq));
4621 
4622 	pwq->pool = pool;
4623 	pwq->wq = wq;
4624 	pwq->flush_color = -1;
4625 	pwq->refcnt = 1;
4626 	INIT_LIST_HEAD(&pwq->inactive_works);
4627 	INIT_LIST_HEAD(&pwq->pending_node);
4628 	INIT_LIST_HEAD(&pwq->pwqs_node);
4629 	INIT_LIST_HEAD(&pwq->mayday_node);
4630 	kthread_init_work(&pwq->release_work, pwq_release_workfn);
4631 }
4632 
4633 /* sync @pwq with the current state of its associated wq and link it */
4634 static void link_pwq(struct pool_workqueue *pwq)
4635 {
4636 	struct workqueue_struct *wq = pwq->wq;
4637 
4638 	lockdep_assert_held(&wq->mutex);
4639 
4640 	/* may be called multiple times, ignore if already linked */
4641 	if (!list_empty(&pwq->pwqs_node))
4642 		return;
4643 
4644 	/* set the matching work_color */
4645 	pwq->work_color = wq->work_color;
4646 
4647 	/* link in @pwq */
4648 	list_add_rcu(&pwq->pwqs_node, &wq->pwqs);
4649 }
4650 
4651 /* obtain a pool matching @attr and create a pwq associating the pool and @wq */
4652 static struct pool_workqueue *alloc_unbound_pwq(struct workqueue_struct *wq,
4653 					const struct workqueue_attrs *attrs)
4654 {
4655 	struct worker_pool *pool;
4656 	struct pool_workqueue *pwq;
4657 
4658 	lockdep_assert_held(&wq_pool_mutex);
4659 
4660 	pool = get_unbound_pool(attrs);
4661 	if (!pool)
4662 		return NULL;
4663 
4664 	pwq = kmem_cache_alloc_node(pwq_cache, GFP_KERNEL, pool->node);
4665 	if (!pwq) {
4666 		put_unbound_pool(pool);
4667 		return NULL;
4668 	}
4669 
4670 	init_pwq(pwq, wq, pool);
4671 	return pwq;
4672 }
4673 
4674 /**
4675  * wq_calc_pod_cpumask - calculate a wq_attrs' cpumask for a pod
4676  * @attrs: the wq_attrs of the default pwq of the target workqueue
4677  * @cpu: the target CPU
4678  * @cpu_going_down: if >= 0, the CPU to consider as offline
4679  *
4680  * Calculate the cpumask a workqueue with @attrs should use on @pod. If
4681  * Calculate the cpumask a workqueue with @attrs should use in @cpu's pod. If
4682  * The result is stored in @attrs->__pod_cpumask.
4683  *
4684  * If pod affinity is not enabled, @attrs->cpumask is always used. If enabled
4685  * and @pod has online CPUs requested by @attrs, the returned cpumask is the
4686  * intersection of the possible CPUs of @pod and @attrs->cpumask.
4687  *
4688  * The caller is responsible for ensuring that the cpumask of @pod stays stable.
4689  */
4690 static void wq_calc_pod_cpumask(struct workqueue_attrs *attrs, int cpu,
4691 				int cpu_going_down)
4692 {
4693 	const struct wq_pod_type *pt = wqattrs_pod_type(attrs);
4694 	int pod = pt->cpu_pod[cpu];
4695 
4696 	/* does @pod have any online CPUs @attrs wants? */
4697 	cpumask_and(attrs->__pod_cpumask, pt->pod_cpus[pod], attrs->cpumask);
4698 	cpumask_and(attrs->__pod_cpumask, attrs->__pod_cpumask, cpu_online_mask);
4699 	if (cpu_going_down >= 0)
4700 		cpumask_clear_cpu(cpu_going_down, attrs->__pod_cpumask);
4701 
4702 	if (cpumask_empty(attrs->__pod_cpumask)) {
4703 		cpumask_copy(attrs->__pod_cpumask, attrs->cpumask);
4704 		return;
4705 	}
4706 
4707 	/* yeap, return possible CPUs in @pod that @attrs wants */
4708 	cpumask_and(attrs->__pod_cpumask, attrs->cpumask, pt->pod_cpus[pod]);
4709 
4710 	if (cpumask_empty(attrs->__pod_cpumask))
4711 		pr_warn_once("WARNING: workqueue cpumask: online intersect > "
4712 				"possible intersect\n");
4713 }
4714 
4715 /* install @pwq into @wq and return the old pwq, @cpu < 0 for dfl_pwq */
4716 static struct pool_workqueue *install_unbound_pwq(struct workqueue_struct *wq,
4717 					int cpu, struct pool_workqueue *pwq)
4718 {
4719 	struct pool_workqueue __rcu **slot = unbound_pwq_slot(wq, cpu);
4720 	struct pool_workqueue *old_pwq;
4721 
4722 	lockdep_assert_held(&wq_pool_mutex);
4723 	lockdep_assert_held(&wq->mutex);
4724 
4725 	/* link_pwq() can handle duplicate calls */
4726 	link_pwq(pwq);
4727 
4728 	old_pwq = rcu_access_pointer(*slot);
4729 	rcu_assign_pointer(*slot, pwq);
4730 	return old_pwq;
4731 }
4732 
4733 /* context to store the prepared attrs & pwqs before applying */
4734 struct apply_wqattrs_ctx {
4735 	struct workqueue_struct	*wq;		/* target workqueue */
4736 	struct workqueue_attrs	*attrs;		/* attrs to apply */
4737 	struct list_head	list;		/* queued for batching commit */
4738 	struct pool_workqueue	*dfl_pwq;
4739 	struct pool_workqueue	*pwq_tbl[];
4740 };
4741 
4742 /* free the resources after success or abort */
4743 static void apply_wqattrs_cleanup(struct apply_wqattrs_ctx *ctx)
4744 {
4745 	if (ctx) {
4746 		int cpu;
4747 
4748 		for_each_possible_cpu(cpu)
4749 			put_pwq_unlocked(ctx->pwq_tbl[cpu]);
4750 		put_pwq_unlocked(ctx->dfl_pwq);
4751 
4752 		free_workqueue_attrs(ctx->attrs);
4753 
4754 		kfree(ctx);
4755 	}
4756 }
4757 
4758 /* allocate the attrs and pwqs for later installation */
4759 static struct apply_wqattrs_ctx *
4760 apply_wqattrs_prepare(struct workqueue_struct *wq,
4761 		      const struct workqueue_attrs *attrs,
4762 		      const cpumask_var_t unbound_cpumask)
4763 {
4764 	struct apply_wqattrs_ctx *ctx;
4765 	struct workqueue_attrs *new_attrs;
4766 	int cpu;
4767 
4768 	lockdep_assert_held(&wq_pool_mutex);
4769 
4770 	if (WARN_ON(attrs->affn_scope < 0 ||
4771 		    attrs->affn_scope >= WQ_AFFN_NR_TYPES))
4772 		return ERR_PTR(-EINVAL);
4773 
4774 	ctx = kzalloc(struct_size(ctx, pwq_tbl, nr_cpu_ids), GFP_KERNEL);
4775 
4776 	new_attrs = alloc_workqueue_attrs();
4777 	if (!ctx || !new_attrs)
4778 		goto out_free;
4779 
4780 	/*
4781 	 * If something goes wrong during CPU up/down, we'll fall back to
4782 	 * the default pwq covering the whole @attrs->cpumask.  Always create
4783 	 * it even if we don't use it immediately.
4784 	 */
4785 	copy_workqueue_attrs(new_attrs, attrs);
4786 	wqattrs_actualize_cpumask(new_attrs, unbound_cpumask);
4787 	cpumask_copy(new_attrs->__pod_cpumask, new_attrs->cpumask);
4788 	ctx->dfl_pwq = alloc_unbound_pwq(wq, new_attrs);
4789 	if (!ctx->dfl_pwq)
4790 		goto out_free;
4791 
4792 	for_each_possible_cpu(cpu) {
4793 		if (new_attrs->ordered) {
4794 			ctx->dfl_pwq->refcnt++;
4795 			ctx->pwq_tbl[cpu] = ctx->dfl_pwq;
4796 		} else {
4797 			wq_calc_pod_cpumask(new_attrs, cpu, -1);
4798 			ctx->pwq_tbl[cpu] = alloc_unbound_pwq(wq, new_attrs);
4799 			if (!ctx->pwq_tbl[cpu])
4800 				goto out_free;
4801 		}
4802 	}
4803 
4804 	/* save the user configured attrs and sanitize it. */
4805 	copy_workqueue_attrs(new_attrs, attrs);
4806 	cpumask_and(new_attrs->cpumask, new_attrs->cpumask, cpu_possible_mask);
4807 	cpumask_copy(new_attrs->__pod_cpumask, new_attrs->cpumask);
4808 	ctx->attrs = new_attrs;
4809 
4810 	ctx->wq = wq;
4811 	return ctx;
4812 
4813 out_free:
4814 	free_workqueue_attrs(new_attrs);
4815 	apply_wqattrs_cleanup(ctx);
4816 	return ERR_PTR(-ENOMEM);
4817 }
4818 
4819 /* set attrs and install prepared pwqs, @ctx points to old pwqs on return */
4820 static void apply_wqattrs_commit(struct apply_wqattrs_ctx *ctx)
4821 {
4822 	int cpu;
4823 
4824 	/* all pwqs have been created successfully, let's install'em */
4825 	mutex_lock(&ctx->wq->mutex);
4826 
4827 	copy_workqueue_attrs(ctx->wq->unbound_attrs, ctx->attrs);
4828 
4829 	/* save the previous pwqs and install the new ones */
4830 	for_each_possible_cpu(cpu)
4831 		ctx->pwq_tbl[cpu] = install_unbound_pwq(ctx->wq, cpu,
4832 							ctx->pwq_tbl[cpu]);
4833 	ctx->dfl_pwq = install_unbound_pwq(ctx->wq, -1, ctx->dfl_pwq);
4834 
4835 	/* update node_nr_active->max */
4836 	wq_update_node_max_active(ctx->wq, -1);
4837 
4838 	mutex_unlock(&ctx->wq->mutex);
4839 }
4840 
4841 static int apply_workqueue_attrs_locked(struct workqueue_struct *wq,
4842 					const struct workqueue_attrs *attrs)
4843 {
4844 	struct apply_wqattrs_ctx *ctx;
4845 
4846 	/* only unbound workqueues can change attributes */
4847 	if (WARN_ON(!(wq->flags & WQ_UNBOUND)))
4848 		return -EINVAL;
4849 
4850 	/* creating multiple pwqs breaks ordering guarantee */
4851 	if (!list_empty(&wq->pwqs)) {
4852 		if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
4853 			return -EINVAL;
4854 
4855 		wq->flags &= ~__WQ_ORDERED;
4856 	}
4857 
4858 	ctx = apply_wqattrs_prepare(wq, attrs, wq_unbound_cpumask);
4859 	if (IS_ERR(ctx))
4860 		return PTR_ERR(ctx);
4861 
4862 	/* the ctx has been prepared successfully, let's commit it */
4863 	apply_wqattrs_commit(ctx);
4864 	apply_wqattrs_cleanup(ctx);
4865 
4866 	return 0;
4867 }
4868 
4869 /**
4870  * apply_workqueue_attrs - apply new workqueue_attrs to an unbound workqueue
4871  * @wq: the target workqueue
4872  * @attrs: the workqueue_attrs to apply, allocated with alloc_workqueue_attrs()
4873  *
4874  * Apply @attrs to an unbound workqueue @wq. Unless disabled, this function maps
4875  * a separate pwq to each CPU pod with possible CPUs in @attrs->cpumask so that
4876  * work items are affine to the pod they were issued on. Older pwqs are released
4877  * as in-flight work items finish. Note that a work item which repeatedly
4878  * requeues itself back-to-back will stay on its current pwq.
4879  *
4880  * Performs GFP_KERNEL allocations.
4881  *
4882  * Assumes caller has CPU hotplug read exclusion, i.e. cpus_read_lock().
4883  *
4884  * Return: 0 on success and -errno on failure.
4885  */
4886 int apply_workqueue_attrs(struct workqueue_struct *wq,
4887 			  const struct workqueue_attrs *attrs)
4888 {
4889 	int ret;
4890 
4891 	lockdep_assert_cpus_held();
4892 
4893 	mutex_lock(&wq_pool_mutex);
4894 	ret = apply_workqueue_attrs_locked(wq, attrs);
4895 	mutex_unlock(&wq_pool_mutex);
4896 
4897 	return ret;
4898 }
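
/*
 * Illustrative sketch with a made-up my_unbound_wq (which must have
 * WQ_UNBOUND set); error handling trimmed.  Give the workqueue
 * nicer-than-default workers:
 *
 *	struct workqueue_attrs *attrs;
 *
 *	attrs = alloc_workqueue_attrs();
 *	attrs->nice = -10;
 *	cpus_read_lock();
 *	ret = apply_workqueue_attrs(my_unbound_wq, attrs);
 *	cpus_read_unlock();
 *	free_workqueue_attrs(attrs);
 */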
4899 
4900 /**
4901  * wq_update_pod - update pod affinity of a wq for CPU hot[un]plug
4902  * @wq: the target workqueue
4903  * @cpu: the CPU to update pool association for
4904  * @hotplug_cpu: the CPU coming up or going down
4905  * @online: whether @cpu is coming up or going down
4906  *
4907  * This function is to be called from %CPU_DOWN_PREPARE, %CPU_ONLINE and
4908  * %CPU_DOWN_FAILED.  @cpu is being hot[un]plugged, update pod affinity of
4909  * @wq accordingly.
4910  *
4911  *
4913  * back to @wq->dfl_pwq which may not be optimal but is always correct.
4914  *
4915  * Note that when the last allowed CPU of a pod goes offline for a workqueue
4916  * with a cpumask spanning multiple pods, the workers which were already
4917  * executing the work items for the workqueue will lose their CPU affinity and
4918  * may execute on any CPU. This is similar to how per-cpu workqueues behave on
4919  * CPU_DOWN. If a workqueue user wants strict affinity, it's the user's
4920  * responsibility to flush the work item from CPU_DOWN_PREPARE.
4921  */
4922 static void wq_update_pod(struct workqueue_struct *wq, int cpu,
4923 			  int hotplug_cpu, bool online)
4924 {
4925 	int off_cpu = online ? -1 : hotplug_cpu;
4926 	struct pool_workqueue *old_pwq = NULL, *pwq;
4927 	struct workqueue_attrs *target_attrs;
4928 
4929 	lockdep_assert_held(&wq_pool_mutex);
4930 
4931 	if (!(wq->flags & WQ_UNBOUND) || wq->unbound_attrs->ordered)
4932 		return;
4933 
4934 	/*
4935 	 * We don't wanna alloc/free wq_attrs for each wq for each CPU.
4936 	 * Let's use a preallocated one.  The following buf is protected by
4937 	 * CPU hotplug exclusion.
4938 	 */
4939 	target_attrs = wq_update_pod_attrs_buf;
4940 
4941 	copy_workqueue_attrs(target_attrs, wq->unbound_attrs);
4942 	wqattrs_actualize_cpumask(target_attrs, wq_unbound_cpumask);
4943 
4944 	/* nothing to do if the target cpumask matches the current pwq */
4945 	wq_calc_pod_cpumask(target_attrs, cpu, off_cpu);
4946 	if (wqattrs_equal(target_attrs, unbound_pwq(wq, cpu)->pool->attrs))
4947 		return;
4948 
4949 	/* create a new pwq */
4950 	pwq = alloc_unbound_pwq(wq, target_attrs);
4951 	if (!pwq) {
4952 		pr_warn("workqueue: allocation failed while updating CPU pod affinity of \"%s\"\n",
4953 			wq->name);
4954 		goto use_dfl_pwq;
4955 	}
4956 
4957 	/* Install the new pwq. */
4958 	mutex_lock(&wq->mutex);
4959 	old_pwq = install_unbound_pwq(wq, cpu, pwq);
4960 	goto out_unlock;
4961 
4962 use_dfl_pwq:
4963 	mutex_lock(&wq->mutex);
4964 	pwq = unbound_pwq(wq, -1);
4965 	raw_spin_lock_irq(&pwq->pool->lock);
4966 	get_pwq(pwq);
4967 	raw_spin_unlock_irq(&pwq->pool->lock);
4968 	old_pwq = install_unbound_pwq(wq, cpu, pwq);
4969 out_unlock:
4970 	mutex_unlock(&wq->mutex);
4971 	put_pwq_unlocked(old_pwq);
4972 }
4973 
4974 static int alloc_and_link_pwqs(struct workqueue_struct *wq)
4975 {
4976 	bool highpri = wq->flags & WQ_HIGHPRI;
4977 	int cpu, ret;
4978 
4979 	wq->cpu_pwq = alloc_percpu(struct pool_workqueue *);
4980 	if (!wq->cpu_pwq)
4981 		goto enomem;
4982 
4983 	if (!(wq->flags & WQ_UNBOUND)) {
4984 		for_each_possible_cpu(cpu) {
4985 			struct pool_workqueue **pwq_p =
4986 				per_cpu_ptr(wq->cpu_pwq, cpu);
4987 			struct worker_pool *pool =
4988 				&(per_cpu_ptr(cpu_worker_pools, cpu)[highpri]);
4989 
4990 			*pwq_p = kmem_cache_alloc_node(pwq_cache, GFP_KERNEL,
4991 						       pool->node);
4992 			if (!*pwq_p)
4993 				goto enomem;
4994 
4995 			init_pwq(*pwq_p, wq, pool);
4996 
4997 			mutex_lock(&wq->mutex);
4998 			link_pwq(*pwq_p);
4999 			mutex_unlock(&wq->mutex);
5000 		}
5001 		return 0;
5002 	}
5003 
5004 	cpus_read_lock();
5005 	if (wq->flags & __WQ_ORDERED) {
5006 		struct pool_workqueue *dfl_pwq;
5007 
5008 		ret = apply_workqueue_attrs(wq, ordered_wq_attrs[highpri]);
5009 		/* there should only be a single pwq for the ordering guarantee */
5010 		dfl_pwq = rcu_access_pointer(wq->dfl_pwq);
5011 		WARN(!ret && (wq->pwqs.next != &dfl_pwq->pwqs_node ||
5012 			      wq->pwqs.prev != &dfl_pwq->pwqs_node),
5013 		     "ordering guarantee broken for workqueue %s\n", wq->name);
5014 	} else {
5015 		ret = apply_workqueue_attrs(wq, unbound_std_wq_attrs[highpri]);
5016 	}
5017 	cpus_read_unlock();
5018 
5019 	/* for unbound pwqs, flushing pwq_release_worker ensures that
5020 	 * pwq_release_workfn() completes before calling kfree(wq).
5021 	 */
5022 	if (ret)
5023 		kthread_flush_worker(pwq_release_worker);
5024 
5025 	return ret;
5026 
5027 enomem:
5028 	if (wq->cpu_pwq) {
5029 		for_each_possible_cpu(cpu) {
5030 			struct pool_workqueue *pwq = *per_cpu_ptr(wq->cpu_pwq, cpu);
5031 
5032 			if (pwq)
5033 				kmem_cache_free(pwq_cache, pwq);
5034 		}
5035 		free_percpu(wq->cpu_pwq);
5036 		wq->cpu_pwq = NULL;
5037 	}
5038 	return -ENOMEM;
5039 }
5040 
5041 static int wq_clamp_max_active(int max_active, unsigned int flags,
5042 			       const char *name)
5043 {
5044 	if (max_active < 1 || max_active > WQ_MAX_ACTIVE)
5045 		pr_warn("workqueue: max_active %d requested for %s is out of range, clamping between %d and %d\n",
5046 			max_active, name, 1, WQ_MAX_ACTIVE);
5047 
5048 	return clamp_val(max_active, 1, WQ_MAX_ACTIVE);
5049 }
5050 
5051 /*
5052  * Workqueues which may be used during memory reclaim should have a rescuer
5053  * to guarantee forward progress.
5054  */
5055 static int init_rescuer(struct workqueue_struct *wq)
5056 {
5057 	struct worker *rescuer;
5058 	int ret;
5059 
5060 	if (!(wq->flags & WQ_MEM_RECLAIM))
5061 		return 0;
5062 
5063 	rescuer = alloc_worker(NUMA_NO_NODE);
5064 	if (!rescuer) {
5065 		pr_err("workqueue: Failed to allocate a rescuer for wq \"%s\"\n",
5066 		       wq->name);
5067 		return -ENOMEM;
5068 	}
5069 
5070 	rescuer->rescue_wq = wq;
5071 	rescuer->task = kthread_create(rescuer_thread, rescuer, "kworker/R-%s", wq->name);
5072 	if (IS_ERR(rescuer->task)) {
5073 		ret = PTR_ERR(rescuer->task);
5074 		pr_err("workqueue: Failed to create a rescuer kthread for wq \"%s\": %pe",
5075 		       wq->name, ERR_PTR(ret));
5076 		kfree(rescuer);
5077 		return ret;
5078 	}
5079 
5080 	wq->rescuer = rescuer;
5081 	if (wq->flags & WQ_UNBOUND)
5082 		kthread_bind_mask(rescuer->task, wq->unbound_attrs->cpumask);
5083 	else
5084 		kthread_bind_mask(rescuer->task, cpu_possible_mask);
5085 	wake_up_process(rescuer->task);
5086 
5087 	return 0;
5088 }
5089 
5090 /**
5091  * wq_adjust_max_active - update a wq's max_active to the current setting
5092  * @wq: target workqueue
5093  *
5094  * If @wq isn't freezing, set @wq->max_active to the saved_max_active and
5095  * activate inactive work items accordingly. If @wq is freezing, clear
5096  * @wq->max_active to zero.
5097  */
5098 static void wq_adjust_max_active(struct workqueue_struct *wq)
5099 {
5100 	bool activated;
5101 	int new_max, new_min;
5102 
5103 	lockdep_assert_held(&wq->mutex);
5104 
5105 	if ((wq->flags & WQ_FREEZABLE) && workqueue_freezing) {
5106 		new_max = 0;
5107 		new_min = 0;
5108 	} else {
5109 		new_max = wq->saved_max_active;
5110 		new_min = wq->saved_min_active;
5111 	}
5112 
5113 	if (wq->max_active == new_max && wq->min_active == new_min)
5114 		return;
5115 
5116 	/*
5117 	 * Update @wq->max/min_active and then kick inactive work items if more
5118 	 * active work items are allowed. This doesn't break work item ordering
5119 	 * because new work items are always queued behind existing inactive
5120 	 * work items if there are any.
5121 	 */
5122 	WRITE_ONCE(wq->max_active, new_max);
5123 	WRITE_ONCE(wq->min_active, new_min);
5124 
5125 	if (wq->flags & WQ_UNBOUND)
5126 		wq_update_node_max_active(wq, -1);
5127 
5128 	if (new_max == 0)
5129 		return;
5130 
5131 	/*
5132 	 * Round-robin through pwqs, activating the first inactive work item
5133 	 * until max_active is filled.
5134 	 */
5135 	do {
5136 		struct pool_workqueue *pwq;
5137 
5138 		activated = false;
5139 		for_each_pwq(pwq, wq) {
5140 			unsigned long flags;
5141 
5142 			/* can be called during early boot w/ irq disabled */
5143 			raw_spin_lock_irqsave(&pwq->pool->lock, flags);
5144 			if (pwq_activate_first_inactive(pwq, true)) {
5145 				activated = true;
5146 				kick_pool(pwq->pool);
5147 			}
5148 			raw_spin_unlock_irqrestore(&pwq->pool->lock, flags);
5149 		}
5150 	} while (activated);
5151 }
5152 
5153 __printf(1, 4)
5154 struct workqueue_struct *alloc_workqueue(const char *fmt,
5155 					 unsigned int flags,
5156 					 int max_active, ...)
5157 {
5158 	va_list args;
5159 	struct workqueue_struct *wq;
5160 	size_t wq_size;
5161 	int name_len;
5162 
5163 	/*
5164 	 * Unbound && max_active == 1 used to imply ordered, which is no longer
5165 	 * the case on many machines due to per-pod pools. While
5166 	 * alloc_ordered_workqueue() is the right way to create an ordered
5167 	 * workqueue, keep the previous behavior to avoid subtle breakages.
5168 	 */
5169 	if ((flags & WQ_UNBOUND) && max_active == 1)
5170 		flags |= __WQ_ORDERED;
5171 
5172 	/* see the comment above the definition of WQ_POWER_EFFICIENT */
5173 	if ((flags & WQ_POWER_EFFICIENT) && wq_power_efficient)
5174 		flags |= WQ_UNBOUND;
5175 
5176 	/* allocate wq and format name */
5177 	if (flags & WQ_UNBOUND)
5178 		wq_size = struct_size(wq, node_nr_active, nr_node_ids + 1);
5179 	else
5180 		wq_size = sizeof(*wq);
5181 
5182 	wq = kzalloc(wq_size, GFP_KERNEL);
5183 	if (!wq)
5184 		return NULL;
5185 
5186 	if (flags & WQ_UNBOUND) {
5187 		wq->unbound_attrs = alloc_workqueue_attrs();
5188 		if (!wq->unbound_attrs)
5189 			goto err_free_wq;
5190 	}
5191 
5192 	va_start(args, max_active);
5193 	name_len = vsnprintf(wq->name, sizeof(wq->name), fmt, args);
5194 	va_end(args);
5195 
5196 	if (name_len >= WQ_NAME_LEN)
5197 		pr_warn_once("workqueue: name exceeds WQ_NAME_LEN. Truncating to: %s\n",
5198 			     wq->name);
5199 
5200 	max_active = max_active ?: WQ_DFL_ACTIVE;
5201 	max_active = wq_clamp_max_active(max_active, flags, wq->name);
5202 
5203 	/* init wq */
5204 	wq->flags = flags;
5205 	wq->max_active = max_active;
5206 	wq->min_active = min(max_active, WQ_DFL_MIN_ACTIVE);
5207 	wq->saved_max_active = wq->max_active;
5208 	wq->saved_min_active = wq->min_active;
5209 	mutex_init(&wq->mutex);
5210 	atomic_set(&wq->nr_pwqs_to_flush, 0);
5211 	INIT_LIST_HEAD(&wq->pwqs);
5212 	INIT_LIST_HEAD(&wq->flusher_queue);
5213 	INIT_LIST_HEAD(&wq->flusher_overflow);
5214 	INIT_LIST_HEAD(&wq->maydays);
5215 
5216 	wq_init_lockdep(wq);
5217 	INIT_LIST_HEAD(&wq->list);
5218 
5219 	if (flags & WQ_UNBOUND) {
5220 		if (alloc_node_nr_active(wq->node_nr_active) < 0)
5221 			goto err_unreg_lockdep;
5222 	}
5223 
5224 	if (alloc_and_link_pwqs(wq) < 0)
5225 		goto err_free_node_nr_active;
5226 
5227 	if (wq_online && init_rescuer(wq) < 0)
5228 		goto err_destroy;
5229 
5230 	if ((wq->flags & WQ_SYSFS) && workqueue_sysfs_register(wq))
5231 		goto err_destroy;
5232 
5233 	/*
5234 	 * wq_pool_mutex protects global freeze state and workqueues list.
5235 	 * Grab it, adjust max_active and add the new @wq to workqueues
5236 	 * list.
5237 	 */
5238 	mutex_lock(&wq_pool_mutex);
5239 
5240 	mutex_lock(&wq->mutex);
5241 	wq_adjust_max_active(wq);
5242 	mutex_unlock(&wq->mutex);
5243 
5244 	list_add_tail_rcu(&wq->list, &workqueues);
5245 
5246 	mutex_unlock(&wq_pool_mutex);
5247 
5248 	return wq;
5249 
5250 err_free_node_nr_active:
5251 	if (wq->flags & WQ_UNBOUND)
5252 		free_node_nr_active(wq->node_nr_active);
5253 err_unreg_lockdep:
5254 	wq_unregister_lockdep(wq);
5255 	wq_free_lockdep(wq);
5256 err_free_wq:
5257 	free_workqueue_attrs(wq->unbound_attrs);
5258 	kfree(wq);
5259 	return NULL;
5260 err_destroy:
5261 	destroy_workqueue(wq);
5262 	return NULL;
5263 }
5264 EXPORT_SYMBOL_GPL(alloc_workqueue);
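
/*
 * Editorial example (not part of the kernel source): common alloc_workqueue()
 * invocations.  Names are hypothetical and error unwinding is elided.
 */
#if 0
static int example_init_wqs(void)
{
	struct workqueue_struct *wq, *reclaim_wq, *ordered_wq;

	/* per-cpu workqueue with the default max_active */
	wq = alloc_workqueue("example", 0, 0);

	/* unbound workqueue usable during memory reclaim (gets a rescuer) */
	reclaim_wq = alloc_workqueue("example_reclaim",
				     WQ_UNBOUND | WQ_MEM_RECLAIM, 0);

	/* strictly ordered workqueue, one work item at a time */
	ordered_wq = alloc_ordered_workqueue("example_ordered", WQ_FREEZABLE);

	if (!wq || !reclaim_wq || !ordered_wq)
		return -ENOMEM;	/* allocation failure is reported as NULL */
	return 0;
}
#endif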
5265 
5266 static bool pwq_busy(struct pool_workqueue *pwq)
5267 {
5268 	int i;
5269 
5270 	for (i = 0; i < WORK_NR_COLORS; i++)
5271 		if (pwq->nr_in_flight[i])
5272 			return true;
5273 
5274 	if ((pwq != rcu_access_pointer(pwq->wq->dfl_pwq)) && (pwq->refcnt > 1))
5275 		return true;
5276 	if (!pwq_is_empty(pwq))
5277 		return true;
5278 
5279 	return false;
5280 }
5281 
5282 /**
5283  * destroy_workqueue - safely terminate a workqueue
5284  * @wq: target workqueue
5285  *
5286  * Safely destroy a workqueue. All work currently pending will be done first.
5287  */
5288 void destroy_workqueue(struct workqueue_struct *wq)
5289 {
5290 	struct pool_workqueue *pwq;
5291 	int cpu;
5292 
5293 	/*
5294 	 * Remove it from sysfs first so that sanity check failure doesn't
5295 	 * lead to sysfs name conflicts.
5296 	 */
5297 	workqueue_sysfs_unregister(wq);
5298 
5299 	/* mark that workqueue destruction is in progress */
5300 	mutex_lock(&wq->mutex);
5301 	wq->flags |= __WQ_DESTROYING;
5302 	mutex_unlock(&wq->mutex);
5303 
5304 	/* drain it before proceeding with destruction */
5305 	drain_workqueue(wq);
5306 
5307 	/* kill the rescuer; if sanity checks fail later, leave @wq w/o a rescuer */
5308 	if (wq->rescuer) {
5309 		struct worker *rescuer = wq->rescuer;
5310 
5311 		/* this prevents new queueing */
5312 		raw_spin_lock_irq(&wq_mayday_lock);
5313 		wq->rescuer = NULL;
5314 		raw_spin_unlock_irq(&wq_mayday_lock);
5315 
5316 		/* rescuer will empty maydays list before exiting */
5317 		kthread_stop(rescuer->task);
5318 		kfree(rescuer);
5319 	}
5320 
5321 	/*
5322 	 * Sanity checks - grab all the locks so that we wait for all
5323 	 * in-flight operations which may do put_pwq().
5324 	 */
5325 	mutex_lock(&wq_pool_mutex);
5326 	mutex_lock(&wq->mutex);
5327 	for_each_pwq(pwq, wq) {
5328 		raw_spin_lock_irq(&pwq->pool->lock);
5329 		if (WARN_ON(pwq_busy(pwq))) {
5330 			pr_warn("%s: %s has the following busy pwq\n",
5331 				__func__, wq->name);
5332 			show_pwq(pwq);
5333 			raw_spin_unlock_irq(&pwq->pool->lock);
5334 			mutex_unlock(&wq->mutex);
5335 			mutex_unlock(&wq_pool_mutex);
5336 			show_one_workqueue(wq);
5337 			return;
5338 		}
5339 		raw_spin_unlock_irq(&pwq->pool->lock);
5340 	}
5341 	mutex_unlock(&wq->mutex);
5342 
5343 	/*
5344 	 * The wq list is used to freeze wqs; remove @wq from the list
5345 	 * after flushing is complete in case a freeze races us.
5346 	 */
5347 	list_del_rcu(&wq->list);
5348 	mutex_unlock(&wq_pool_mutex);
5349 
5350 	/*
5351 	 * We're the sole accessor of @wq. Directly access cpu_pwq and dfl_pwq
5352 	 * to put the base refs. @wq will be auto-destroyed from the last
5353 	 * pwq_put. RCU read lock prevents @wq from going away from under us.
5354 	 */
5355 	rcu_read_lock();
5356 
5357 	for_each_possible_cpu(cpu) {
5358 		put_pwq_unlocked(unbound_pwq(wq, cpu));
5359 		RCU_INIT_POINTER(*unbound_pwq_slot(wq, cpu), NULL);
5360 	}
5361 
5362 	put_pwq_unlocked(unbound_pwq(wq, -1));
5363 	RCU_INIT_POINTER(*unbound_pwq_slot(wq, -1), NULL);
5364 
5365 	rcu_read_unlock();
5366 }
5367 EXPORT_SYMBOL_GPL(destroy_workqueue);
5368 
5369 /**
5370  * workqueue_set_max_active - adjust max_active of a workqueue
5371  * @wq: target workqueue
5372  * @max_active: new max_active value.
5373  *
5374  * Set max_active of @wq to @max_active. See the alloc_workqueue() function
5375  * comment.
5376  *
5377  * CONTEXT:
5378  * Don't call from IRQ context.
5379  */
5380 void workqueue_set_max_active(struct workqueue_struct *wq, int max_active)
5381 {
5382 	/* disallow meddling with max_active for ordered workqueues */
5383 	if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
5384 		return;
5385 
5386 	max_active = wq_clamp_max_active(max_active, wq->flags, wq->name);
5387 
5388 	mutex_lock(&wq->mutex);
5389 
5390 	wq->flags &= ~__WQ_ORDERED;
5391 	wq->saved_max_active = max_active;
5392 	if (wq->flags & WQ_UNBOUND)
5393 		wq->saved_min_active = min(wq->saved_min_active, max_active);
5394 
5395 	wq_adjust_max_active(wq);
5396 
5397 	mutex_unlock(&wq->mutex);
5398 }
5399 EXPORT_SYMBOL_GPL(workqueue_set_max_active);
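
/*
 * Editorial example (not part of the kernel source): throttling a
 * hypothetical non-ordered workqueue at runtime.
 */
#if 0
	/* allow at most 4 concurrently active work items on @example_wq */
	workqueue_set_max_active(example_wq, 4);
#endif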
5400 
5401 /**
5402  * current_work - retrieve %current task's work struct
5403  *
5404  * Determine if %current task is a workqueue worker and what it's working on.
5405  * Useful to find out the context that the %current task is running in.
5406  *
5407  * Return: work struct if %current task is a workqueue worker, %NULL otherwise.
5408  */
5409 struct work_struct *current_work(void)
5410 {
5411 	struct worker *worker = current_wq_worker();
5412 
5413 	return worker ? worker->current_work : NULL;
5414 }
5415 EXPORT_SYMBOL(current_work);
5416 
5417 /**
5418  * current_is_workqueue_rescuer - is %current workqueue rescuer?
5419  *
5420  * Determine whether %current is a workqueue rescuer.  Can be used from
5421  * work functions to determine whether it's being run off the rescuer task.
5422  *
5423  * Return: %true if %current is a workqueue rescuer. %false otherwise.
5424  */
5425 bool current_is_workqueue_rescuer(void)
5426 {
5427 	struct worker *worker = current_wq_worker();
5428 
5429 	return worker && worker->rescue_wq;
5430 }
5431 
5432 /**
5433  * workqueue_congested - test whether a workqueue is congested
5434  * @cpu: CPU in question
5435  * @wq: target workqueue
5436  *
5437  * Test whether @wq's cpu workqueue for @cpu is congested.  There is
5438  * no synchronization around this function and the test result is
5439  * unreliable and only useful as advisory hints or for debugging.
5440  *
5441  * If @cpu is WORK_CPU_UNBOUND, the test is performed on the local CPU.
5442  *
5443  * With the exception of ordered workqueues, all workqueues have per-cpu
5444  * pool_workqueues, each with its own congested state. A workqueue being
5445  * congested on one CPU doesn't mean that the workqueue is congested on any
5446  * other CPUs.
5447  *
5448  * Return:
5449  * %true if congested, %false otherwise.
5450  */
5451 bool workqueue_congested(int cpu, struct workqueue_struct *wq)
5452 {
5453 	struct pool_workqueue *pwq;
5454 	bool ret;
5455 
5456 	rcu_read_lock();
5457 	preempt_disable();
5458 
5459 	if (cpu == WORK_CPU_UNBOUND)
5460 		cpu = smp_processor_id();
5461 
5462 	pwq = *per_cpu_ptr(wq->cpu_pwq, cpu);
5463 	ret = !list_empty(&pwq->inactive_works);
5464 
5465 	preempt_enable();
5466 	rcu_read_unlock();
5467 
5468 	return ret;
5469 }
5470 EXPORT_SYMBOL_GPL(workqueue_congested);
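
/*
 * Editorial example (not part of the kernel source): using the advisory
 * congestion hint to pick a less busy target.  "example_*" names are
 * hypothetical and the test may be stale by the time it's acted upon.
 */
#if 0
	if (!workqueue_congested(WORK_CPU_UNBOUND, example_wq))
		queue_work(example_wq, &example_work);
	else
		queue_work_on(example_fallback_cpu, example_wq, &example_work);
#endif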
5471 
5472 /**
5473  * work_busy - test whether a work is currently pending or running
5474  * @work: the work to be tested
5475  *
5476  * Test whether @work is currently pending or running.  There is no
5477  * synchronization around this function and the test result is
5478  * unreliable and only useful as advisory hints or for debugging.
5479  *
5480  * Return:
5481  * OR'd bitmask of WORK_BUSY_* bits.
5482  */
5483 unsigned int work_busy(struct work_struct *work)
5484 {
5485 	struct worker_pool *pool;
5486 	unsigned long flags;
5487 	unsigned int ret = 0;
5488 
5489 	if (work_pending(work))
5490 		ret |= WORK_BUSY_PENDING;
5491 
5492 	rcu_read_lock();
5493 	pool = get_work_pool(work);
5494 	if (pool) {
5495 		raw_spin_lock_irqsave(&pool->lock, flags);
5496 		if (find_worker_executing_work(pool, work))
5497 			ret |= WORK_BUSY_RUNNING;
5498 		raw_spin_unlock_irqrestore(&pool->lock, flags);
5499 	}
5500 	rcu_read_unlock();
5501 
5502 	return ret;
5503 }
5504 EXPORT_SYMBOL_GPL(work_busy);
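
/*
 * Editorial example (not part of the kernel source): work_busy() as a
 * debugging aid only; the snapshot may be stale immediately.
 */
#if 0
	unsigned int busy = work_busy(&example_work);

	pr_debug("example_work:%s%s\n",
		 (busy & WORK_BUSY_PENDING) ? " pending" : "",
		 (busy & WORK_BUSY_RUNNING) ? " running" : "");
#endif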
5505 
5506 /**
5507  * set_worker_desc - set description for the current work item
5508  * @fmt: printf-style format string
5509  * @...: arguments for the format string
5510  *
5511  * This function can be called by a running work function to describe what
5512  * the work item is about.  If the worker task gets dumped, this
5513  * information will be printed out along with the dump to help debugging.  The
5514  * description can be at most WORKER_DESC_LEN including the trailing '\0'.
5515  */
5516 void set_worker_desc(const char *fmt, ...)
5517 {
5518 	struct worker *worker = current_wq_worker();
5519 	va_list args;
5520 
5521 	if (worker) {
5522 		va_start(args, fmt);
5523 		vsnprintf(worker->desc, sizeof(worker->desc), fmt, args);
5524 		va_end(args);
5525 	}
5526 }
5527 EXPORT_SYMBOL_GPL(set_worker_desc);
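
/*
 * Editorial example (not part of the kernel source): tagging the worker with
 * the object being processed so that task dumps are more informative.
 * "struct example_req" and its fields are hypothetical.
 */
#if 0
static void example_req_work_fn(struct work_struct *work)
{
	struct example_req *req = container_of(work, struct example_req, work);

	set_worker_desc("example dev%d req%llu", req->dev_id, req->seq);
	example_process(req);
}
#endif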
5528 
5529 /**
5530  * print_worker_info - print out worker information and description
5531  * @log_lvl: the log level to use when printing
5532  * @task: target task
5533  *
5534  * If @task is a worker and currently executing a work item, print out the
5535  * name of the workqueue being serviced and worker description set with
5536  * set_worker_desc() by the currently executing work item.
5537  *
5538  * This function can be safely called on any task as long as the
5539  * task_struct itself is accessible.  While safe, this function isn't
5540  * synchronized and may print out mixed-up or garbled output of limited length.
5541  */
5542 void print_worker_info(const char *log_lvl, struct task_struct *task)
5543 {
5544 	work_func_t *fn = NULL;
5545 	char name[WQ_NAME_LEN] = { };
5546 	char desc[WORKER_DESC_LEN] = { };
5547 	struct pool_workqueue *pwq = NULL;
5548 	struct workqueue_struct *wq = NULL;
5549 	struct worker *worker;
5550 
5551 	if (!(task->flags & PF_WQ_WORKER))
5552 		return;
5553 
5554 	/*
5555 	 * This function is called without any synchronization and @task
5556 	 * could be in any state.  Be careful with dereferences.
5557 	 */
5558 	worker = kthread_probe_data(task);
5559 
5560 	/*
5561 	 * Carefully copy the associated workqueue's workfn, name and desc.
5562 	 * Keep the original last '\0' in case the original is garbage.
5563 	 */
5564 	copy_from_kernel_nofault(&fn, &worker->current_func, sizeof(fn));
5565 	copy_from_kernel_nofault(&pwq, &worker->current_pwq, sizeof(pwq));
5566 	copy_from_kernel_nofault(&wq, &pwq->wq, sizeof(wq));
5567 	copy_from_kernel_nofault(name, wq->name, sizeof(name) - 1);
5568 	copy_from_kernel_nofault(desc, worker->desc, sizeof(desc) - 1);
5569 
5570 	if (fn || name[0] || desc[0]) {
5571 		printk("%sWorkqueue: %s %ps", log_lvl, name, fn);
5572 		if (strcmp(name, desc))
5573 			pr_cont(" (%s)", desc);
5574 		pr_cont("\n");
5575 	}
5576 }
5577 
5578 static void pr_cont_pool_info(struct worker_pool *pool)
5579 {
5580 	pr_cont(" cpus=%*pbl", nr_cpumask_bits, pool->attrs->cpumask);
5581 	if (pool->node != NUMA_NO_NODE)
5582 		pr_cont(" node=%d", pool->node);
5583 	pr_cont(" flags=0x%x nice=%d", pool->flags, pool->attrs->nice);
5584 }
5585 
5586 struct pr_cont_work_struct {
5587 	bool comma;
5588 	work_func_t func;
5589 	long ctr;
5590 };
5591 
5592 static void pr_cont_work_flush(bool comma, work_func_t func, struct pr_cont_work_struct *pcwsp)
5593 {
5594 	if (!pcwsp->ctr)
5595 		goto out_record;
5596 	if (func == pcwsp->func) {
5597 		pcwsp->ctr++;
5598 		return;
5599 	}
5600 	if (pcwsp->ctr == 1)
5601 		pr_cont("%s %ps", pcwsp->comma ? "," : "", pcwsp->func);
5602 	else
5603 		pr_cont("%s %ld*%ps", pcwsp->comma ? "," : "", pcwsp->ctr, pcwsp->func);
5604 	pcwsp->ctr = 0;
5605 out_record:
5606 	if ((long)func == -1L)
5607 		return;
5608 	pcwsp->comma = comma;
5609 	pcwsp->func = func;
5610 	pcwsp->ctr = 1;
5611 }
5612 
5613 static void pr_cont_work(bool comma, struct work_struct *work, struct pr_cont_work_struct *pcwsp)
5614 {
5615 	if (work->func == wq_barrier_func) {
5616 		struct wq_barrier *barr;
5617 
5618 		barr = container_of(work, struct wq_barrier, work);
5619 
5620 		pr_cont_work_flush(comma, (work_func_t)-1, pcwsp);
5621 		pr_cont("%s BAR(%d)", comma ? "," : "",
5622 			task_pid_nr(barr->task));
5623 	} else {
5624 		if (!comma)
5625 			pr_cont_work_flush(comma, (work_func_t)-1, pcwsp);
5626 		pr_cont_work_flush(comma, work->func, pcwsp);
5627 	}
5628 }
5629 
5630 static void show_pwq(struct pool_workqueue *pwq)
5631 {
5632 	struct pr_cont_work_struct pcws = { .ctr = 0, };
5633 	struct worker_pool *pool = pwq->pool;
5634 	struct work_struct *work;
5635 	struct worker *worker;
5636 	bool has_in_flight = false, has_pending = false;
5637 	int bkt;
5638 
5639 	pr_info("  pwq %d:", pool->id);
5640 	pr_cont_pool_info(pool);
5641 
5642 	pr_cont(" active=%d refcnt=%d%s\n",
5643 		pwq->nr_active, pwq->refcnt,
5644 		!list_empty(&pwq->mayday_node) ? " MAYDAY" : "");
5645 
5646 	hash_for_each(pool->busy_hash, bkt, worker, hentry) {
5647 		if (worker->current_pwq == pwq) {
5648 			has_in_flight = true;
5649 			break;
5650 		}
5651 	}
5652 	if (has_in_flight) {
5653 		bool comma = false;
5654 
5655 		pr_info("    in-flight:");
5656 		hash_for_each(pool->busy_hash, bkt, worker, hentry) {
5657 			if (worker->current_pwq != pwq)
5658 				continue;
5659 
5660 			pr_cont("%s %d%s:%ps", comma ? "," : "",
5661 				task_pid_nr(worker->task),
5662 				worker->rescue_wq ? "(RESCUER)" : "",
5663 				worker->current_func);
5664 			list_for_each_entry(work, &worker->scheduled, entry)
5665 				pr_cont_work(false, work, &pcws);
5666 			pr_cont_work_flush(comma, (work_func_t)-1L, &pcws);
5667 			comma = true;
5668 		}
5669 		pr_cont("\n");
5670 	}
5671 
5672 	list_for_each_entry(work, &pool->worklist, entry) {
5673 		if (get_work_pwq(work) == pwq) {
5674 			has_pending = true;
5675 			break;
5676 		}
5677 	}
5678 	if (has_pending) {
5679 		bool comma = false;
5680 
5681 		pr_info("    pending:");
5682 		list_for_each_entry(work, &pool->worklist, entry) {
5683 			if (get_work_pwq(work) != pwq)
5684 				continue;
5685 
5686 			pr_cont_work(comma, work, &pcws);
5687 			comma = !(*work_data_bits(work) & WORK_STRUCT_LINKED);
5688 		}
5689 		pr_cont_work_flush(comma, (work_func_t)-1L, &pcws);
5690 		pr_cont("\n");
5691 	}
5692 
5693 	if (!list_empty(&pwq->inactive_works)) {
5694 		bool comma = false;
5695 
5696 		pr_info("    inactive:");
5697 		list_for_each_entry(work, &pwq->inactive_works, entry) {
5698 			pr_cont_work(comma, work, &pcws);
5699 			comma = !(*work_data_bits(work) & WORK_STRUCT_LINKED);
5700 		}
5701 		pr_cont_work_flush(comma, (work_func_t)-1L, &pcws);
5702 		pr_cont("\n");
5703 	}
5704 }
5705 
5706 /**
5707  * show_one_workqueue - dump state of specified workqueue
5708  * @wq: workqueue whose state will be printed
5709  */
5710 void show_one_workqueue(struct workqueue_struct *wq)
5711 {
5712 	struct pool_workqueue *pwq;
5713 	bool idle = true;
5714 	unsigned long flags;
5715 
5716 	for_each_pwq(pwq, wq) {
5717 		if (!pwq_is_empty(pwq)) {
5718 			idle = false;
5719 			break;
5720 		}
5721 	}
5722 	if (idle) /* Nothing to print for idle workqueue */
5723 		return;
5724 
5725 	pr_info("workqueue %s: flags=0x%x\n", wq->name, wq->flags);
5726 
5727 	for_each_pwq(pwq, wq) {
5728 		raw_spin_lock_irqsave(&pwq->pool->lock, flags);
5729 		if (!pwq_is_empty(pwq)) {
5730 			/*
5731 			 * Defer printing to avoid deadlocks in console
5732 			 * drivers that queue work while holding locks
5733 			 * also taken in their write paths.
5734 			 */
5735 			printk_deferred_enter();
5736 			show_pwq(pwq);
5737 			printk_deferred_exit();
5738 		}
5739 		raw_spin_unlock_irqrestore(&pwq->pool->lock, flags);
5740 		/*
5741 		 * We could be printing a lot from atomic context, e.g.
5742 		 * sysrq-t -> show_all_workqueues(). Avoid triggering
5743 		 * hard lockup.
5744 		 */
5745 		touch_nmi_watchdog();
5746 	}
5747 
5749 }
5750 /**
5751  * show_one_worker_pool - dump state of specified worker pool
5752  * @pool: worker pool whose state will be printed
5753  */
5754 static void show_one_worker_pool(struct worker_pool *pool)
5755 {
5756 	struct worker *worker;
5757 	bool first = true;
5758 	unsigned long flags;
5759 	unsigned long hung = 0;
5760 
5761 	raw_spin_lock_irqsave(&pool->lock, flags);
5762 	if (pool->nr_workers == pool->nr_idle)
5763 		goto next_pool;
5764 
5765 	/* How long the first pending work is waiting for a worker. */
5766 	if (!list_empty(&pool->worklist))
5767 		hung = jiffies_to_msecs(jiffies - pool->watchdog_ts) / 1000;
5768 
5769 	/*
5770 	 * Defer printing to avoid deadlocks in console drivers that
5771 	 * queue work while holding locks also taken in their write
5772 	 * paths.
5773 	 */
5774 	printk_deferred_enter();
5775 	pr_info("pool %d:", pool->id);
5776 	pr_cont_pool_info(pool);
5777 	pr_cont(" hung=%lus workers=%d", hung, pool->nr_workers);
5778 	if (pool->manager)
5779 		pr_cont(" manager: %d",
5780 			task_pid_nr(pool->manager->task));
5781 	list_for_each_entry(worker, &pool->idle_list, entry) {
5782 		pr_cont(" %s%d", first ? "idle: " : "",
5783 			task_pid_nr(worker->task));
5784 		first = false;
5785 	}
5786 	pr_cont("\n");
5787 	printk_deferred_exit();
5788 next_pool:
5789 	raw_spin_unlock_irqrestore(&pool->lock, flags);
5790 	/*
5791 	 * We could be printing a lot from atomic context, e.g.
5792 	 * sysrq-t -> show_all_workqueues(). Avoid triggering
5793 	 * hard lockup.
5794 	 */
5795 	touch_nmi_watchdog();
5797 }
5798 
5799 /**
5800  * show_all_workqueues - dump workqueue state
5801  *
5802  * Called from a sysrq handler and prints out all busy workqueues and pools.
5803  */
5804 void show_all_workqueues(void)
5805 {
5806 	struct workqueue_struct *wq;
5807 	struct worker_pool *pool;
5808 	int pi;
5809 
5810 	rcu_read_lock();
5811 
5812 	pr_info("Showing busy workqueues and worker pools:\n");
5813 
5814 	list_for_each_entry_rcu(wq, &workqueues, list)
5815 		show_one_workqueue(wq);
5816 
5817 	for_each_pool(pool, pi)
5818 		show_one_worker_pool(pool);
5819 
5820 	rcu_read_unlock();
5821 }
5822 
5823 /**
5824  * show_freezable_workqueues - dump freezable workqueue state
5825  *
5826  * Called from try_to_freeze_tasks() and prints out all freezable workqueues
5827  * still busy.
5828  */
5829 void show_freezable_workqueues(void)
5830 {
5831 	struct workqueue_struct *wq;
5832 
5833 	rcu_read_lock();
5834 
5835 	pr_info("Showing freezable workqueues that are still busy:\n");
5836 
5837 	list_for_each_entry_rcu(wq, &workqueues, list) {
5838 		if (!(wq->flags & WQ_FREEZABLE))
5839 			continue;
5840 		show_one_workqueue(wq);
5841 	}
5842 
5843 	rcu_read_unlock();
5844 }
5845 
5846 /* used to show worker information through /proc/PID/{comm,stat,status} */
5847 void wq_worker_comm(char *buf, size_t size, struct task_struct *task)
5848 {
5849 	int off;
5850 
5851 	/* always show the actual comm */
5852 	off = strscpy(buf, task->comm, size);
5853 	if (off < 0)
5854 		return;
5855 
5856 	/* stabilize PF_WQ_WORKER and worker pool association */
5857 	mutex_lock(&wq_pool_attach_mutex);
5858 
5859 	if (task->flags & PF_WQ_WORKER) {
5860 		struct worker *worker = kthread_data(task);
5861 		struct worker_pool *pool = worker->pool;
5862 
5863 		if (pool) {
5864 			raw_spin_lock_irq(&pool->lock);
5865 			/*
5866 			 * ->desc tracks information (wq name or
5867 			 * set_worker_desc()) for the latest execution.  If
5868 			 * current, prepend '+', otherwise '-'.
5869 			 */
5870 			if (worker->desc[0] != '\0') {
5871 				if (worker->current_work)
5872 					scnprintf(buf + off, size - off, "+%s",
5873 						  worker->desc);
5874 				else
5875 					scnprintf(buf + off, size - off, "-%s",
5876 						  worker->desc);
5877 			}
5878 			raw_spin_unlock_irq(&pool->lock);
5879 		}
5880 	}
5881 
5882 	mutex_unlock(&wq_pool_attach_mutex);
5883 }
5884 
5885 #ifdef CONFIG_SMP
5886 
5887 /*
5888  * CPU hotplug.
5889  *
5890  * There are two challenges in supporting CPU hotplug.  Firstly, there
5891  * are a lot of assumptions on strong associations among work, pwq and
5892  * pool which make migrating pending and scheduled work items very
5893  * difficult to implement without impacting hot paths.  Secondly,
5894  * worker pools serve a mix of short, long and very long running work
5895  * items, making blocked draining impractical.
5896  *
5897  * This is solved by allowing a pool to be disassociated from its CPU,
5898  * running as an unbound one, and allowing it to be reattached later if
5899  * the CPU comes back online.
5900  */
5901 
5902 static void unbind_workers(int cpu)
5903 {
5904 	struct worker_pool *pool;
5905 	struct worker *worker;
5906 
5907 	for_each_cpu_worker_pool(pool, cpu) {
5908 		mutex_lock(&wq_pool_attach_mutex);
5909 		raw_spin_lock_irq(&pool->lock);
5910 
5911 		/*
5912 		 * We've blocked all attach/detach operations. Make all workers
5913 		 * unbound and set DISASSOCIATED.  Before this, all workers
5914 		 * must be on the cpu.  After this, they may become diasporas.
5915 		 * And the preemption-disabled sections in their sched callbacks
5916 		 * are guaranteed to see WORKER_UNBOUND since the code here
5917 		 * runs on the same CPU.
5918 		 */
5919 		for_each_pool_worker(worker, pool)
5920 			worker->flags |= WORKER_UNBOUND;
5921 
5922 		pool->flags |= POOL_DISASSOCIATED;
5923 
5924 		/*
5925 		 * The handling of nr_running in sched callbacks is disabled
5926 		 * now.  Zap nr_running.  After this, nr_running stays zero and
5927 		 * need_more_worker() and keep_working() are always true as
5928 		 * long as the worklist is not empty.  This pool now behaves as
5929 		 * an unbound (in terms of concurrency management) pool which
5930 		 * is served by workers tied to the pool.
5931 		 */
5932 		pool->nr_running = 0;
5933 
5934 		/*
5935 		 * With concurrency management just turned off, a busy
5936 		 * worker blocking could lead to lengthy stalls.  Kick off
5937 		 * unbound chain execution of currently pending work items.
5938 		 */
5939 		kick_pool(pool);
5940 
5941 		raw_spin_unlock_irq(&pool->lock);
5942 
5943 		for_each_pool_worker(worker, pool)
5944 			unbind_worker(worker);
5945 
5946 		mutex_unlock(&wq_pool_attach_mutex);
5947 	}
5948 }
5949 
5950 /**
5951  * rebind_workers - rebind all workers of a pool to the associated CPU
5952  * @pool: pool of interest
5953  *
5954  * @pool->cpu is coming online.  Rebind all workers to the CPU.
5955  */
5956 static void rebind_workers(struct worker_pool *pool)
5957 {
5958 	struct worker *worker;
5959 
5960 	lockdep_assert_held(&wq_pool_attach_mutex);
5961 
5962 	/*
5963 	 * Restore CPU affinity of all workers.  As all idle workers should
5964 	 * be on the run-queue of the associated CPU before any local
5965 	 * wake-ups for concurrency management happen, restore CPU affinity
5966 	 * of all workers first and then clear UNBOUND.  As we're called
5967 	 * from CPU_ONLINE, the following shouldn't fail.
5968 	 */
5969 	for_each_pool_worker(worker, pool) {
5970 		kthread_set_per_cpu(worker->task, pool->cpu);
5971 		WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task,
5972 						  pool_allowed_cpus(pool)) < 0);
5973 	}
5974 
5975 	raw_spin_lock_irq(&pool->lock);
5976 
5977 	pool->flags &= ~POOL_DISASSOCIATED;
5978 
5979 	for_each_pool_worker(worker, pool) {
5980 		unsigned int worker_flags = worker->flags;
5981 
5982 		/*
5983 		 * We want to clear UNBOUND but can't directly call
5984 		 * worker_clr_flags() or adjust nr_running.  Atomically
5985 		 * replace UNBOUND with another NOT_RUNNING flag REBOUND.
5986 		 * @worker will clear REBOUND using worker_clr_flags() when
5987 		 * it initiates the next execution cycle thus restoring
5988 		 * concurrency management.  Note that when or whether
5989 		 * @worker clears REBOUND doesn't affect correctness.
5990 		 *
5991 		 * WRITE_ONCE() is necessary because @worker->flags may be
5992 		 * tested without holding any lock in
5993 		 * wq_worker_running().  Without it, NOT_RUNNING test may
5994 		 * fail incorrectly leading to premature concurrency
5995 		 * management operations.
5996 		 */
5997 		WARN_ON_ONCE(!(worker_flags & WORKER_UNBOUND));
5998 		worker_flags |= WORKER_REBOUND;
5999 		worker_flags &= ~WORKER_UNBOUND;
6000 		WRITE_ONCE(worker->flags, worker_flags);
6001 	}
6002 
6003 	raw_spin_unlock_irq(&pool->lock);
6004 }
6005 
6006 /**
6007  * restore_unbound_workers_cpumask - restore cpumask of unbound workers
6008  * @pool: unbound pool of interest
6009  * @cpu: the CPU which is coming up
6010  *
6011  * An unbound pool may end up with a cpumask which doesn't have any online
6012  * CPUs.  When a worker of such a pool gets scheduled, the scheduler resets
6013  * its cpus_allowed.  If @cpu is in @pool's cpumask which didn't have any
6014  * online CPU before, cpus_allowed of all its workers should be restored.
6015  */
6016 static void restore_unbound_workers_cpumask(struct worker_pool *pool, int cpu)
6017 {
6018 	static cpumask_t cpumask;
6019 	struct worker *worker;
6020 
6021 	lockdep_assert_held(&wq_pool_attach_mutex);
6022 
6023 	/* is @cpu allowed for @pool? */
6024 	if (!cpumask_test_cpu(cpu, pool->attrs->cpumask))
6025 		return;
6026 
6027 	cpumask_and(&cpumask, pool->attrs->cpumask, cpu_online_mask);
6028 
6029 	/* as we're called from CPU_ONLINE, the following shouldn't fail */
6030 	for_each_pool_worker(worker, pool)
6031 		WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, &cpumask) < 0);
6032 }
6033 
6034 int workqueue_prepare_cpu(unsigned int cpu)
6035 {
6036 	struct worker_pool *pool;
6037 
6038 	for_each_cpu_worker_pool(pool, cpu) {
6039 		if (pool->nr_workers)
6040 			continue;
6041 		if (!create_worker(pool))
6042 			return -ENOMEM;
6043 	}
6044 	return 0;
6045 }
6046 
6047 int workqueue_online_cpu(unsigned int cpu)
6048 {
6049 	struct worker_pool *pool;
6050 	struct workqueue_struct *wq;
6051 	int pi;
6052 
6053 	mutex_lock(&wq_pool_mutex);
6054 
6055 	for_each_pool(pool, pi) {
6056 		mutex_lock(&wq_pool_attach_mutex);
6057 
6058 		if (pool->cpu == cpu)
6059 			rebind_workers(pool);
6060 		else if (pool->cpu < 0)
6061 			restore_unbound_workers_cpumask(pool, cpu);
6062 
6063 		mutex_unlock(&wq_pool_attach_mutex);
6064 	}
6065 
6066 	/* update pod affinity of unbound workqueues */
6067 	list_for_each_entry(wq, &workqueues, list) {
6068 		struct workqueue_attrs *attrs = wq->unbound_attrs;
6069 
6070 		if (attrs) {
6071 			const struct wq_pod_type *pt = wqattrs_pod_type(attrs);
6072 			int tcpu;
6073 
6074 			for_each_cpu(tcpu, pt->pod_cpus[pt->cpu_pod[cpu]])
6075 				wq_update_pod(wq, tcpu, cpu, true);
6076 
6077 			mutex_lock(&wq->mutex);
6078 			wq_update_node_max_active(wq, -1);
6079 			mutex_unlock(&wq->mutex);
6080 		}
6081 	}
6082 
6083 	mutex_unlock(&wq_pool_mutex);
6084 	return 0;
6085 }
6086 
6087 int workqueue_offline_cpu(unsigned int cpu)
6088 {
6089 	struct workqueue_struct *wq;
6090 
6091 	/* unbinding per-cpu workers should happen on the local CPU */
6092 	if (WARN_ON(cpu != smp_processor_id()))
6093 		return -1;
6094 
6095 	unbind_workers(cpu);
6096 
6097 	/* update pod affinity of unbound workqueues */
6098 	mutex_lock(&wq_pool_mutex);
6099 	list_for_each_entry(wq, &workqueues, list) {
6100 		struct workqueue_attrs *attrs = wq->unbound_attrs;
6101 
6102 		if (attrs) {
6103 			const struct wq_pod_type *pt = wqattrs_pod_type(attrs);
6104 			int tcpu;
6105 
6106 			for_each_cpu(tcpu, pt->pod_cpus[pt->cpu_pod[cpu]])
6107 				wq_update_pod(wq, tcpu, cpu, false);
6108 
6109 			mutex_lock(&wq->mutex);
6110 			wq_update_node_max_active(wq, cpu);
6111 			mutex_unlock(&wq->mutex);
6112 		}
6113 	}
6114 	mutex_unlock(&wq_pool_mutex);
6115 
6116 	return 0;
6117 }
6118 
6119 struct work_for_cpu {
6120 	struct work_struct work;
6121 	long (*fn)(void *);
6122 	void *arg;
6123 	long ret;
6124 };
6125 
6126 static void work_for_cpu_fn(struct work_struct *work)
6127 {
6128 	struct work_for_cpu *wfc = container_of(work, struct work_for_cpu, work);
6129 
6130 	wfc->ret = wfc->fn(wfc->arg);
6131 }
6132 
6133 /**
6134  * work_on_cpu_key - run a function in thread context on a particular cpu
6135  * @cpu: the cpu to run on
6136  * @fn: the function to run
6137  * @arg: the function arg
6138  * @key: The lock class key for lock debugging purposes
6139  *
6140  * It is up to the caller to ensure that the cpu doesn't go offline.
6141  * The caller must not hold any locks which would prevent @fn from completing.
6142  *
6143  * Return: The value @fn returns.
6144  */
6145 long work_on_cpu_key(int cpu, long (*fn)(void *),
6146 		     void *arg, struct lock_class_key *key)
6147 {
6148 	struct work_for_cpu wfc = { .fn = fn, .arg = arg };
6149 
6150 	INIT_WORK_ONSTACK_KEY(&wfc.work, work_for_cpu_fn, key);
6151 	schedule_work_on(cpu, &wfc.work);
6152 	flush_work(&wfc.work);
6153 	destroy_work_on_stack(&wfc.work);
6154 	return wfc.ret;
6155 }
6156 EXPORT_SYMBOL_GPL(work_on_cpu_key);
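
/*
 * Editorial example (not part of the kernel source): running a function on a
 * specific CPU through the work_on_cpu() wrapper from
 * include/linux/workqueue.h, which supplies the lock class key.
 * "example_*" names are hypothetical.
 */
#if 0
static long example_on_cpu_fn(void *arg)
{
	/* executes in worker context on the requested CPU */
	return example_query_local_state(arg);
}

static long example_query_cpu(int cpu, void *arg)
{
	/* caller must keep @cpu online, e.g. via cpus_read_lock() */
	return work_on_cpu(cpu, example_on_cpu_fn, arg);
}
#endif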
6157 
6158 /**
6159  * work_on_cpu_safe_key - run a function in thread context on a particular cpu
6160  * @cpu: the cpu to run on
6161  * @fn:  the function to run
6162  * @arg: the function argument
6163  * @key: The lock class key for lock debugging purposes
6164  *
6165  * Disables CPU hotplug and calls work_on_cpu(). The caller must not hold
6166  * Disables CPU hotplug and calls work_on_cpu_key().  The caller must not hold
6167  *
6168  * Return: The value @fn returns.
6169  */
6170 long work_on_cpu_safe_key(int cpu, long (*fn)(void *),
6171 			  void *arg, struct lock_class_key *key)
6172 {
6173 	long ret = -ENODEV;
6174 
6175 	cpus_read_lock();
6176 	if (cpu_online(cpu))
6177 		ret = work_on_cpu_key(cpu, fn, arg, key);
6178 	cpus_read_unlock();
6179 	return ret;
6180 }
6181 EXPORT_SYMBOL_GPL(work_on_cpu_safe_key);
6182 #endif /* CONFIG_SMP */
6183 
6184 #ifdef CONFIG_FREEZER
6185 
6186 /**
6187  * freeze_workqueues_begin - begin freezing workqueues
6188  *
6189  * Start freezing workqueues.  After this function returns, all freezable
6190  * workqueues will queue new works to their inactive_works list instead of
6191  * workqueues will queue new work items to their inactive_works list instead of
6192  *
6193  * CONTEXT:
6194  * Grabs and releases wq_pool_mutex, wq->mutex and pool->lock's.
6195  */
6196 void freeze_workqueues_begin(void)
6197 {
6198 	struct workqueue_struct *wq;
6199 
6200 	mutex_lock(&wq_pool_mutex);
6201 
6202 	WARN_ON_ONCE(workqueue_freezing);
6203 	workqueue_freezing = true;
6204 
6205 	list_for_each_entry(wq, &workqueues, list) {
6206 		mutex_lock(&wq->mutex);
6207 		wq_adjust_max_active(wq);
6208 		mutex_unlock(&wq->mutex);
6209 	}
6210 
6211 	mutex_unlock(&wq_pool_mutex);
6212 }
6213 
6214 /**
6215  * freeze_workqueues_busy - are freezable workqueues still busy?
6216  *
6217  * Check whether freezing is complete.  This function must be called
6218  * between freeze_workqueues_begin() and thaw_workqueues().
6219  *
6220  * CONTEXT:
6221  * Grabs and releases wq_pool_mutex.
6222  *
6223  * Return:
6224  * %true if some freezable workqueues are still busy.  %false if freezing
6225  * is complete.
6226  */
6227 bool freeze_workqueues_busy(void)
6228 {
6229 	bool busy = false;
6230 	struct workqueue_struct *wq;
6231 	struct pool_workqueue *pwq;
6232 
6233 	mutex_lock(&wq_pool_mutex);
6234 
6235 	WARN_ON_ONCE(!workqueue_freezing);
6236 
6237 	list_for_each_entry(wq, &workqueues, list) {
6238 		if (!(wq->flags & WQ_FREEZABLE))
6239 			continue;
6240 		/*
6241 		 * nr_active is monotonically decreasing.  It's safe
6242 		 * to peek without lock.
6243 		 */
6244 		rcu_read_lock();
6245 		for_each_pwq(pwq, wq) {
6246 			WARN_ON_ONCE(pwq->nr_active < 0);
6247 			if (pwq->nr_active) {
6248 				busy = true;
6249 				rcu_read_unlock();
6250 				goto out_unlock;
6251 			}
6252 		}
6253 		rcu_read_unlock();
6254 	}
6255 out_unlock:
6256 	mutex_unlock(&wq_pool_mutex);
6257 	return busy;
6258 }
6259 
6260 /**
6261  * thaw_workqueues - thaw workqueues
6262  *
6263  * Thaw workqueues.  Normal queueing is restored and all collected
6264  * frozen work items are transferred to their respective pool worklists.
6265  *
6266  * CONTEXT:
6267  * Grabs and releases wq_pool_mutex, wq->mutex and pool->lock's.
6268  */
6269 void thaw_workqueues(void)
6270 {
6271 	struct workqueue_struct *wq;
6272 
6273 	mutex_lock(&wq_pool_mutex);
6274 
6275 	if (!workqueue_freezing)
6276 		goto out_unlock;
6277 
6278 	workqueue_freezing = false;
6279 
6280 	/* restore max_active and repopulate worklist */
6281 	list_for_each_entry(wq, &workqueues, list) {
6282 		mutex_lock(&wq->mutex);
6283 		wq_adjust_max_active(wq);
6284 		mutex_unlock(&wq->mutex);
6285 	}
6286 
6287 out_unlock:
6288 	mutex_unlock(&wq_pool_mutex);
6289 }
6290 #endif /* CONFIG_FREEZER */
6291 
6292 static int workqueue_apply_unbound_cpumask(const cpumask_var_t unbound_cpumask)
6293 {
6294 	LIST_HEAD(ctxs);
6295 	int ret = 0;
6296 	struct workqueue_struct *wq;
6297 	struct apply_wqattrs_ctx *ctx, *n;
6298 
6299 	lockdep_assert_held(&wq_pool_mutex);
6300 
6301 	list_for_each_entry(wq, &workqueues, list) {
6302 		if (!(wq->flags & WQ_UNBOUND))
6303 			continue;
6304 
6305 		/* creating multiple pwqs breaks ordering guarantee */
6306 		if (!list_empty(&wq->pwqs)) {
6307 			if (wq->flags & __WQ_ORDERED_EXPLICIT)
6308 				continue;
6309 			wq->flags &= ~__WQ_ORDERED;
6310 		}
6311 
6312 		ctx = apply_wqattrs_prepare(wq, wq->unbound_attrs, unbound_cpumask);
6313 		if (IS_ERR(ctx)) {
6314 			ret = PTR_ERR(ctx);
6315 			break;
6316 		}
6317 
6318 		list_add_tail(&ctx->list, &ctxs);
6319 	}
6320 
6321 	list_for_each_entry_safe(ctx, n, &ctxs, list) {
6322 		if (!ret)
6323 			apply_wqattrs_commit(ctx);
6324 		apply_wqattrs_cleanup(ctx);
6325 	}
6326 
6327 	if (!ret) {
6328 		mutex_lock(&wq_pool_attach_mutex);
6329 		cpumask_copy(wq_unbound_cpumask, unbound_cpumask);
6330 		mutex_unlock(&wq_pool_attach_mutex);
6331 	}
6332 	return ret;
6333 }
6334 
6335 /**
6336  * workqueue_unbound_exclude_cpumask - Exclude given CPUs from unbound cpumask
6337  * @exclude_cpumask: the cpumask to be excluded from wq_unbound_cpumask
6338  *
6339  * This function can be called from cpuset code to provide a set of isolated
6340  * CPUs that should be excluded from wq_unbound_cpumask. The caller must hold
6341  * either cpus_read_lock or cpus_write_lock.
6342  */
6343 int workqueue_unbound_exclude_cpumask(cpumask_var_t exclude_cpumask)
6344 {
6345 	cpumask_var_t cpumask;
6346 	int ret = 0;
6347 
6348 	if (!zalloc_cpumask_var(&cpumask, GFP_KERNEL))
6349 		return -ENOMEM;
6350 
6351 	lockdep_assert_cpus_held();
6352 	mutex_lock(&wq_pool_mutex);
6353 
6354 	/* Save the current isolated cpumask & export it via sysfs */
6355 	cpumask_copy(wq_isolated_cpumask, exclude_cpumask);
6356 
6357 	/*
6358 	 * If the operation fails, it will fall back to
6359 	 * wq_requested_unbound_cpumask which is initially set to
6360 	 * the (HK_TYPE_WQ ∩ HK_TYPE_DOMAIN) housekeeping mask and rewritten
6361 	 * by any subsequent write to workqueue/cpumask sysfs file.
6362 	 */
6363 	if (!cpumask_andnot(cpumask, wq_requested_unbound_cpumask, exclude_cpumask))
6364 		cpumask_copy(cpumask, wq_requested_unbound_cpumask);
6365 	if (!cpumask_equal(cpumask, wq_unbound_cpumask))
6366 		ret = workqueue_apply_unbound_cpumask(cpumask);
6367 
6368 	mutex_unlock(&wq_pool_mutex);
6369 	free_cpumask_var(cpumask);
6370 	return ret;
6371 }
6372 
6373 static int parse_affn_scope(const char *val)
6374 {
6375 	int i;
6376 
6377 	for (i = 0; i < ARRAY_SIZE(wq_affn_names); i++) {
6378 		if (!strncasecmp(val, wq_affn_names[i], strlen(wq_affn_names[i])))
6379 			return i;
6380 	}
6381 	return -EINVAL;
6382 }
6383 
6384 static int wq_affn_dfl_set(const char *val, const struct kernel_param *kp)
6385 {
6386 	struct workqueue_struct *wq;
6387 	int affn, cpu;
6388 
6389 	affn = parse_affn_scope(val);
6390 	if (affn < 0)
6391 		return affn;
6392 	if (affn == WQ_AFFN_DFL)
6393 		return -EINVAL;
6394 
6395 	cpus_read_lock();
6396 	mutex_lock(&wq_pool_mutex);
6397 
6398 	wq_affn_dfl = affn;
6399 
6400 	list_for_each_entry(wq, &workqueues, list) {
6401 		for_each_online_cpu(cpu) {
6402 			wq_update_pod(wq, cpu, cpu, true);
6403 		}
6404 	}
6405 
6406 	mutex_unlock(&wq_pool_mutex);
6407 	cpus_read_unlock();
6408 
6409 	return 0;
6410 }
6411 
6412 static int wq_affn_dfl_get(char *buffer, const struct kernel_param *kp)
6413 {
6414 	return scnprintf(buffer, PAGE_SIZE, "%s\n", wq_affn_names[wq_affn_dfl]);
6415 }
6416 
6417 static const struct kernel_param_ops wq_affn_dfl_ops = {
6418 	.set	= wq_affn_dfl_set,
6419 	.get	= wq_affn_dfl_get,
6420 };
6421 
6422 module_param_cb(default_affinity_scope, &wq_affn_dfl_ops, NULL, 0644);
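
/*
 * Editorial note (not part of the kernel source): the default affinity scope
 * can be chosen on the kernel command line, e.g.
 *
 *	workqueue.default_affinity_scope=cache
 *
 * or changed at runtime through
 * /sys/module/workqueue/parameters/default_affinity_scope.
 */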
6423 
6424 #ifdef CONFIG_SYSFS
6425 /*
6426  * Workqueues with the WQ_SYSFS flag set are visible to userland via
6427  * /sys/bus/workqueue/devices/WQ_NAME.  All visible workqueues have the
6428  * following attributes.
6429  *
6430  *  per_cpu		RO bool	: whether the workqueue is per-cpu or unbound
6431  *  max_active		RW int	: maximum number of in-flight work items
6432  *
6433  * Unbound workqueues have the following extra attributes.
6434  *
6435  *  nice		RW int	: nice value of the workers
6436  *  cpumask		RW mask	: bitmask of allowed CPUs for the workers
6437  *  affinity_scope	RW str  : worker CPU affinity scope (cache, numa, none)
6438  *  affinity_strict	RW bool : worker CPU affinity is strict
6439  */
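
/*
 * Editorial note (not part of the kernel source): for a WQ_SYSFS workqueue
 * named "example", the attributes above appear as regular sysfs files, e.g.
 *
 *	/sys/bus/workqueue/devices/example/max_active
 *	/sys/bus/workqueue/devices/example/cpumask
 *	/sys/bus/workqueue/devices/example/affinity_scope
 */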
6440 struct wq_device {
6441 	struct workqueue_struct		*wq;
6442 	struct device			dev;
6443 };
6444 
6445 static struct workqueue_struct *dev_to_wq(struct device *dev)
6446 {
6447 	struct wq_device *wq_dev = container_of(dev, struct wq_device, dev);
6448 
6449 	return wq_dev->wq;
6450 }
6451 
6452 static ssize_t per_cpu_show(struct device *dev, struct device_attribute *attr,
6453 			    char *buf)
6454 {
6455 	struct workqueue_struct *wq = dev_to_wq(dev);
6456 
6457 	return scnprintf(buf, PAGE_SIZE, "%d\n", (bool)!(wq->flags & WQ_UNBOUND));
6458 }
6459 static DEVICE_ATTR_RO(per_cpu);
6460 
6461 static ssize_t max_active_show(struct device *dev,
6462 			       struct device_attribute *attr, char *buf)
6463 {
6464 	struct workqueue_struct *wq = dev_to_wq(dev);
6465 
6466 	return scnprintf(buf, PAGE_SIZE, "%d\n", wq->saved_max_active);
6467 }
6468 
6469 static ssize_t max_active_store(struct device *dev,
6470 				struct device_attribute *attr, const char *buf,
6471 				size_t count)
6472 {
6473 	struct workqueue_struct *wq = dev_to_wq(dev);
6474 	int val;
6475 
6476 	if (sscanf(buf, "%d", &val) != 1 || val <= 0)
6477 		return -EINVAL;
6478 
6479 	workqueue_set_max_active(wq, val);
6480 	return count;
6481 }
6482 static DEVICE_ATTR_RW(max_active);
6483 
6484 static struct attribute *wq_sysfs_attrs[] = {
6485 	&dev_attr_per_cpu.attr,
6486 	&dev_attr_max_active.attr,
6487 	NULL,
6488 };
6489 ATTRIBUTE_GROUPS(wq_sysfs);
6490 
6491 static void apply_wqattrs_lock(void)
6492 {
6493 	/* CPUs should stay stable across pwq creations and installations */
6494 	cpus_read_lock();
6495 	mutex_lock(&wq_pool_mutex);
6496 }
6497 
6498 static void apply_wqattrs_unlock(void)
6499 {
6500 	mutex_unlock(&wq_pool_mutex);
6501 	cpus_read_unlock();
6502 }
6503 
6504 static ssize_t wq_nice_show(struct device *dev, struct device_attribute *attr,
6505 			    char *buf)
6506 {
6507 	struct workqueue_struct *wq = dev_to_wq(dev);
6508 	int written;
6509 
6510 	mutex_lock(&wq->mutex);
6511 	written = scnprintf(buf, PAGE_SIZE, "%d\n", wq->unbound_attrs->nice);
6512 	mutex_unlock(&wq->mutex);
6513 
6514 	return written;
6515 }
6516 
6517 /* prepare workqueue_attrs for sysfs store operations */
6518 static struct workqueue_attrs *wq_sysfs_prep_attrs(struct workqueue_struct *wq)
6519 {
6520 	struct workqueue_attrs *attrs;
6521 
6522 	lockdep_assert_held(&wq_pool_mutex);
6523 
6524 	attrs = alloc_workqueue_attrs();
6525 	if (!attrs)
6526 		return NULL;
6527 
6528 	copy_workqueue_attrs(attrs, wq->unbound_attrs);
6529 	return attrs;
6530 }
6531 
6532 static ssize_t wq_nice_store(struct device *dev, struct device_attribute *attr,
6533 			     const char *buf, size_t count)
6534 {
6535 	struct workqueue_struct *wq = dev_to_wq(dev);
6536 	struct workqueue_attrs *attrs;
6537 	int ret = -ENOMEM;
6538 
6539 	apply_wqattrs_lock();
6540 
6541 	attrs = wq_sysfs_prep_attrs(wq);
6542 	if (!attrs)
6543 		goto out_unlock;
6544 
6545 	if (sscanf(buf, "%d", &attrs->nice) == 1 &&
6546 	    attrs->nice >= MIN_NICE && attrs->nice <= MAX_NICE)
6547 		ret = apply_workqueue_attrs_locked(wq, attrs);
6548 	else
6549 		ret = -EINVAL;
6550 
6551 out_unlock:
6552 	apply_wqattrs_unlock();
6553 	free_workqueue_attrs(attrs);
6554 	return ret ?: count;
6555 }
6556 
6557 static ssize_t wq_cpumask_show(struct device *dev,
6558 			       struct device_attribute *attr, char *buf)
6559 {
6560 	struct workqueue_struct *wq = dev_to_wq(dev);
6561 	int written;
6562 
6563 	mutex_lock(&wq->mutex);
6564 	written = scnprintf(buf, PAGE_SIZE, "%*pb\n",
6565 			    cpumask_pr_args(wq->unbound_attrs->cpumask));
6566 	mutex_unlock(&wq->mutex);
6567 	return written;
6568 }
6569 
6570 static ssize_t wq_cpumask_store(struct device *dev,
6571 				struct device_attribute *attr,
6572 				const char *buf, size_t count)
6573 {
6574 	struct workqueue_struct *wq = dev_to_wq(dev);
6575 	struct workqueue_attrs *attrs;
6576 	int ret = -ENOMEM;
6577 
6578 	apply_wqattrs_lock();
6579 
6580 	attrs = wq_sysfs_prep_attrs(wq);
6581 	if (!attrs)
6582 		goto out_unlock;
6583 
6584 	ret = cpumask_parse(buf, attrs->cpumask);
6585 	if (!ret)
6586 		ret = apply_workqueue_attrs_locked(wq, attrs);
6587 
6588 out_unlock:
6589 	apply_wqattrs_unlock();
6590 	free_workqueue_attrs(attrs);
6591 	return ret ?: count;
6592 }
6593 
6594 static ssize_t wq_affn_scope_show(struct device *dev,
6595 				  struct device_attribute *attr, char *buf)
6596 {
6597 	struct workqueue_struct *wq = dev_to_wq(dev);
6598 	int written;
6599 
6600 	mutex_lock(&wq->mutex);
6601 	if (wq->unbound_attrs->affn_scope == WQ_AFFN_DFL)
6602 		written = scnprintf(buf, PAGE_SIZE, "%s (%s)\n",
6603 				    wq_affn_names[WQ_AFFN_DFL],
6604 				    wq_affn_names[wq_affn_dfl]);
6605 	else
6606 		written = scnprintf(buf, PAGE_SIZE, "%s\n",
6607 				    wq_affn_names[wq->unbound_attrs->affn_scope]);
6608 	mutex_unlock(&wq->mutex);
6609 
6610 	return written;
6611 }
6612 
6613 static ssize_t wq_affn_scope_store(struct device *dev,
6614 				   struct device_attribute *attr,
6615 				   const char *buf, size_t count)
6616 {
6617 	struct workqueue_struct *wq = dev_to_wq(dev);
6618 	struct workqueue_attrs *attrs;
6619 	int affn, ret = -ENOMEM;
6620 
6621 	affn = parse_affn_scope(buf);
6622 	if (affn < 0)
6623 		return affn;
6624 
6625 	apply_wqattrs_lock();
6626 	attrs = wq_sysfs_prep_attrs(wq);
6627 	if (attrs) {
6628 		attrs->affn_scope = affn;
6629 		ret = apply_workqueue_attrs_locked(wq, attrs);
6630 	}
6631 	apply_wqattrs_unlock();
6632 	free_workqueue_attrs(attrs);
6633 	return ret ?: count;
6634 }
6635 
6636 static ssize_t wq_affinity_strict_show(struct device *dev,
6637 				       struct device_attribute *attr, char *buf)
6638 {
6639 	struct workqueue_struct *wq = dev_to_wq(dev);
6640 
6641 	return scnprintf(buf, PAGE_SIZE, "%d\n",
6642 			 wq->unbound_attrs->affn_strict);
6643 }
6644 
6645 static ssize_t wq_affinity_strict_store(struct device *dev,
6646 					struct device_attribute *attr,
6647 					const char *buf, size_t count)
6648 {
6649 	struct workqueue_struct *wq = dev_to_wq(dev);
6650 	struct workqueue_attrs *attrs;
6651 	int v, ret = -ENOMEM;
6652 
6653 	if (sscanf(buf, "%d", &v) != 1)
6654 		return -EINVAL;
6655 
6656 	apply_wqattrs_lock();
6657 	attrs = wq_sysfs_prep_attrs(wq);
6658 	if (attrs) {
6659 		attrs->affn_strict = (bool)v;
6660 		ret = apply_workqueue_attrs_locked(wq, attrs);
6661 	}
6662 	apply_wqattrs_unlock();
6663 	free_workqueue_attrs(attrs);
6664 	return ret ?: count;
6665 }
6666 
6667 static struct device_attribute wq_sysfs_unbound_attrs[] = {
6668 	__ATTR(nice, 0644, wq_nice_show, wq_nice_store),
6669 	__ATTR(cpumask, 0644, wq_cpumask_show, wq_cpumask_store),
6670 	__ATTR(affinity_scope, 0644, wq_affn_scope_show, wq_affn_scope_store),
6671 	__ATTR(affinity_strict, 0644, wq_affinity_strict_show, wq_affinity_strict_store),
6672 	__ATTR_NULL,
6673 };
6674 
6675 static struct bus_type wq_subsys = {
6676 	.name				= "workqueue",
6677 	.dev_groups			= wq_sysfs_groups,
6678 };
6679 
6680 /**
6681  *  workqueue_set_unbound_cpumask - Set the low-level unbound cpumask
6682  *  @cpumask: the cpumask to set
6683  *
6684  *  The low-level workqueues cpumask is a global cpumask that limits
6685  *  the affinity of all unbound workqueues.  This function checks @cpumask,
6686  *  applies it to all unbound workqueues and updates all of their pwqs.
6687  *
6688  *  Return:	0	- Success
6689  *		-EINVAL	- Invalid @cpumask
6690  *		-ENOMEM	- Failed to allocate memory for attrs or pwqs.
6691  */
6692 static int workqueue_set_unbound_cpumask(cpumask_var_t cpumask)
6693 {
6694 	int ret = -EINVAL;
6695 
6696 	/*
6697 	 * Not excluding isolated cpus on purpose.
6698 	 * If the user wishes to include them, we allow that.
6699 	 */
6700 	cpumask_and(cpumask, cpumask, cpu_possible_mask);
6701 	if (!cpumask_empty(cpumask)) {
6702 		apply_wqattrs_lock();
6703 		cpumask_copy(wq_requested_unbound_cpumask, cpumask);
6704 		if (cpumask_equal(cpumask, wq_unbound_cpumask)) {
6705 			ret = 0;
6706 			goto out_unlock;
6707 		}
6708 
6709 		ret = workqueue_apply_unbound_cpumask(cpumask);
6710 
6711 out_unlock:
6712 		apply_wqattrs_unlock();
6713 	}
6714 
6715 	return ret;
6716 }
6717 
6718 static ssize_t __wq_cpumask_show(struct device *dev,
6719 		struct device_attribute *attr, char *buf, cpumask_var_t mask)
6720 {
6721 	int written;
6722 
6723 	mutex_lock(&wq_pool_mutex);
6724 	written = scnprintf(buf, PAGE_SIZE, "%*pb\n", cpumask_pr_args(mask));
6725 	mutex_unlock(&wq_pool_mutex);
6726 
6727 	return written;
6728 }
6729 
6730 static ssize_t wq_unbound_cpumask_show(struct device *dev,
6731 		struct device_attribute *attr, char *buf)
6732 {
6733 	return __wq_cpumask_show(dev, attr, buf, wq_unbound_cpumask);
6734 }
6735 
6736 static ssize_t wq_requested_cpumask_show(struct device *dev,
6737 		struct device_attribute *attr, char *buf)
6738 {
6739 	return __wq_cpumask_show(dev, attr, buf, wq_requested_unbound_cpumask);
6740 }
6741 
6742 static ssize_t wq_isolated_cpumask_show(struct device *dev,
6743 		struct device_attribute *attr, char *buf)
6744 {
6745 	return __wq_cpumask_show(dev, attr, buf, wq_isolated_cpumask);
6746 }
6747 
6748 static ssize_t wq_unbound_cpumask_store(struct device *dev,
6749 		struct device_attribute *attr, const char *buf, size_t count)
6750 {
6751 	cpumask_var_t cpumask;
6752 	int ret;
6753 
6754 	if (!zalloc_cpumask_var(&cpumask, GFP_KERNEL))
6755 		return -ENOMEM;
6756 
6757 	ret = cpumask_parse(buf, cpumask);
6758 	if (!ret)
6759 		ret = workqueue_set_unbound_cpumask(cpumask);
6760 
6761 	free_cpumask_var(cpumask);
6762 	return ret ? ret : count;
6763 }
6764 
6765 static struct device_attribute wq_sysfs_cpumask_attrs[] = {
6766 	__ATTR(cpumask, 0644, wq_unbound_cpumask_show,
6767 	       wq_unbound_cpumask_store),
6768 	__ATTR(cpumask_requested, 0444, wq_requested_cpumask_show, NULL),
6769 	__ATTR(cpumask_isolated, 0444, wq_isolated_cpumask_show, NULL),
6770 	__ATTR_NULL,
6771 };
6772 
6773 static int __init wq_sysfs_init(void)
6774 {
6775 	struct device *dev_root;
6776 	int err;
6777 
6778 	err = subsys_virtual_register(&wq_subsys, NULL);
6779 	if (err)
6780 		return err;
6781 
6782 	dev_root = bus_get_dev_root(&wq_subsys);
6783 	if (dev_root) {
6784 		struct device_attribute *attr;
6785 
6786 		for (attr = wq_sysfs_cpumask_attrs; attr->attr.name; attr++) {
6787 			err = device_create_file(dev_root, attr);
6788 			if (err)
6789 				break;
6790 		}
6791 		put_device(dev_root);
6792 	}
6793 	return err;
6794 }
6795 core_initcall(wq_sysfs_init);
6796 
6797 static void wq_device_release(struct device *dev)
6798 {
6799 	struct wq_device *wq_dev = container_of(dev, struct wq_device, dev);
6800 
6801 	kfree(wq_dev);
6802 }
6803 
6804 /**
6805  * workqueue_sysfs_register - make a workqueue visible in sysfs
6806  * @wq: the workqueue to register
6807  *
6808  * Expose @wq in sysfs under /sys/bus/workqueue/devices.
6809  * alloc_workqueue*() automatically calls this function if WQ_SYSFS is set
6810  * which is the preferred method.
6811  *
6812  * A workqueue user should use this function directly iff it wants to apply
6813  * workqueue_attrs before making the workqueue visible in sysfs; otherwise,
6814  * apply_workqueue_attrs() may race against userland updating the
6815  * attributes.
6816  *
6817  * Return: 0 on success, -errno on failure.
6818  */
6819 int workqueue_sysfs_register(struct workqueue_struct *wq)
6820 {
6821 	struct wq_device *wq_dev;
6822 	int ret;
6823 
6824 	/*
6825 	 * Adjusting max_active or creating new pwqs by applying
6826 	 * attributes breaks the ordering guarantee.  Disallow exposing ordered
6827 	 * workqueues.
6828 	 */
6829 	if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
6830 		return -EINVAL;
6831 
6832 	wq->wq_dev = wq_dev = kzalloc(sizeof(*wq_dev), GFP_KERNEL);
6833 	if (!wq_dev)
6834 		return -ENOMEM;
6835 
6836 	wq_dev->wq = wq;
6837 	wq_dev->dev.bus = &wq_subsys;
6838 	wq_dev->dev.release = wq_device_release;
6839 	dev_set_name(&wq_dev->dev, "%s", wq->name);
6840 
6841 	/*
6842 	 * unbound_attrs are created separately.  Suppress uevent until
6843 	 * everything is ready.
6844 	 */
6845 	dev_set_uevent_suppress(&wq_dev->dev, true);
6846 
6847 	ret = device_register(&wq_dev->dev);
6848 	if (ret) {
6849 		put_device(&wq_dev->dev);
6850 		wq->wq_dev = NULL;
6851 		return ret;
6852 	}
6853 
6854 	if (wq->flags & WQ_UNBOUND) {
6855 		struct device_attribute *attr;
6856 
6857 		for (attr = wq_sysfs_unbound_attrs; attr->attr.name; attr++) {
6858 			ret = device_create_file(&wq_dev->dev, attr);
6859 			if (ret) {
6860 				device_unregister(&wq_dev->dev);
6861 				wq->wq_dev = NULL;
6862 				return ret;
6863 			}
6864 		}
6865 	}
6866 
6867 	dev_set_uevent_suppress(&wq_dev->dev, false);
6868 	kobject_uevent(&wq_dev->dev.kobj, KOBJ_ADD);
6869 	return 0;
6870 }
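/*
 * A minimal usage sketch (illustrative; "my_wq" and "my_attrs" are
 * hypothetical): a driver that must pin attributes before exposing the
 * workqueue would leave WQ_SYSFS clear and register explicitly:
 *
 *	my_wq = alloc_workqueue("my_wq", WQ_UNBOUND, 0);
 *	if (!my_wq)
 *		return -ENOMEM;
 *	ret = apply_workqueue_attrs(my_wq, my_attrs);
 *	if (!ret)
 *		ret = workqueue_sysfs_register(my_wq);
 */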
6871 
6872 /**
6873  * workqueue_sysfs_unregister - undo workqueue_sysfs_register()
6874  * @wq: the workqueue to unregister
6875  *
6876  * If @wq is registered to sysfs by workqueue_sysfs_register(), unregister it.
6877  */
6878 static void workqueue_sysfs_unregister(struct workqueue_struct *wq)
6879 {
6880 	struct wq_device *wq_dev = wq->wq_dev;
6881 
6882 	if (!wq->wq_dev)
6883 		return;
6884 
6885 	wq->wq_dev = NULL;
6886 	device_unregister(&wq_dev->dev);
6887 }
6888 #else	/* CONFIG_SYSFS */
6889 static void workqueue_sysfs_unregister(struct workqueue_struct *wq)	{ }
6890 #endif	/* CONFIG_SYSFS */
6891 
6892 /*
6893  * Workqueue watchdog.
6894  *
6895  * A stall may be caused by various bugs - a missing WQ_MEM_RECLAIM, an
6896  * illegal flush dependency, or a concurrency-managed work item which stays
6897  * RUNNING indefinitely.  Workqueue stalls can be very difficult to debug as the
6898  * usual warning mechanisms don't trigger and internal workqueue state is
6899  * largely opaque.
6900  *
6901  * Workqueue watchdog monitors all worker pools periodically and dumps
6902  * state if some pools failed to make forward progress for a while, where
6903  * forward progress is defined as the first item on ->worklist changing.
6904  *
6905  * This mechanism is controlled through the kernel parameter
6906  * "workqueue.watchdog_thresh" which can be updated at runtime through the
6907  * corresponding sysfs parameter file.
6908  */
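/*
 * Example (illustrative): a 45 second threshold can be requested at boot
 * with "workqueue.watchdog_thresh=45" or changed at runtime with:
 *
 *	# echo 45 > /sys/module/workqueue/parameters/watchdog_thresh
 *
 * Writing 0 disables the watchdog.
 */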
6909 #ifdef CONFIG_WQ_WATCHDOG
6910 
6911 static unsigned long wq_watchdog_thresh = 30;
6912 static struct timer_list wq_watchdog_timer;
6913 
6914 static unsigned long wq_watchdog_touched = INITIAL_JIFFIES;
6915 static DEFINE_PER_CPU(unsigned long, wq_watchdog_touched_cpu) = INITIAL_JIFFIES;
6916 
6917 /*
6918  * Show workers that might prevent the processing of pending work items.
6919  * The only candidates are CPU-bound workers in the running state.
6920  * Pending work items should be handled by another idle worker
6921  * in all other situations.
6922  */
6923 static void show_cpu_pool_hog(struct worker_pool *pool)
6924 {
6925 	struct worker *worker;
6926 	unsigned long flags;
6927 	int bkt;
6928 
6929 	raw_spin_lock_irqsave(&pool->lock, flags);
6930 
6931 	hash_for_each(pool->busy_hash, bkt, worker, hentry) {
6932 		if (task_is_running(worker->task)) {
6933 			/*
6934 			 * Defer printing to avoid deadlocks in console
6935 			 * drivers that queue work while holding locks
6936 			 * also taken in their write paths.
6937 			 */
6938 			printk_deferred_enter();
6939 
6940 			pr_info("pool %d:\n", pool->id);
6941 			sched_show_task(worker->task);
6942 
6943 			printk_deferred_exit();
6944 		}
6945 	}
6946 
6947 	raw_spin_unlock_irqrestore(&pool->lock, flags);
6948 }
6949 
6950 static void show_cpu_pools_hogs(void)
6951 {
6952 	struct worker_pool *pool;
6953 	int pi;
6954 
6955 	pr_info("Showing backtraces of running workers in stalled CPU-bound worker pools:\n");
6956 
6957 	rcu_read_lock();
6958 
6959 	for_each_pool(pool, pi) {
6960 		if (pool->cpu_stall)
6961 			show_cpu_pool_hog(pool);
6963 	}
6964 
6965 	rcu_read_unlock();
6966 }
6967 
6968 static void wq_watchdog_reset_touched(void)
6969 {
6970 	int cpu;
6971 
6972 	wq_watchdog_touched = jiffies;
6973 	for_each_possible_cpu(cpu)
6974 		per_cpu(wq_watchdog_touched_cpu, cpu) = jiffies;
6975 }
6976 
6977 static void wq_watchdog_timer_fn(struct timer_list *unused)
6978 {
6979 	unsigned long thresh = READ_ONCE(wq_watchdog_thresh) * HZ;
6980 	bool lockup_detected = false;
6981 	bool cpu_pool_stall = false;
6982 	unsigned long now = jiffies;
6983 	struct worker_pool *pool;
6984 	int pi;
6985 
6986 	if (!thresh)
6987 		return;
6988 
6989 	rcu_read_lock();
6990 
6991 	for_each_pool(pool, pi) {
6992 		unsigned long pool_ts, touched, ts;
6993 
6994 		pool->cpu_stall = false;
6995 		if (list_empty(&pool->worklist))
6996 			continue;
6997 
6998 		/*
6999 		 * If a virtual machine is stopped by the host, it can look to
7000 		 * the watchdog like a stall.
7001 		 */
7002 		kvm_check_and_clear_guest_paused();
7003 
7004 		/* get the latest of pool and touched timestamps */
7005 		if (pool->cpu >= 0)
7006 			touched = READ_ONCE(per_cpu(wq_watchdog_touched_cpu, pool->cpu));
7007 		else
7008 			touched = READ_ONCE(wq_watchdog_touched);
7009 		pool_ts = READ_ONCE(pool->watchdog_ts);
7010 
7011 		if (time_after(pool_ts, touched))
7012 			ts = pool_ts;
7013 		else
7014 			ts = touched;
7015 
7016 		/* did we stall? */
7017 		if (time_after(now, ts + thresh)) {
7018 			lockup_detected = true;
7019 			if (pool->cpu >= 0) {
7020 				pool->cpu_stall = true;
7021 				cpu_pool_stall = true;
7022 			}
7023 			pr_emerg("BUG: workqueue lockup - pool");
7024 			pr_cont_pool_info(pool);
7025 			pr_cont(" stuck for %us!\n",
7026 				jiffies_to_msecs(now - pool_ts) / 1000);
7027 		}
7028 
7030 	}
7031 
7032 	rcu_read_unlock();
7033 
7034 	if (lockup_detected)
7035 		show_all_workqueues();
7036 
7037 	if (cpu_pool_stall)
7038 		show_cpu_pools_hogs();
7039 
7040 	wq_watchdog_reset_touched();
7041 	mod_timer(&wq_watchdog_timer, jiffies + thresh);
7042 }
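/*
 * Worked example (illustrative): with watchdog_thresh=30 and HZ=1000, a
 * pool is reported only once jiffies is more than 30000 ticks past both
 * pool->watchdog_ts and the relevant touched timestamp, i.e. the head of
 * ->worklist has not changed for over 30 seconds of un-touched time.
 */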
7043 
7044 notrace void wq_watchdog_touch(int cpu)
7045 {
7046 	if (cpu >= 0)
7047 		per_cpu(wq_watchdog_touched_cpu, cpu) = jiffies;
7048 
7049 	wq_watchdog_touched = jiffies;
7050 }
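/*
 * A hedged caller sketch ("dump_lots_of_state" is hypothetical): code that
 * legitimately monopolizes CPUs for a long time, e.g. a debug dump loop,
 * would touch the watchdog to suppress false positives:
 *
 *	for_each_online_cpu(cpu) {
 *		dump_lots_of_state(cpu);
 *		wq_watchdog_touch(cpu);
 *	}
 *
 * Passing a negative @cpu updates only the global timestamp, which is what
 * unbound pools are checked against.
 */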
7051 
7052 static void wq_watchdog_set_thresh(unsigned long thresh)
7053 {
7054 	wq_watchdog_thresh = 0;
7055 	del_timer_sync(&wq_watchdog_timer);
7056 
7057 	if (thresh) {
7058 		wq_watchdog_thresh = thresh;
7059 		wq_watchdog_reset_touched();
7060 		mod_timer(&wq_watchdog_timer, jiffies + thresh * HZ);
7061 	}
7062 }
7063 
7064 static int wq_watchdog_param_set_thresh(const char *val,
7065 					const struct kernel_param *kp)
7066 {
7067 	unsigned long thresh;
7068 	int ret;
7069 
7070 	ret = kstrtoul(val, 0, &thresh);
7071 	if (ret)
7072 		return ret;
7073 
7074 	if (system_wq)
7075 		wq_watchdog_set_thresh(thresh);
7076 	else
7077 		wq_watchdog_thresh = thresh;
7078 
7079 	return 0;
7080 }
7081 
7082 static const struct kernel_param_ops wq_watchdog_thresh_ops = {
7083 	.set	= wq_watchdog_param_set_thresh,
7084 	.get	= param_get_ulong,
7085 };
7086 
7087 module_param_cb(watchdog_thresh, &wq_watchdog_thresh_ops, &wq_watchdog_thresh,
7088 		0644);
7089 
7090 static void wq_watchdog_init(void)
7091 {
7092 	timer_setup(&wq_watchdog_timer, wq_watchdog_timer_fn, TIMER_DEFERRABLE);
7093 	wq_watchdog_set_thresh(wq_watchdog_thresh);
7094 }
7095 
7096 #else	/* CONFIG_WQ_WATCHDOG */
7097 
7098 static inline void wq_watchdog_init(void) { }
7099 
7100 #endif	/* CONFIG_WQ_WATCHDOG */
7101 
7102 static void __init restrict_unbound_cpumask(const char *name, const struct cpumask *mask)
7103 {
7104 	if (!cpumask_intersects(wq_unbound_cpumask, mask)) {
7105 		pr_warn("workqueue: Restricting unbound_cpumask (%*pb) with %s (%*pb) leaves no CPU, ignoring\n",
7106 			cpumask_pr_args(wq_unbound_cpumask), name, cpumask_pr_args(mask));
7107 		return;
7108 	}
7109 
7110 	cpumask_and(wq_unbound_cpumask, wq_unbound_cpumask, mask);
7111 }
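/*
 * Example (illustrative): if wq_unbound_cpumask is 0-7 and @mask is 4-7,
 * the result is 4-7.  If the two masks do not intersect at all, the
 * restriction is skipped so unbound work always has at least one CPU left.
 */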
7112 
7113 /**
7114  * workqueue_init_early - early init for workqueue subsystem
7115  *
7116  * This is the first step of three-staged workqueue subsystem initialization,
7117  * invoked as soon as the bare basics - memory allocation, cpumasks and idr are
7118  * up. It sets up all the data structures and system workqueues and allows early
7119  * boot code to create workqueues and queue/cancel work items. Actual work item
7120  * execution starts only after kthreads can be created and scheduled right
7121  * before early initcalls.
7122  */
7123 void __init workqueue_init_early(void)
7124 {
7125 	struct wq_pod_type *pt = &wq_pod_types[WQ_AFFN_SYSTEM];
7126 	int std_nice[NR_STD_WORKER_POOLS] = { 0, HIGHPRI_NICE_LEVEL };
7127 	int i, cpu;
7128 
7129 	BUILD_BUG_ON(__alignof__(struct pool_workqueue) < __alignof__(long long));
7130 
7131 	BUG_ON(!alloc_cpumask_var(&wq_unbound_cpumask, GFP_KERNEL));
7132 	BUG_ON(!alloc_cpumask_var(&wq_requested_unbound_cpumask, GFP_KERNEL));
7133 	BUG_ON(!zalloc_cpumask_var(&wq_isolated_cpumask, GFP_KERNEL));
7134 
7135 	cpumask_copy(wq_unbound_cpumask, cpu_possible_mask);
7136 	restrict_unbound_cpumask("HK_TYPE_WQ", housekeeping_cpumask(HK_TYPE_WQ));
7137 	restrict_unbound_cpumask("HK_TYPE_DOMAIN", housekeeping_cpumask(HK_TYPE_DOMAIN));
7138 	if (!cpumask_empty(&wq_cmdline_cpumask))
7139 		restrict_unbound_cpumask("workqueue.unbound_cpus", &wq_cmdline_cpumask);
7140 
7141 	cpumask_copy(wq_requested_unbound_cpumask, wq_unbound_cpumask);
7142 
7143 	pwq_cache = KMEM_CACHE(pool_workqueue, SLAB_PANIC);
7144 
7145 	wq_update_pod_attrs_buf = alloc_workqueue_attrs();
7146 	BUG_ON(!wq_update_pod_attrs_buf);
7147 
7148 	/*
7149 	 * If nohz_full is enabled, make power-efficient workqueues unbound.
7150 	 * This allows work items to be moved to housekeeping (HK) CPUs.
7151 	 */
7152 	if (housekeeping_enabled(HK_TYPE_TICK))
7153 		wq_power_efficient = true;
7154 
7155 	/* initialize WQ_AFFN_SYSTEM pods */
7156 	pt->pod_cpus = kcalloc(1, sizeof(pt->pod_cpus[0]), GFP_KERNEL);
7157 	pt->pod_node = kcalloc(1, sizeof(pt->pod_node[0]), GFP_KERNEL);
7158 	pt->cpu_pod = kcalloc(nr_cpu_ids, sizeof(pt->cpu_pod[0]), GFP_KERNEL);
7159 	BUG_ON(!pt->pod_cpus || !pt->pod_node || !pt->cpu_pod);
7160 
7161 	BUG_ON(!zalloc_cpumask_var_node(&pt->pod_cpus[0], GFP_KERNEL, NUMA_NO_NODE));
7162 
7163 	pt->nr_pods = 1;
7164 	cpumask_copy(pt->pod_cpus[0], cpu_possible_mask);
7165 	pt->pod_node[0] = NUMA_NO_NODE;
7166 	pt->cpu_pod[0] = 0;
7167 
7168 	/* initialize CPU pools */
7169 	for_each_possible_cpu(cpu) {
7170 		struct worker_pool *pool;
7171 
7172 		i = 0;
7173 		for_each_cpu_worker_pool(pool, cpu) {
7174 			BUG_ON(init_worker_pool(pool));
7175 			pool->cpu = cpu;
7176 			cpumask_copy(pool->attrs->cpumask, cpumask_of(cpu));
7177 			cpumask_copy(pool->attrs->__pod_cpumask, cpumask_of(cpu));
7178 			pool->attrs->nice = std_nice[i++];
7179 			pool->attrs->affn_strict = true;
7180 			pool->node = cpu_to_node(cpu);
7181 
7182 			/* alloc pool ID */
7183 			mutex_lock(&wq_pool_mutex);
7184 			BUG_ON(worker_pool_assign_id(pool));
7185 			mutex_unlock(&wq_pool_mutex);
7186 		}
7187 	}
7188 
7189 	/* create default unbound and ordered wq attrs */
7190 	for (i = 0; i < NR_STD_WORKER_POOLS; i++) {
7191 		struct workqueue_attrs *attrs;
7192 
7193 		BUG_ON(!(attrs = alloc_workqueue_attrs()));
7194 		attrs->nice = std_nice[i];
7195 		unbound_std_wq_attrs[i] = attrs;
7196 
7197 		/*
7198 		 * An ordered wq should have only one pwq as ordering is
7199 		 * guaranteed by max_active, which is enforced by pwqs.
7200 		 */
7201 		BUG_ON(!(attrs = alloc_workqueue_attrs()));
7202 		attrs->nice = std_nice[i];
7203 		attrs->ordered = true;
7204 		ordered_wq_attrs[i] = attrs;
7205 	}
7206 
7207 	system_wq = alloc_workqueue("events", 0, 0);
7208 	system_highpri_wq = alloc_workqueue("events_highpri", WQ_HIGHPRI, 0);
7209 	system_long_wq = alloc_workqueue("events_long", 0, 0);
7210 	system_unbound_wq = alloc_workqueue("events_unbound", WQ_UNBOUND,
7211 					    WQ_MAX_ACTIVE);
7212 	system_freezable_wq = alloc_workqueue("events_freezable",
7213 					      WQ_FREEZABLE, 0);
7214 	system_power_efficient_wq = alloc_workqueue("events_power_efficient",
7215 					      WQ_POWER_EFFICIENT, 0);
7216 	system_freezable_power_efficient_wq = alloc_workqueue("events_freezable_pwr_efficient",
7217 					      WQ_FREEZABLE | WQ_POWER_EFFICIENT,
7218 					      0);
7219 	BUG_ON(!system_wq || !system_highpri_wq || !system_long_wq ||
7220 	       !system_unbound_wq || !system_freezable_wq ||
7221 	       !system_power_efficient_wq ||
7222 	       !system_freezable_power_efficient_wq);
7223 }
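/*
 * Illustrative consequence of the staging ("early_work"/"early_work_fn"
 * are hypothetical): early boot code may already queue work here,
 *
 *	INIT_WORK(&early_work, early_work_fn);
 *	queue_work(system_wq, &early_work);
 *
 * but the item just sits on a pool's worklist until workqueue_init()
 * creates the initial kworkers.
 */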
7224 
7225 static void __init wq_cpu_intensive_thresh_init(void)
7226 {
7227 	unsigned long thresh;
7228 	unsigned long bogo;
7229 
7230 	pwq_release_worker = kthread_create_worker(0, "pool_workqueue_release");
7231 	BUG_ON(IS_ERR(pwq_release_worker));
7232 
7233 	/* if the user set it to a specific value, keep it */
7234 	if (wq_cpu_intensive_thresh_us != ULONG_MAX)
7235 		return;
7236 
7237 	/*
7238 	 * The default of 10ms is derived from the fact that most modern (as of
7239 	 * 2023) processors can do a lot in 10ms and that it's just below what
7240 	 * most consider human-perceivable. However, the kernel also runs on a
7241 	 * lot slower CPUs including microcontrollers where the threshold is way
7242 	 * too low.
7243 	 *
7244 	 * Let's scale the threshold up to 1 second if BogoMIPS is below 4000.
7245 	 * This is by no means accurate but it doesn't have to be. The mechanism
7246 	 * is still useful even when the threshold is fully scaled up. Also, as
7247 	 * the reports would usually be applicable to everyone, some machines
7248 	 * operating on longer thresholds won't significantly diminish their
7249 	 * usefulness.
7250 	 */
7251 	thresh = 10 * USEC_PER_MSEC;
7252 
7253 	/* see init/calibrate.c for lpj -> BogoMIPS calculation */
7254 	bogo = max_t(unsigned long, loops_per_jiffy / 500000 * HZ, 1);
7255 	if (bogo < 4000)
7256 		thresh = min_t(unsigned long, thresh * 4000 / bogo, USEC_PER_SEC);
7257 
7258 	pr_debug("wq_cpu_intensive_thresh: lpj=%lu BogoMIPS=%lu thresh_us=%lu\n",
7259 		 loops_per_jiffy, bogo, thresh);
7260 
7261 	wq_cpu_intensive_thresh_us = thresh;
7262 }
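/*
 * Worked example (illustrative): with loops_per_jiffy = 4000000 and
 * HZ = 250, bogo = 4000000 / 500000 * 250 = 2000 BogoMIPS.  As 2000 is
 * below 4000, the threshold becomes min(10000 * 4000 / 2000, USEC_PER_SEC)
 * = 20000us, i.e. the 10ms default is doubled on this slower machine.
 */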
7263 
7264 /**
7265  * workqueue_init - bring workqueue subsystem fully online
7266  *
7267  * This is the second step of three-staged workqueue subsystem initialization
7268  * and is invoked as soon as kthreads can be created and scheduled. Workqueues have
7269  * been created and work items queued on them, but there are no kworkers
7270  * executing the work items yet. Populate the worker pools with the initial
7271  * workers and enable future kworker creations.
7272  */
7273 void __init workqueue_init(void)
7274 {
7275 	struct workqueue_struct *wq;
7276 	struct worker_pool *pool;
7277 	int cpu, bkt;
7278 
7279 	wq_cpu_intensive_thresh_init();
7280 
7281 	mutex_lock(&wq_pool_mutex);
7282 
7283 	/*
7284 	 * Per-cpu pools created earlier could be missing node hint. Fix them
7285 	 * up. Also, create a rescuer for workqueues that requested it.
7286 	 */
7287 	for_each_possible_cpu(cpu) {
7288 		for_each_cpu_worker_pool(pool, cpu) {
7289 			pool->node = cpu_to_node(cpu);
7290 		}
7291 	}
7292 
7293 	list_for_each_entry(wq, &workqueues, list) {
7294 		WARN(init_rescuer(wq),
7295 		     "workqueue: failed to create early rescuer for %s\n",
7296 		     wq->name);
7297 	}
7298 
7299 	mutex_unlock(&wq_pool_mutex);
7300 
7301 	/* create the initial workers */
7302 	for_each_online_cpu(cpu) {
7303 		for_each_cpu_worker_pool(pool, cpu) {
7304 			pool->flags &= ~POOL_DISASSOCIATED;
7305 			BUG_ON(!create_worker(pool));
7306 		}
7307 	}
7308 
7309 	hash_for_each(unbound_pool_hash, bkt, pool, hash_node)
7310 		BUG_ON(!create_worker(pool));
7311 
7312 	wq_online = true;
7313 	wq_watchdog_init();
7314 }
7315 
7316 /*
7317  * Initialize @pt by first initializing @pt->cpu_pod[] with pod IDs according to
7318  * @cpus_share_pod(). Each subset of CPUs that share a pod is assigned a unique
7319  * and consecutive pod ID. The rest of @pt is initialized accordingly.
7320  */
7321 static void __init init_pod_type(struct wq_pod_type *pt,
7322 				 bool (*cpus_share_pod)(int, int))
7323 {
7324 	int cur, pre, cpu, pod;
7325 
7326 	pt->nr_pods = 0;
7327 
7328 	/* init @pt->cpu_pod[] according to @cpus_share_pod() */
7329 	pt->cpu_pod = kcalloc(nr_cpu_ids, sizeof(pt->cpu_pod[0]), GFP_KERNEL);
7330 	BUG_ON(!pt->cpu_pod);
7331 
7332 	for_each_possible_cpu(cur) {
7333 		for_each_possible_cpu(pre) {
7334 			if (pre >= cur) {
7335 				pt->cpu_pod[cur] = pt->nr_pods++;
7336 				break;
7337 			}
7338 			if (cpus_share_pod(cur, pre)) {
7339 				pt->cpu_pod[cur] = pt->cpu_pod[pre];
7340 				break;
7341 			}
7342 		}
7343 	}
7344 
7345 	/* init the rest to match @pt->cpu_pod[] */
7346 	pt->pod_cpus = kcalloc(pt->nr_pods, sizeof(pt->pod_cpus[0]), GFP_KERNEL);
7347 	pt->pod_node = kcalloc(pt->nr_pods, sizeof(pt->pod_node[0]), GFP_KERNEL);
7348 	BUG_ON(!pt->pod_cpus || !pt->pod_node);
7349 
7350 	for (pod = 0; pod < pt->nr_pods; pod++)
7351 		BUG_ON(!zalloc_cpumask_var(&pt->pod_cpus[pod], GFP_KERNEL));
7352 
7353 	for_each_possible_cpu(cpu) {
7354 		cpumask_set_cpu(cpu, pt->pod_cpus[pt->cpu_pod[cpu]]);
7355 		pt->pod_node[pt->cpu_pod[cpu]] = cpu_to_node(cpu);
7356 	}
7357 }
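/*
 * Worked example (illustrative): four possible CPUs where 0/1 and 2/3 are
 * SMT siblings, initialized with cpus_share_smt.  The first pass produces
 * cpu_pod[] = { 0, 0, 1, 1 } and nr_pods = 2; the second pass then fills
 * pod_cpus[0] = 0-1 and pod_cpus[1] = 2-3 and records each pod's node.
 */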
7358 
7359 static bool __init cpus_dont_share(int cpu0, int cpu1)
7360 {
7361 	return false;
7362 }
7363 
7364 static bool __init cpus_share_smt(int cpu0, int cpu1)
7365 {
7366 #ifdef CONFIG_SCHED_SMT
7367 	return cpumask_test_cpu(cpu0, cpu_smt_mask(cpu1));
7368 #else
7369 	return false;
7370 #endif
7371 }
7372 
7373 static bool __init cpus_share_numa(int cpu0, int cpu1)
7374 {
7375 	return cpu_to_node(cpu0) == cpu_to_node(cpu1);
7376 }
7377 
7378 /**
7379  * workqueue_init_topology - initialize CPU pods for unbound workqueues
7380  *
7381  * This is the third step of three-staged workqueue subsystem initialization,
7382  * invoked after SMP and topology information are fully initialized. It
7383  * initializes the unbound CPU pods accordingly.
7384  */
7385 void __init workqueue_init_topology(void)
7386 {
7387 	struct workqueue_struct *wq;
7388 	int cpu;
7389 
7390 	init_pod_type(&wq_pod_types[WQ_AFFN_CPU], cpus_dont_share);
7391 	init_pod_type(&wq_pod_types[WQ_AFFN_SMT], cpus_share_smt);
7392 	init_pod_type(&wq_pod_types[WQ_AFFN_CACHE], cpus_share_cache);
7393 	init_pod_type(&wq_pod_types[WQ_AFFN_NUMA], cpus_share_numa);
7394 
7395 	mutex_lock(&wq_pool_mutex);
7396 
7397 	/*
7398 	 * Workqueues allocated earlier would have all CPUs sharing the default
7399 	 * worker pool. Explicitly call wq_update_pod() on all workqueue and CPU
7400 	 * combinations to apply per-pod sharing.
7401 	 */
7402 	list_for_each_entry(wq, &workqueues, list) {
7403 		for_each_online_cpu(cpu)
7404 			wq_update_pod(wq, cpu, cpu, true);
7405 		if (wq->flags & WQ_UNBOUND) {
7406 			mutex_lock(&wq->mutex);
7407 			wq_update_node_max_active(wq, -1);
7408 			mutex_unlock(&wq->mutex);
7409 		}
7410 	}
7411 
7412 	mutex_unlock(&wq_pool_mutex);
7413 }
7414 
7415 void __warn_flushing_systemwide_wq(void)
7416 {
7417 	pr_warn("WARNING: Flushing system-wide workqueues will be prohibited in the near future.\n");
7418 	dump_stack();
7419 }
7420 EXPORT_SYMBOL(__warn_flushing_systemwide_wq);
7421 
7422 static int __init workqueue_unbound_cpus_setup(char *str)
7423 {
7424 	if (cpulist_parse(str, &wq_cmdline_cpumask) < 0) {
7425 		cpumask_clear(&wq_cmdline_cpumask);
7426 		pr_warn("workqueue.unbound_cpus: incorrect CPU range, using default\n");
7427 	}
7428 
7429 	return 1;
7430 }
7431 __setup("workqueue.unbound_cpus=", workqueue_unbound_cpus_setup);
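/*
 * Example (illustrative): booting with "workqueue.unbound_cpus=0-3"
 * restricts unbound workers to CPUs 0-3, while a malformed list such as
 * "workqueue.unbound_cpus=0-x" triggers the warning above and keeps the
 * default mask.
 */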
7432