// SPDX-License-Identifier: GPL-2.0-only
/*
 * kernel/workqueue.c - generic async execution with shared worker pool
 *
 * Copyright (C) 2002		Ingo Molnar
 *
 *   Derived from the taskqueue/keventd code by:
 *     David Woodhouse <[email protected]>
 *     Andrew Morton
 *     Kai Petzke <[email protected]>
 *     Theodore Ts'o <[email protected]>
 *
 * Made to use alloc_percpu by Christoph Lameter.
 *
 * Copyright (C) 2010		SUSE Linux Products GmbH
 * Copyright (C) 2010		Tejun Heo <[email protected]>
 *
 * This is the generic async execution mechanism.  Work items are
 * executed in process context.  The worker pool is shared and
 * automatically managed.  There are two worker pools for each CPU (one for
 * normal work items and the other for high priority ones) and some extra
 * pools for workqueues which are not bound to any specific CPU - the
 * number of these backing pools is dynamic.
 *
 * Please read Documentation/core-api/workqueue.rst for details.
 */

#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/mempolicy.h>
#include <linux/freezer.h>
#include <linux/debug_locks.h>
#include <linux/lockdep.h>
#include <linux/idr.h>
#include <linux/jhash.h>
#include <linux/hashtable.h>
#include <linux/rculist.h>
#include <linux/nodemask.h>
#include <linux/moduleparam.h>
#include <linux/uaccess.h>
#include <linux/sched/isolation.h>
#include <linux/nmi.h>
#include <linux/kvm_para.h>

#include "workqueue_internal.h"

enum {
	/*
	 * worker_pool flags
	 *
	 * A bound pool is either associated or disassociated with its CPU.
	 * While associated (!DISASSOCIATED), all workers are bound to the
	 * CPU and none has %WORKER_UNBOUND set and concurrency management
	 * is in effect.
	 *
	 * While DISASSOCIATED, the cpu may be offline and all workers have
	 * %WORKER_UNBOUND set and concurrency management disabled, and may
	 * be executing on any CPU.  The pool behaves as an unbound one.
	 *
	 * Note that DISASSOCIATED should be flipped only while holding
	 * wq_pool_attach_mutex to avoid changing binding state while
	 * worker_attach_to_pool() is in progress.
	 */
	POOL_MANAGER_ACTIVE	= 1 << 0,	/* being managed */
	POOL_DISASSOCIATED	= 1 << 2,	/* cpu can't serve workers */

	/* worker flags */
	WORKER_DIE		= 1 << 1,	/* die die die */
	WORKER_IDLE		= 1 << 2,	/* is idle */
	WORKER_PREP		= 1 << 3,	/* preparing to run works */
	WORKER_CPU_INTENSIVE	= 1 << 6,	/* cpu intensive */
	WORKER_UNBOUND		= 1 << 7,	/* worker is unbound */
	WORKER_REBOUND		= 1 << 8,	/* worker was rebound */

	WORKER_NOT_RUNNING	= WORKER_PREP | WORKER_CPU_INTENSIVE |
				  WORKER_UNBOUND | WORKER_REBOUND,

	NR_STD_WORKER_POOLS	= 2,		/* # standard pools per cpu */

	UNBOUND_POOL_HASH_ORDER	= 6,		/* hashed by pool->attrs */
	BUSY_WORKER_HASH_ORDER	= 6,		/* 64 pointers */

	MAX_IDLE_WORKERS_RATIO	= 4,		/* 1/4 of busy can be idle */
	IDLE_WORKER_TIMEOUT	= 300 * HZ,	/* keep idle ones for 5 mins */

	MAYDAY_INITIAL_TIMEOUT  = HZ / 100 >= 2 ? HZ / 100 : 2,
						/* call for help after 10ms
						   (min two ticks) */
	MAYDAY_INTERVAL		= HZ / 10,	/* and then every 100ms */
	CREATE_COOLDOWN		= HZ,		/* time to breathe after fail */

	/*
	 * Rescue workers are used only in emergencies and shared by
	 * all cpus.  Give MIN_NICE.
	 */
	RESCUER_NICE_LEVEL	= MIN_NICE,
	HIGHPRI_NICE_LEVEL	= MIN_NICE,

	WQ_NAME_LEN		= 24,
};

/*
 * Structure fields follow one of the following exclusion rules.
 *
 * I: Modifiable by initialization/destruction paths and read-only for
 *    everyone else.
 *
 * P: Preemption protected.  Disabling preemption is enough; such fields
 *    should only be modified and accessed from the local cpu.
 *
 * L: pool->lock protected.  Access with pool->lock held.
 *
 * X: During normal operation, modification requires pool->lock and should
 *    be done only from local cpu.  Either disabling preemption on local
 *    cpu or grabbing pool->lock is enough for read access.  If
 *    POOL_DISASSOCIATED is set, it's identical to L.
 *
 * A: wq_pool_attach_mutex protected.
 *
 * PL: wq_pool_mutex protected.
 *
 * PR: wq_pool_mutex protected for writes.  RCU protected for reads.
 *
 * PW: wq_pool_mutex and wq->mutex protected for writes.  Either for reads.
 *
 * PWR: wq_pool_mutex and wq->mutex protected for writes.  Either or
 *      RCU for reads.
 *
 * WQ: wq->mutex protected.
 *
 * WR: wq->mutex protected for writes.  RCU protected for reads.
 *
 * MD: wq_mayday_lock protected.
 */

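/*
 * Illustrative sketch (not part of the upstream file): how the "L:" and
 * "PR:" rules above look at a use site.  The field and lock names mirror
 * the definitions below.
 *
 *	raw_spin_lock_irq(&pool->lock);
 *	pool->nr_workers++;			// "L:" field, pool->lock held
 *	raw_spin_unlock_irq(&pool->lock);
 *
 *	rcu_read_lock();			// "PR:" data, RCU for reads
 *	pool = idr_find(&worker_pool_idr, pool_id);
 *	...
 *	rcu_read_unlock();
 */
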
/* struct worker is defined in workqueue_internal.h */

struct worker_pool {
	raw_spinlock_t		lock;		/* the pool lock */
	int			cpu;		/* I: the associated cpu */
	int			node;		/* I: the associated node ID */
	int			id;		/* I: pool ID */
	unsigned int		flags;		/* X: flags */

	unsigned long		watchdog_ts;	/* L: watchdog timestamp */

	/*
	 * The counter is incremented in a process context on the associated CPU
	 * w/ preemption disabled, and decremented or reset in the same context
	 * but w/ pool->lock held. The readers grab pool->lock and are
	 * guaranteed to observe whether the counter reached zero.
	 */
	int			nr_running;

	struct list_head	worklist;	/* L: list of pending works */

	int			nr_workers;	/* L: total number of workers */
	int			nr_idle;	/* L: currently idle workers */

	struct list_head	idle_list;	/* L: list of idle workers */
	struct timer_list	idle_timer;	/* L: worker idle timeout */
	struct work_struct      idle_cull_work; /* L: worker idle cleanup */

	struct timer_list	mayday_timer;	  /* L: SOS timer for workers */

	/* a worker is either on busy_hash or idle_list, or is the manager */
	DECLARE_HASHTABLE(busy_hash, BUSY_WORKER_HASH_ORDER);
						/* L: hash of busy workers */

	struct worker		*manager;	/* L: purely informational */
	struct list_head	workers;	/* A: attached workers */
	struct completion	*detach_completion; /* all workers detached */

	struct ida		worker_ida;	/* worker IDs for task name */

	struct workqueue_attrs	*attrs;		/* I: worker attributes */
	struct hlist_node	hash_node;	/* PL: unbound_pool_hash node */
	int			refcnt;		/* PL: refcnt for unbound pools */

	/*
	 * Destruction of pool is RCU protected to allow dereferences
	 * from get_work_pool().
	 */
	struct rcu_head		rcu;
};

/*
 * The per-pool workqueue.  While queued, the lower WORK_STRUCT_FLAG_BITS
 * of work_struct->data are used for flags and the remaining high bits
 * point to the pwq; thus, pwqs need to be aligned to
 * 1 << WORK_STRUCT_FLAG_BITS.
 */
struct pool_workqueue {
	struct worker_pool	*pool;		/* I: the associated pool */
	struct workqueue_struct *wq;		/* I: the owning workqueue */
	int			work_color;	/* L: current color */
	int			flush_color;	/* L: flushing color */
	int			refcnt;		/* L: reference count */
	int			nr_in_flight[WORK_NR_COLORS];
						/* L: nr of in_flight works */

	/*
	 * nr_active management and WORK_STRUCT_INACTIVE:
	 *
	 * When pwq->nr_active >= max_active, a new work item is queued to
	 * pwq->inactive_works instead of pool->worklist and marked with
	 * WORK_STRUCT_INACTIVE.
	 *
	 * All work items marked with WORK_STRUCT_INACTIVE do not participate
	 * in pwq->nr_active and all work items in pwq->inactive_works are
	 * marked with WORK_STRUCT_INACTIVE.  But not all WORK_STRUCT_INACTIVE
	 * work items are in pwq->inactive_works.  Some of them are ready to
	 * run in pool->worklist or worker->scheduled.  Those work items are
	 * only struct wq_barrier's, which are used for flush_work() and
	 * should not participate in pwq->nr_active.  A non-barrier work item
	 * is marked with WORK_STRUCT_INACTIVE iff it is in pwq->inactive_works.
	 */
	int			nr_active;	/* L: nr of active works */
	int			max_active;	/* L: max active works */
	struct list_head	inactive_works;	/* L: inactive works */
	struct list_head	pwqs_node;	/* WR: node on wq->pwqs */
	struct list_head	mayday_node;	/* MD: node on wq->maydays */

	/*
	 * Release of unbound pwq is punted to system_wq.  See put_pwq()
	 * and pwq_unbound_release_workfn() for details.  pool_workqueue
	 * itself is also RCU protected so that the first pwq can be
	 * determined without grabbing wq->mutex.
	 */
	struct work_struct	unbound_release_work;
	struct rcu_head		rcu;
} __aligned(1 << WORK_STRUCT_FLAG_BITS);

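/*
 * Illustrative sketch (not part of the upstream file): because pwqs are
 * aligned to 1 << WORK_STRUCT_FLAG_BITS, a queued work item's data word
 * can be decomposed as below; see set_work_pwq() and get_work_pwq()
 * further down for the real helpers.
 *
 *	unsigned long data = atomic_long_read(&work->data);
 *	unsigned long flags = data & WORK_STRUCT_FLAG_MASK;
 *	struct pool_workqueue *pwq =
 *		(void *)(data & WORK_STRUCT_WQ_DATA_MASK);
 */
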
/*
 * Structure used to wait for workqueue flush.
 */
struct wq_flusher {
	struct list_head	list;		/* WQ: list of flushers */
	int			flush_color;	/* WQ: flush color waiting for */
	struct completion	done;		/* flush completion */
};

struct wq_device;

/*
 * The externally visible workqueue.  It relays the issued work items to
 * the appropriate worker_pool through its pool_workqueues.
 */
struct workqueue_struct {
	struct list_head	pwqs;		/* WR: all pwqs of this wq */
	struct list_head	list;		/* PR: list of all workqueues */

	struct mutex		mutex;		/* protects this wq */
	int			work_color;	/* WQ: current work color */
	int			flush_color;	/* WQ: current flush color */
	atomic_t		nr_pwqs_to_flush; /* flush in progress */
	struct wq_flusher	*first_flusher;	/* WQ: first flusher */
	struct list_head	flusher_queue;	/* WQ: flush waiters */
	struct list_head	flusher_overflow; /* WQ: flush overflow list */

	struct list_head	maydays;	/* MD: pwqs requesting rescue */
	struct worker		*rescuer;	/* MD: rescue worker */

	int			nr_drainers;	/* WQ: drain in progress */
	int			saved_max_active; /* WQ: saved pwq max_active */

	struct workqueue_attrs	*unbound_attrs;	/* PW: only for unbound wqs */
	struct pool_workqueue	*dfl_pwq;	/* PW: only for unbound wqs */

#ifdef CONFIG_SYSFS
	struct wq_device	*wq_dev;	/* I: for sysfs interface */
#endif
#ifdef CONFIG_LOCKDEP
	char			*lock_name;
	struct lock_class_key	key;
	struct lockdep_map	lockdep_map;
#endif
	char			name[WQ_NAME_LEN]; /* I: workqueue name */

	/*
	 * Destruction of workqueue_struct is RCU protected to allow walking
	 * the workqueues list without grabbing wq_pool_mutex.
	 * This is used to dump all workqueues from sysrq.
	 */
	struct rcu_head		rcu;

	/* hot fields used during command issue, aligned to cacheline */
	unsigned int		flags ____cacheline_aligned; /* WQ: WQ_* flags */
	struct pool_workqueue __percpu *cpu_pwqs; /* I: per-cpu pwqs */
	struct pool_workqueue __rcu *numa_pwq_tbl[]; /* PWR: unbound pwqs indexed by node */
};

static struct kmem_cache *pwq_cache;

static cpumask_var_t *wq_numa_possible_cpumask;
					/* possible CPUs of each node */

static bool wq_disable_numa;
module_param_named(disable_numa, wq_disable_numa, bool, 0444);

/* see the comment above the definition of WQ_POWER_EFFICIENT */
static bool wq_power_efficient = IS_ENABLED(CONFIG_WQ_POWER_EFFICIENT_DEFAULT);
module_param_named(power_efficient, wq_power_efficient, bool, 0444);

static bool wq_online;			/* can kworkers be created yet? */

static bool wq_numa_enabled;		/* unbound NUMA affinity enabled */

/* buf for wq_update_unbound_numa_attrs(), protected by CPU hotplug exclusion */
static struct workqueue_attrs *wq_update_unbound_numa_attrs_buf;

static DEFINE_MUTEX(wq_pool_mutex);	/* protects pools and workqueues list */
static DEFINE_MUTEX(wq_pool_attach_mutex); /* protects worker attach/detach */
static DEFINE_RAW_SPINLOCK(wq_mayday_lock);	/* protects wq->maydays list */
/* wait for manager to go away */
static struct rcuwait manager_wait = __RCUWAIT_INITIALIZER(manager_wait);

static LIST_HEAD(workqueues);		/* PR: list of all workqueues */
static bool workqueue_freezing;		/* PL: have wqs started freezing? */

/* PL&A: allowable cpus for unbound wqs and work items */
static cpumask_var_t wq_unbound_cpumask;

/* CPU where unbound work was last round robin scheduled from this CPU */
static DEFINE_PER_CPU(int, wq_rr_cpu_last);

/*
 * Local execution of unbound work items is no longer guaranteed.  The
 * following always forces round-robin CPU selection on unbound work items
 * to uncover usages which depend on it.
 */
#ifdef CONFIG_DEBUG_WQ_FORCE_RR_CPU
static bool wq_debug_force_rr_cpu = true;
#else
static bool wq_debug_force_rr_cpu = false;
#endif
module_param_named(debug_force_rr_cpu, wq_debug_force_rr_cpu, bool, 0644);

/* the per-cpu worker pools */
static DEFINE_PER_CPU_SHARED_ALIGNED(struct worker_pool [NR_STD_WORKER_POOLS], cpu_worker_pools);

static DEFINE_IDR(worker_pool_idr);	/* PR: idr of all pools */

/* PL: hash of all unbound pools keyed by pool->attrs */
static DEFINE_HASHTABLE(unbound_pool_hash, UNBOUND_POOL_HASH_ORDER);

/* I: attributes used when instantiating standard unbound pools on demand */
static struct workqueue_attrs *unbound_std_wq_attrs[NR_STD_WORKER_POOLS];

/* I: attributes used when instantiating ordered pools on demand */
static struct workqueue_attrs *ordered_wq_attrs[NR_STD_WORKER_POOLS];

struct workqueue_struct *system_wq __read_mostly;
EXPORT_SYMBOL(system_wq);
struct workqueue_struct *system_highpri_wq __read_mostly;
EXPORT_SYMBOL_GPL(system_highpri_wq);
struct workqueue_struct *system_long_wq __read_mostly;
EXPORT_SYMBOL_GPL(system_long_wq);
struct workqueue_struct *system_unbound_wq __read_mostly;
EXPORT_SYMBOL_GPL(system_unbound_wq);
struct workqueue_struct *system_freezable_wq __read_mostly;
EXPORT_SYMBOL_GPL(system_freezable_wq);
struct workqueue_struct *system_power_efficient_wq __read_mostly;
EXPORT_SYMBOL_GPL(system_power_efficient_wq);
struct workqueue_struct *system_freezable_power_efficient_wq __read_mostly;
EXPORT_SYMBOL_GPL(system_freezable_power_efficient_wq);

static int worker_thread(void *__worker);
static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
static void show_pwq(struct pool_workqueue *pwq);
static void show_one_worker_pool(struct worker_pool *pool);

#define CREATE_TRACE_POINTS
#include <trace/events/workqueue.h>

#define assert_rcu_or_pool_mutex()					\
	RCU_LOCKDEP_WARN(!rcu_read_lock_held() &&			\
			 !lockdep_is_held(&wq_pool_mutex),		\
			 "RCU or wq_pool_mutex should be held")

#define assert_rcu_or_wq_mutex_or_pool_mutex(wq)			\
	RCU_LOCKDEP_WARN(!rcu_read_lock_held() &&			\
			 !lockdep_is_held(&wq->mutex) &&		\
			 !lockdep_is_held(&wq_pool_mutex),		\
			 "RCU, wq->mutex or wq_pool_mutex should be held")

#define for_each_cpu_worker_pool(pool, cpu)				\
	for ((pool) = &per_cpu(cpu_worker_pools, cpu)[0];		\
	     (pool) < &per_cpu(cpu_worker_pools, cpu)[NR_STD_WORKER_POOLS]; \
	     (pool)++)

/**
 * for_each_pool - iterate through all worker_pools in the system
 * @pool: iteration cursor
 * @pi: integer used for iteration
 *
 * This must be called either with wq_pool_mutex held or RCU read
 * locked.  If the pool needs to be used beyond the locking in effect, the
 * caller is responsible for guaranteeing that the pool stays online.
 *
 * The if/else clause exists only for the lockdep assertion and can be
 * ignored.
 */
#define for_each_pool(pool, pi)						\
	idr_for_each_entry(&worker_pool_idr, pool, pi)			\
		if (({ assert_rcu_or_pool_mutex(); false; })) { }	\
		else

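/*
 * Illustrative usage sketch (not part of the upstream file); only "I:"
 * fields are safe to read under plain RCU:
 *
 *	struct worker_pool *pool;
 *	int pi;
 *
 *	rcu_read_lock();
 *	for_each_pool(pool, pi)
 *		pr_info("pool %d on cpu %d\n", pool->id, pool->cpu);
 *	rcu_read_unlock();
 */
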
/**
 * for_each_pool_worker - iterate through all workers of a worker_pool
 * @worker: iteration cursor
 * @pool: worker_pool to iterate workers of
 *
 * This must be called with wq_pool_attach_mutex held.
 *
 * The if/else clause exists only for the lockdep assertion and can be
 * ignored.
 */
#define for_each_pool_worker(worker, pool)				\
	list_for_each_entry((worker), &(pool)->workers, node)		\
		if (({ lockdep_assert_held(&wq_pool_attach_mutex); false; })) { } \
		else

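/*
 * Illustrative usage sketch (not part of the upstream file), modeled on
 * how worker rebinding walks a pool's attached workers:
 *
 *	struct worker *worker;
 *
 *	mutex_lock(&wq_pool_attach_mutex);
 *	for_each_pool_worker(worker, pool)
 *		set_cpus_allowed_ptr(worker->task, pool->attrs->cpumask);
 *	mutex_unlock(&wq_pool_attach_mutex);
 */
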
/**
 * for_each_pwq - iterate through all pool_workqueues of the specified workqueue
 * @pwq: iteration cursor
 * @wq: the target workqueue
 *
 * This must be called either with wq->mutex held or RCU read locked.
 * If the pwq needs to be used beyond the locking in effect, the caller is
 * responsible for guaranteeing that the pwq stays online.
 *
 * The if/else clause exists only for the lockdep assertion and can be
 * ignored.
 */
#define for_each_pwq(pwq, wq)						\
	list_for_each_entry_rcu((pwq), &(wq)->pwqs, pwqs_node,		\
				 lockdep_is_held(&(wq->mutex)))

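/*
 * Illustrative usage sketch (not part of the upstream file):
 *
 *	struct pool_workqueue *pwq;
 *	int n = 0;
 *
 *	rcu_read_lock();
 *	for_each_pwq(pwq, wq)
 *		n++;
 *	rcu_read_unlock();
 */
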
#ifdef CONFIG_DEBUG_OBJECTS_WORK

static const struct debug_obj_descr work_debug_descr;

static void *work_debug_hint(void *addr)
{
	return ((struct work_struct *) addr)->func;
}

static bool work_is_static_object(void *addr)
{
	struct work_struct *work = addr;

	return test_bit(WORK_STRUCT_STATIC_BIT, work_data_bits(work));
}

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static bool work_fixup_init(void *addr, enum debug_obj_state state)
{
	struct work_struct *work = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		cancel_work_sync(work);
		debug_object_init(work, &work_debug_descr);
		return true;
	default:
		return false;
	}
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static bool work_fixup_free(void *addr, enum debug_obj_state state)
{
	struct work_struct *work = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		cancel_work_sync(work);
		debug_object_free(work, &work_debug_descr);
		return true;
	default:
		return false;
	}
}

static const struct debug_obj_descr work_debug_descr = {
	.name		= "work_struct",
	.debug_hint	= work_debug_hint,
	.is_static_object = work_is_static_object,
	.fixup_init	= work_fixup_init,
	.fixup_free	= work_fixup_free,
};

static inline void debug_work_activate(struct work_struct *work)
{
	debug_object_activate(work, &work_debug_descr);
}

static inline void debug_work_deactivate(struct work_struct *work)
{
	debug_object_deactivate(work, &work_debug_descr);
}

void __init_work(struct work_struct *work, int onstack)
{
	if (onstack)
		debug_object_init_on_stack(work, &work_debug_descr);
	else
		debug_object_init(work, &work_debug_descr);
}
EXPORT_SYMBOL_GPL(__init_work);

void destroy_work_on_stack(struct work_struct *work)
{
	debug_object_free(work, &work_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_work_on_stack);

void destroy_delayed_work_on_stack(struct delayed_work *work)
{
	destroy_timer_on_stack(&work->timer);
	debug_object_free(&work->work, &work_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_delayed_work_on_stack);

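/*
 * Illustrative usage sketch (not part of the upstream file): on-stack work
 * items must be initialized with the _ONSTACK variant and destroyed
 * explicitly so debugobjects can track them.  some_work_fn is a
 * placeholder for the caller's work function.
 *
 *	struct work_struct work;
 *
 *	INIT_WORK_ONSTACK(&work, some_work_fn);
 *	schedule_work(&work);
 *	flush_work(&work);
 *	destroy_work_on_stack(&work);
 */
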
#else
static inline void debug_work_activate(struct work_struct *work) { }
static inline void debug_work_deactivate(struct work_struct *work) { }
#endif

/**
 * worker_pool_assign_id - allocate ID and assign it to @pool
 * @pool: the pool pointer of interest
 *
 * Returns 0 if ID in [0, WORK_OFFQ_POOL_NONE) is allocated and assigned
 * successfully, -errno on failure.
 */
static int worker_pool_assign_id(struct worker_pool *pool)
{
	int ret;

	lockdep_assert_held(&wq_pool_mutex);

	ret = idr_alloc(&worker_pool_idr, pool, 0, WORK_OFFQ_POOL_NONE,
			GFP_KERNEL);
	if (ret >= 0) {
		pool->id = ret;
		return 0;
	}
	return ret;
}

/**
 * unbound_pwq_by_node - return the unbound pool_workqueue for the given node
 * @wq: the target workqueue
 * @node: the node ID
 *
 * This must be called with any of wq_pool_mutex, wq->mutex or RCU
 * read locked.
 * If the pwq needs to be used beyond the locking in effect, the caller is
 * responsible for guaranteeing that the pwq stays online.
 *
 * Return: The unbound pool_workqueue for @node.
 */
static struct pool_workqueue *unbound_pwq_by_node(struct workqueue_struct *wq,
						  int node)
{
	assert_rcu_or_wq_mutex_or_pool_mutex(wq);

	/*
	 * XXX: @node can be NUMA_NO_NODE if CPU goes offline while a
	 * delayed item is pending.  The plan is to keep CPU -> NODE
	 * mapping valid and stable across CPU on/offlines.  Once that
	 * happens, this workaround can be removed.
	 */
	if (unlikely(node == NUMA_NO_NODE))
		return wq->dfl_pwq;

	return rcu_dereference_raw(wq->numa_pwq_tbl[node]);
}

static unsigned int work_color_to_flags(int color)
{
	return color << WORK_STRUCT_COLOR_SHIFT;
}

static int get_work_color(unsigned long work_data)
{
	return (work_data >> WORK_STRUCT_COLOR_SHIFT) &
		((1 << WORK_STRUCT_COLOR_BITS) - 1);
}

static int work_next_color(int color)
{
	return (color + 1) % WORK_NR_COLORS;
}

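/*
 * Worked example (not part of the upstream file): colors cycle modulo
 * WORK_NR_COLORS, so work_next_color(WORK_NR_COLORS - 1) wraps back to 0,
 * and the helpers above round-trip a color through the color field of
 * work->data:
 *
 *	get_work_color(work_color_to_flags(c)) == c	// any valid color c
 */
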
/*
 * While queued, %WORK_STRUCT_PWQ is set and the non-flag bits of a work's
 * data contain the pointer to the queued pwq.  Once execution starts, the
 * flag is cleared and the high bits contain OFFQ flags and pool ID.
 *
 * set_work_pwq(), set_work_pool_and_clear_pending(), mark_work_canceling()
 * and clear_work_data() can be used to set the pwq, pool or clear
 * work->data.  These functions should only be called while the work is
 * owned - i.e. while the PENDING bit is set.
 *
 * get_work_pool() and get_work_pwq() can be used to obtain the pool or pwq
 * corresponding to a work.  Pool is available once the work has been
 * queued anywhere after initialization until it is sync canceled.  pwq is
 * available only while the work item is queued.
 *
 * %WORK_OFFQ_CANCELING is used to mark a work item which is being
 * canceled.  While being canceled, a work item may have its PENDING set
 * but stay off timer and worklist for arbitrarily long and nobody should
 * try to steal the PENDING bit.
 */
static inline void set_work_data(struct work_struct *work, unsigned long data,
				 unsigned long flags)
{
	WARN_ON_ONCE(!work_pending(work));
	atomic_long_set(&work->data, data | flags | work_static(work));
}

static void set_work_pwq(struct work_struct *work, struct pool_workqueue *pwq,
			 unsigned long extra_flags)
{
	set_work_data(work, (unsigned long)pwq,
		      WORK_STRUCT_PENDING | WORK_STRUCT_PWQ | extra_flags);
}

static void set_work_pool_and_keep_pending(struct work_struct *work,
					   int pool_id)
{
	set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT,
		      WORK_STRUCT_PENDING);
}

static void set_work_pool_and_clear_pending(struct work_struct *work,
					    int pool_id)
{
	/*
	 * The following wmb is paired with the implied mb in
	 * test_and_set_bit(PENDING) and ensures all updates to @work made
	 * here are visible to and precede any updates by the next PENDING
	 * owner.
	 */
	smp_wmb();
	set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT, 0);
	/*
	 * The following mb guarantees that previous clear of a PENDING bit
	 * will not be reordered with any speculative LOADS or STORES from
	 * work->current_func, which is executed afterwards.  This possible
	 * reordering can lead to a missed execution on attempt to queue
	 * the same @work.  E.g. consider this case:
	 *
	 *   CPU#0                         CPU#1
	 *   ----------------------------  --------------------------------
	 *
	 * 1  STORE event_indicated
	 * 2  queue_work_on() {
	 * 3    test_and_set_bit(PENDING)
	 * 4 }                             set_..._and_clear_pending() {
	 * 5                                 set_work_data() # clear bit
	 * 6                                 smp_mb()
	 * 7                               work->current_func() {
	 * 8				      LOAD event_indicated
	 *				   }
	 *
	 * Without an explicit full barrier speculative LOAD on line 8 can
	 * be executed before CPU#0 does STORE on line 1.  If that happens,
	 * CPU#0 observes the PENDING bit is still set and a new execution
	 * of @work is not queued, in the hope that CPU#1 will eventually
	 * finish the queued @work.  Meanwhile CPU#1 does not see that
	 * event_indicated is set, because the speculative LOAD was executed
	 * before the actual STORE.
	 */
	smp_mb();
}

static void clear_work_data(struct work_struct *work)
{
	smp_wmb();	/* see set_work_pool_and_clear_pending() */
	set_work_data(work, WORK_STRUCT_NO_POOL, 0);
}

static struct pool_workqueue *get_work_pwq(struct work_struct *work)
{
	unsigned long data = atomic_long_read(&work->data);

	if (data & WORK_STRUCT_PWQ)
		return (void *)(data & WORK_STRUCT_WQ_DATA_MASK);
	else
		return NULL;
}

/**
 * get_work_pool - return the worker_pool a given work was associated with
 * @work: the work item of interest
 *
 * Pools are created and destroyed under wq_pool_mutex, and read access is
 * allowed under the RCU read lock.  As such, this function should be
 * called under wq_pool_mutex or inside of a rcu_read_lock() region.
 *
 * All fields of the returned pool are accessible as long as the above
 * mentioned locking is in effect.  If the returned pool needs to be used
 * beyond the critical section, the caller is responsible for ensuring the
 * returned pool is and stays online.
 *
 * Return: The worker_pool @work was last associated with.  %NULL if none.
 */
static struct worker_pool *get_work_pool(struct work_struct *work)
{
	unsigned long data = atomic_long_read(&work->data);
	int pool_id;

	assert_rcu_or_pool_mutex();

	if (data & WORK_STRUCT_PWQ)
		return ((struct pool_workqueue *)
			(data & WORK_STRUCT_WQ_DATA_MASK))->pool;

	pool_id = data >> WORK_OFFQ_POOL_SHIFT;
	if (pool_id == WORK_OFFQ_POOL_NONE)
		return NULL;

	return idr_find(&worker_pool_idr, pool_id);
}

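/*
 * Illustrative usage sketch (not part of the upstream file): the returned
 * pool is only guaranteed to stay valid inside the RCU read-side critical
 * section.
 *
 *	struct worker_pool *pool;
 *
 *	rcu_read_lock();
 *	pool = get_work_pool(work);
 *	if (pool)
 *		...		// use pool under RCU protection only
 *	rcu_read_unlock();
 */
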
/**
 * get_work_pool_id - return the worker pool ID a given work is associated with
 * @work: the work item of interest
 *
 * Return: The worker_pool ID @work was last associated with.
 * %WORK_OFFQ_POOL_NONE if none.
 */
static int get_work_pool_id(struct work_struct *work)
{
	unsigned long data = atomic_long_read(&work->data);

	if (data & WORK_STRUCT_PWQ)
		return ((struct pool_workqueue *)
			(data & WORK_STRUCT_WQ_DATA_MASK))->pool->id;

	return data >> WORK_OFFQ_POOL_SHIFT;
}

static void mark_work_canceling(struct work_struct *work)
{
	unsigned long pool_id = get_work_pool_id(work);

	pool_id <<= WORK_OFFQ_POOL_SHIFT;
	set_work_data(work, pool_id | WORK_OFFQ_CANCELING, WORK_STRUCT_PENDING);
}

static bool work_is_canceling(struct work_struct *work)
{
	unsigned long data = atomic_long_read(&work->data);

	return !(data & WORK_STRUCT_PWQ) && (data & WORK_OFFQ_CANCELING);
}

/*
 * Policy functions.  These define the policies on how the global worker
 * pools are managed.  Unless noted otherwise, these functions assume that
 * they're being called with pool->lock held.
 */

static bool __need_more_worker(struct worker_pool *pool)
{
	return !pool->nr_running;
}

/*
 * Need to wake up a worker?  Called from anything but currently
 * running workers.
 *
 * Note that, because unbound workers never contribute to nr_running, this
 * function will always return %true for unbound pools as long as the
 * worklist isn't empty.
 */
static bool need_more_worker(struct worker_pool *pool)
{
	return !list_empty(&pool->worklist) && __need_more_worker(pool);
}

/* Can I start working?  Called from busy but !running workers. */
static bool may_start_working(struct worker_pool *pool)
{
	return pool->nr_idle;
}

/* Do I need to keep working?  Called from currently running workers. */
static bool keep_working(struct worker_pool *pool)
{
	return !list_empty(&pool->worklist) && (pool->nr_running <= 1);
}

/* Do we need a new worker?  Called from manager. */
static bool need_to_create_worker(struct worker_pool *pool)
{
	return need_more_worker(pool) && !may_start_working(pool);
}

/* Do we have too many workers and should some go away? */
static bool too_many_workers(struct worker_pool *pool)
{
	bool managing = pool->flags & POOL_MANAGER_ACTIVE;
	int nr_idle = pool->nr_idle + managing; /* manager is considered idle */
	int nr_busy = pool->nr_workers - nr_idle;

	return nr_idle > 2 && (nr_idle - 2) * MAX_IDLE_WORKERS_RATIO >= nr_busy;
}

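/*
 * Worked example (not part of the upstream file): with
 * MAX_IDLE_WORKERS_RATIO == 4, a pool with nr_workers == 12, nr_idle == 5
 * and no active manager has nr_busy == 7; since 5 > 2 and
 * (5 - 2) * 4 == 12 >= 7, too_many_workers() returns true and idle
 * workers become candidates for timeout-based culling.
 */
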
/*
 * Wake up functions.
 */

/* Return the first idle worker.  Called with pool->lock held. */
static struct worker *first_idle_worker(struct worker_pool *pool)
{
	if (unlikely(list_empty(&pool->idle_list)))
		return NULL;

	return list_first_entry(&pool->idle_list, struct worker, entry);
}

/**
 * wake_up_worker - wake up an idle worker
 * @pool: worker pool to wake worker from
 *
 * Wake up the first idle worker of @pool.
 *
 * CONTEXT:
 * raw_spin_lock_irq(pool->lock).
 */
static void wake_up_worker(struct worker_pool *pool)
{
	struct worker *worker = first_idle_worker(pool);

	if (likely(worker))
		wake_up_process(worker->task);
}

/**
 * wq_worker_running - a worker is running again
 * @task: task waking up
 *
 * This function is called when a worker returns from schedule().
 */
void wq_worker_running(struct task_struct *task)
{
	struct worker *worker = kthread_data(task);

	if (!worker->sleeping)
		return;

	/*
	 * If preempted by unbind_workers() between the WORKER_NOT_RUNNING check
	 * and the nr_running increment below, we may ruin the nr_running reset
	 * and leave with an unexpected pool->nr_running == 1 on the newly unbound
	 * pool. Protect against such race.
	 */
	preempt_disable();
	if (!(worker->flags & WORKER_NOT_RUNNING))
		worker->pool->nr_running++;
	preempt_enable();
	worker->sleeping = 0;
}

/**
 * wq_worker_sleeping - a worker is going to sleep
 * @task: task going to sleep
 *
 * This function is called from schedule() when a busy worker is
 * going to sleep.
 */
void wq_worker_sleeping(struct task_struct *task)
{
	struct worker *worker = kthread_data(task);
	struct worker_pool *pool;

	/*
	 * Rescuers, which may not have all the fields set up like normal
	 * workers, also reach here; let's not access anything before
	 * checking NOT_RUNNING.
	 */
	if (worker->flags & WORKER_NOT_RUNNING)
		return;

	pool = worker->pool;

	/* Return if preempted before wq_worker_running() was reached */
	if (worker->sleeping)
		return;

	worker->sleeping = 1;
	raw_spin_lock_irq(&pool->lock);

	/*
	 * Recheck in case unbind_workers() preempted us. We don't
	 * want to decrement nr_running after the worker is unbound
	 * and nr_running has been reset.
	 */
	if (worker->flags & WORKER_NOT_RUNNING) {
		raw_spin_unlock_irq(&pool->lock);
		return;
	}

	pool->nr_running--;
	if (need_more_worker(pool))
		wake_up_worker(pool);
	raw_spin_unlock_irq(&pool->lock);
}

/**
 * wq_worker_last_func - retrieve worker's last work function
 * @task: Task to retrieve last work function of.
 *
 * Determine the last function a worker executed. This is called from
 * the scheduler to get a worker's last known identity.
 *
 * CONTEXT:
 * raw_spin_lock_irq(rq->lock)
 *
 * This function is called during schedule() when a kworker is going
 * to sleep. It's used by psi to identify aggregation workers during
 * dequeuing, to allow periodic aggregation to shut off when that
 * worker is the last task in the system or cgroup to go to sleep.
 *
 * As this function doesn't involve any workqueue-related locking, it
 * only returns stable values when called from inside the scheduler's
 * queuing and dequeuing paths, when @task, which must be a kworker,
 * is guaranteed to not be processing any works.
 *
 * Return:
 * The last work function %current executed as a worker, NULL if it
 * hasn't executed any work yet.
 */
work_func_t wq_worker_last_func(struct task_struct *task)
{
	struct worker *worker = kthread_data(task);

	return worker->last_func;
}

9631b69ac6bSJohannes Weiner /**
964e22bee78STejun Heo  * worker_set_flags - set worker flags and adjust nr_running accordingly
965cb444766STejun Heo  * @worker: self
966d302f017STejun Heo  * @flags: flags to set
967d302f017STejun Heo  *
968228f1d00SLai Jiangshan  * Set @flags in @worker->flags and adjust nr_running accordingly.
969d302f017STejun Heo  *
970cb444766STejun Heo  * CONTEXT:
971a9b8a985SSebastian Andrzej Siewior  * raw_spin_lock_irq(pool->lock)
972d302f017STejun Heo  */
973228f1d00SLai Jiangshan static inline void worker_set_flags(struct worker *worker, unsigned int flags)
974d302f017STejun Heo {
975bd7bdd43STejun Heo 	struct worker_pool *pool = worker->pool;
976e22bee78STejun Heo 
977cb444766STejun Heo 	WARN_ON_ONCE(worker->task != current);
978cb444766STejun Heo 
979228f1d00SLai Jiangshan 	/* If transitioning into NOT_RUNNING, adjust nr_running. */
980e22bee78STejun Heo 	if ((flags & WORKER_NOT_RUNNING) &&
981e22bee78STejun Heo 	    !(worker->flags & WORKER_NOT_RUNNING)) {
982bc35f7efSLai Jiangshan 		pool->nr_running--;
983e22bee78STejun Heo 	}
984e22bee78STejun Heo 
985d302f017STejun Heo 	worker->flags |= flags;
986d302f017STejun Heo }
987d302f017STejun Heo 
988d302f017STejun Heo /**
989e22bee78STejun Heo  * worker_clr_flags - clear worker flags and adjust nr_running accordingly
990cb444766STejun Heo  * @worker: self
991d302f017STejun Heo  * @flags: flags to clear
992d302f017STejun Heo  *
993e22bee78STejun Heo  * Clear @flags in @worker->flags and adjust nr_running accordingly.
994d302f017STejun Heo  *
995cb444766STejun Heo  * CONTEXT:
996a9b8a985SSebastian Andrzej Siewior  * raw_spin_lock_irq(pool->lock)
997d302f017STejun Heo  */
998d302f017STejun Heo static inline void worker_clr_flags(struct worker *worker, unsigned int flags)
999d302f017STejun Heo {
100063d95a91STejun Heo 	struct worker_pool *pool = worker->pool;
1001e22bee78STejun Heo 	unsigned int oflags = worker->flags;
1002e22bee78STejun Heo 
1003cb444766STejun Heo 	WARN_ON_ONCE(worker->task != current);
1004cb444766STejun Heo 
1005d302f017STejun Heo 	worker->flags &= ~flags;
1006e22bee78STejun Heo 
100742c025f3STejun Heo 	/*
100842c025f3STejun Heo 	 * If transitioning out of NOT_RUNNING, increment nr_running.  Note
100942c025f3STejun Heo 	 * that the nested NOT_RUNNING is not a noop.  NOT_RUNNING is mask
101042c025f3STejun Heo 	 * of multiple flags, not a single flag.
101142c025f3STejun Heo 	 */
1012e22bee78STejun Heo 	if ((flags & WORKER_NOT_RUNNING) && (oflags & WORKER_NOT_RUNNING))
1013e22bee78STejun Heo 		if (!(worker->flags & WORKER_NOT_RUNNING))
1014bc35f7efSLai Jiangshan 			pool->nr_running++;
1015d302f017STejun Heo }
1016d302f017STejun Heo 
1017d302f017STejun Heo /**
10188cca0eeaSTejun Heo  * find_worker_executing_work - find worker which is executing a work
1019c9e7cf27STejun Heo  * @pool: pool of interest
10208cca0eeaSTejun Heo  * @work: work to find worker for
10218cca0eeaSTejun Heo  *
1022c9e7cf27STejun Heo  * Find a worker which is executing @work on @pool by searching
1023c9e7cf27STejun Heo  * @pool->busy_hash which is keyed by the address of @work.  For a worker
1024a2c1c57bSTejun Heo  * to match, its current execution should match the address of @work and
1025a2c1c57bSTejun Heo  * its work function.  This is to avoid unwanted dependency between
1026a2c1c57bSTejun Heo  * unrelated work executions through a work item being recycled while still
1027a2c1c57bSTejun Heo  * being executed.
1028a2c1c57bSTejun Heo  *
1029a2c1c57bSTejun Heo  * This is a bit tricky.  A work item may be freed once its execution
1030a2c1c57bSTejun Heo  * starts and nothing prevents the freed area from being recycled for
1031a2c1c57bSTejun Heo  * another work item.  If the same work item address ends up being reused
1032a2c1c57bSTejun Heo  * before the original execution finishes, workqueue will identify the
1033a2c1c57bSTejun Heo  * recycled work item as currently executing and make it wait until the
1034a2c1c57bSTejun Heo  * current execution finishes, introducing an unwanted dependency.
1035a2c1c57bSTejun Heo  *
1036c5aa87bbSTejun Heo  * This function checks the work item address and work function to avoid
1037c5aa87bbSTejun Heo  * false positives.  Note that this isn't complete as one may construct a
1038c5aa87bbSTejun Heo  * work function which can introduce a dependency onto itself through a
1039c5aa87bbSTejun Heo  * recycled work item.  Well, if somebody wants to shoot themselves in the
1040c5aa87bbSTejun Heo  * foot that badly, there's only so much we can do, and if such a deadlock
1041c5aa87bbSTejun Heo  * actually occurs, it should be easy to locate the culprit work function.
10428cca0eeaSTejun Heo  *
10438cca0eeaSTejun Heo  * CONTEXT:
1044a9b8a985SSebastian Andrzej Siewior  * raw_spin_lock_irq(pool->lock).
10458cca0eeaSTejun Heo  *
1046d185af30SYacine Belkadi  * Return:
1047d185af30SYacine Belkadi  * Pointer to worker which is executing @work if found, %NULL
10488cca0eeaSTejun Heo  * otherwise.
10498cca0eeaSTejun Heo  */
1050c9e7cf27STejun Heo static struct worker *find_worker_executing_work(struct worker_pool *pool,
10518cca0eeaSTejun Heo 						 struct work_struct *work)
10528cca0eeaSTejun Heo {
105342f8570fSSasha Levin 	struct worker *worker;
105442f8570fSSasha Levin 
1055b67bfe0dSSasha Levin 	hash_for_each_possible(pool->busy_hash, worker, hentry,
1056a2c1c57bSTejun Heo 			       (unsigned long)work)
1057a2c1c57bSTejun Heo 		if (worker->current_work == work &&
1058a2c1c57bSTejun Heo 		    worker->current_func == work->func)
105942f8570fSSasha Levin 			return worker;
106042f8570fSSasha Levin 
106142f8570fSSasha Levin 	return NULL;
10628cca0eeaSTejun Heo }
10638cca0eeaSTejun Heo 
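/*
 * Concrete recycling scenario which the ->current_func check above
 * guards against (a sketch; obj and obj2 are hypothetical):
 *
 *	1. queue_work(wq, &obj->work); the work fn starts and frees obj
 *	2. the allocator hands the same memory back out as obj2
 *	3. queue_work(wq, &obj2->work); same address, different function
 *
 * Matching on address alone would treat step 3 as the still-running
 * item from step 1 and needlessly serialize the two executions.
 */
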
10648cca0eeaSTejun Heo /**
1065bf4ede01STejun Heo  * move_linked_works - move linked works to a list
1066bf4ede01STejun Heo  * @work: start of series of works to be scheduled
1067bf4ede01STejun Heo  * @head: target list to append @work to
1068402dd89dSShailendra Verma  * @nextp: out parameter for nested worklist walking
1069bf4ede01STejun Heo  *
1070bf4ede01STejun Heo  * Schedule linked works starting from @work to @head.  Work series to
1071bf4ede01STejun Heo  * be scheduled starts at @work and includes any consecutive work with
1072bf4ede01STejun Heo  * WORK_STRUCT_LINKED set in its predecessor.
1073bf4ede01STejun Heo  *
1074bf4ede01STejun Heo  * If @nextp is not NULL, it's updated to point to the next work of
1075bf4ede01STejun Heo  * the last scheduled work.  This allows move_linked_works() to be
1076bf4ede01STejun Heo  * nested inside outer list_for_each_entry_safe().
1077bf4ede01STejun Heo  *
1078bf4ede01STejun Heo  * CONTEXT:
1079a9b8a985SSebastian Andrzej Siewior  * raw_spin_lock_irq(pool->lock).
1080bf4ede01STejun Heo  */
1081bf4ede01STejun Heo static void move_linked_works(struct work_struct *work, struct list_head *head,
1082bf4ede01STejun Heo 			      struct work_struct **nextp)
1083bf4ede01STejun Heo {
1084bf4ede01STejun Heo 	struct work_struct *n;
1085bf4ede01STejun Heo 
1086bf4ede01STejun Heo 	/*
1087bf4ede01STejun Heo 	 * Linked worklist will always end before the end of the list,
1088bf4ede01STejun Heo 	 * use NULL for list head.
1089bf4ede01STejun Heo 	 */
1090bf4ede01STejun Heo 	list_for_each_entry_safe_from(work, n, NULL, entry) {
1091bf4ede01STejun Heo 		list_move_tail(&work->entry, head);
1092bf4ede01STejun Heo 		if (!(*work_data_bits(work) & WORK_STRUCT_LINKED))
1093bf4ede01STejun Heo 			break;
1094bf4ede01STejun Heo 	}
1095bf4ede01STejun Heo 
1096bf4ede01STejun Heo 	/*
1097bf4ede01STejun Heo 	 * If we're already inside safe list traversal and have moved
1098bf4ede01STejun Heo 	 * multiple works to the scheduled queue, the next position
1099bf4ede01STejun Heo 	 * needs to be updated.
1100bf4ede01STejun Heo 	 */
1101bf4ede01STejun Heo 	if (nextp)
1102bf4ede01STejun Heo 		*nextp = n;
1103bf4ede01STejun Heo }
1104bf4ede01STejun Heo 
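/*
 * Worklist layout walked by move_linked_works() above (a sketch):
 *
 *	... -> W1[LINKED] -> W2[LINKED] -> W3 -> W4 -> ...
 *
 * Called with @work == W1, the group W1..W3 is moved to @head (the walk
 * stops after the first entry without WORK_STRUCT_LINKED) and *@nextp,
 * if supplied, is left pointing at W4.
 */
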
11058864b4e5STejun Heo /**
11068864b4e5STejun Heo  * get_pwq - get an extra reference on the specified pool_workqueue
11078864b4e5STejun Heo  * @pwq: pool_workqueue to get
11088864b4e5STejun Heo  *
11098864b4e5STejun Heo  * Obtain an extra reference on @pwq.  The caller must guarantee that
11108864b4e5STejun Heo  * @pwq has a positive refcnt and must be holding the matching pool->lock.
11118864b4e5STejun Heo  */
11128864b4e5STejun Heo static void get_pwq(struct pool_workqueue *pwq)
11138864b4e5STejun Heo {
11148864b4e5STejun Heo 	lockdep_assert_held(&pwq->pool->lock);
11158864b4e5STejun Heo 	WARN_ON_ONCE(pwq->refcnt <= 0);
11168864b4e5STejun Heo 	pwq->refcnt++;
11178864b4e5STejun Heo }
11188864b4e5STejun Heo 
11198864b4e5STejun Heo /**
11208864b4e5STejun Heo  * put_pwq - put a pool_workqueue reference
11218864b4e5STejun Heo  * @pwq: pool_workqueue to put
11228864b4e5STejun Heo  *
11238864b4e5STejun Heo  * Drop a reference to @pwq.  If its refcnt reaches zero, schedule its
11248864b4e5STejun Heo  * destruction.  The caller should be holding the matching pool->lock.
11258864b4e5STejun Heo  */
11268864b4e5STejun Heo static void put_pwq(struct pool_workqueue *pwq)
11278864b4e5STejun Heo {
11288864b4e5STejun Heo 	lockdep_assert_held(&pwq->pool->lock);
11298864b4e5STejun Heo 	if (likely(--pwq->refcnt))
11308864b4e5STejun Heo 		return;
11318864b4e5STejun Heo 	if (WARN_ON_ONCE(!(pwq->wq->flags & WQ_UNBOUND)))
11328864b4e5STejun Heo 		return;
11338864b4e5STejun Heo 	/*
11348864b4e5STejun Heo 	 * @pwq can't be released under pool->lock, bounce to
11358864b4e5STejun Heo 	 * pwq_unbound_release_workfn().  This never recurses on the same
11368864b4e5STejun Heo 	 * pool->lock as this path is taken only for unbound workqueues and
11378864b4e5STejun Heo 	 * the release work item is scheduled on a per-cpu workqueue.  To
11388864b4e5STejun Heo 	 * avoid lockdep warning, unbound pool->locks are given lockdep
11398864b4e5STejun Heo 	 * subclass of 1 in get_unbound_pool().
11408864b4e5STejun Heo 	 */
11418864b4e5STejun Heo 	schedule_work(&pwq->unbound_release_work);
11428864b4e5STejun Heo }
11438864b4e5STejun Heo 
1144dce90d47STejun Heo /**
1145dce90d47STejun Heo  * put_pwq_unlocked - put_pwq() with surrounding pool lock/unlock
1146dce90d47STejun Heo  * @pwq: pool_workqueue to put (can be %NULL)
1147dce90d47STejun Heo  *
1148dce90d47STejun Heo  * put_pwq() with locking.  This function also allows %NULL @pwq.
1149dce90d47STejun Heo  */
1150dce90d47STejun Heo static void put_pwq_unlocked(struct pool_workqueue *pwq)
1151dce90d47STejun Heo {
1152dce90d47STejun Heo 	if (pwq) {
1153dce90d47STejun Heo 		/*
115424acfb71SThomas Gleixner 		 * As both pwqs and pools are RCU protected, the
1155dce90d47STejun Heo 		 * following lock operations are safe.
1156dce90d47STejun Heo 		 */
1157a9b8a985SSebastian Andrzej Siewior 		raw_spin_lock_irq(&pwq->pool->lock);
1158dce90d47STejun Heo 		put_pwq(pwq);
1159a9b8a985SSebastian Andrzej Siewior 		raw_spin_unlock_irq(&pwq->pool->lock);
1160dce90d47STejun Heo 	}
1161dce90d47STejun Heo }
1162dce90d47STejun Heo 
1163f97a4a1aSLai Jiangshan static void pwq_activate_inactive_work(struct work_struct *work)
1164bf4ede01STejun Heo {
1165112202d9STejun Heo 	struct pool_workqueue *pwq = get_work_pwq(work);
1166bf4ede01STejun Heo 
1167bf4ede01STejun Heo 	trace_workqueue_activate_work(work);
116882607adcSTejun Heo 	if (list_empty(&pwq->pool->worklist))
116982607adcSTejun Heo 		pwq->pool->watchdog_ts = jiffies;
1170112202d9STejun Heo 	move_linked_works(work, &pwq->pool->worklist, NULL);
1171f97a4a1aSLai Jiangshan 	__clear_bit(WORK_STRUCT_INACTIVE_BIT, work_data_bits(work));
1172112202d9STejun Heo 	pwq->nr_active++;
1173bf4ede01STejun Heo }
1174bf4ede01STejun Heo 
1175f97a4a1aSLai Jiangshan static void pwq_activate_first_inactive(struct pool_workqueue *pwq)
11763aa62497SLai Jiangshan {
1177f97a4a1aSLai Jiangshan 	struct work_struct *work = list_first_entry(&pwq->inactive_works,
11783aa62497SLai Jiangshan 						    struct work_struct, entry);
11793aa62497SLai Jiangshan 
1180f97a4a1aSLai Jiangshan 	pwq_activate_inactive_work(work);
11813aa62497SLai Jiangshan }
11823aa62497SLai Jiangshan 
1183bf4ede01STejun Heo /**
1184112202d9STejun Heo  * pwq_dec_nr_in_flight - decrement pwq's nr_in_flight
1185112202d9STejun Heo  * @pwq: pwq of interest
1186c4560c2cSLai Jiangshan  * @work_data: work_data of work which left the queue
1187bf4ede01STejun Heo  *
1188bf4ede01STejun Heo  * A work item has either completed or been removed from the pending
1189112202d9STejun Heo  * queue; decrement nr_in_flight of its pwq and handle workqueue flushing.
1190bf4ede01STejun Heo  *
1191bf4ede01STejun Heo  * CONTEXT:
1192a9b8a985SSebastian Andrzej Siewior  * raw_spin_lock_irq(pool->lock).
1193bf4ede01STejun Heo  */
1194c4560c2cSLai Jiangshan static void pwq_dec_nr_in_flight(struct pool_workqueue *pwq, unsigned long work_data)
1195bf4ede01STejun Heo {
1196c4560c2cSLai Jiangshan 	int color = get_work_color(work_data);
1197c4560c2cSLai Jiangshan 
1198018f3a13SLai Jiangshan 	if (!(work_data & WORK_STRUCT_INACTIVE)) {
1199112202d9STejun Heo 		pwq->nr_active--;
1200f97a4a1aSLai Jiangshan 		if (!list_empty(&pwq->inactive_works)) {
1201f97a4a1aSLai Jiangshan 			/* one down, submit an inactive one */
1202112202d9STejun Heo 			if (pwq->nr_active < pwq->max_active)
1203f97a4a1aSLai Jiangshan 				pwq_activate_first_inactive(pwq);
1204bf4ede01STejun Heo 		}
1205018f3a13SLai Jiangshan 	}
1206018f3a13SLai Jiangshan 
1207018f3a13SLai Jiangshan 	pwq->nr_in_flight[color]--;
1208bf4ede01STejun Heo 
1209bf4ede01STejun Heo 	/* is flush in progress and are we at the flushing tip? */
1210112202d9STejun Heo 	if (likely(pwq->flush_color != color))
12118864b4e5STejun Heo 		goto out_put;
1212bf4ede01STejun Heo 
1213bf4ede01STejun Heo 	/* are there still in-flight works? */
1214112202d9STejun Heo 	if (pwq->nr_in_flight[color])
12158864b4e5STejun Heo 		goto out_put;
1216bf4ede01STejun Heo 
1217112202d9STejun Heo 	/* this pwq is done, clear flush_color */
1218112202d9STejun Heo 	pwq->flush_color = -1;
1219bf4ede01STejun Heo 
1220bf4ede01STejun Heo 	/*
1221112202d9STejun Heo 	 * If this was the last pwq, wake up the first flusher.  It
1222bf4ede01STejun Heo 	 * will handle the rest.
1223bf4ede01STejun Heo 	 */
1224112202d9STejun Heo 	if (atomic_dec_and_test(&pwq->wq->nr_pwqs_to_flush))
1225112202d9STejun Heo 		complete(&pwq->wq->first_flusher->done);
12268864b4e5STejun Heo out_put:
12278864b4e5STejun Heo 	put_pwq(pwq);
1228bf4ede01STejun Heo }
1229bf4ede01STejun Heo 
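/*
 * Flush accounting sketch (the flush machinery itself lives further
 * down in this file): a flush stamps its color into pwq->flush_color on
 * every pwq and counts the pwqs in wq->nr_pwqs_to_flush.  As each pwq
 * drains its nr_in_flight[flush_color] to zero above, it decrements
 * that counter, and the pwq that brings it to zero wakes the first
 * flusher.
 */
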
123036e227d2STejun Heo /**
1231bbb68dfaSTejun Heo  * try_to_grab_pending - steal work item from worklist and disable irq
123236e227d2STejun Heo  * @work: work item to steal
123336e227d2STejun Heo  * @is_dwork: @work is a delayed_work
1234bbb68dfaSTejun Heo  * @flags: place to store irq state
123536e227d2STejun Heo  *
123636e227d2STejun Heo  * Try to grab PENDING bit of @work.  This function can handle @work in any
1237d185af30SYacine Belkadi  * stable state - idle, on timer or on worklist.
123836e227d2STejun Heo  *
1239d185af30SYacine Belkadi  * Return:
12403eb6b31bSMauro Carvalho Chehab  *
12413eb6b31bSMauro Carvalho Chehab  *  ========	================================================================
124236e227d2STejun Heo  *  1		if @work was pending and we successfully stole PENDING
124336e227d2STejun Heo  *  0		if @work was idle and we claimed PENDING
124436e227d2STejun Heo  *  -EAGAIN	if PENDING couldn't be grabbed at the moment, safe to busy-retry
1245bbb68dfaSTejun Heo  *  -ENOENT	if someone else is canceling @work, this state may persist
1246bbb68dfaSTejun Heo  *		for arbitrarily long
12473eb6b31bSMauro Carvalho Chehab  *  ========	================================================================
124836e227d2STejun Heo  *
1249d185af30SYacine Belkadi  * Note:
1250bbb68dfaSTejun Heo  * On >= 0 return, the caller owns @work's PENDING bit.  To avoid getting
1251e0aecdd8STejun Heo  * interrupted while holding PENDING and @work off queue, irq must be
1252e0aecdd8STejun Heo  * disabled on entry.  This, combined with delayed_work->timer being
1253e0aecdd8STejun Heo  * irqsafe, ensures that we return -EAGAIN only for a finite, short period of time.
1254bbb68dfaSTejun Heo  *
1255bbb68dfaSTejun Heo  * On successful return, >= 0, irq is disabled and the caller is
1256bbb68dfaSTejun Heo  * responsible for releasing it using local_irq_restore(*@flags).
1257bbb68dfaSTejun Heo  *
1258e0aecdd8STejun Heo  * This function is safe to call from any context including IRQ handler.
1259bf4ede01STejun Heo  */
1260bbb68dfaSTejun Heo static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
1261bbb68dfaSTejun Heo 			       unsigned long *flags)
1262bf4ede01STejun Heo {
1263d565ed63STejun Heo 	struct worker_pool *pool;
1264112202d9STejun Heo 	struct pool_workqueue *pwq;
1265bf4ede01STejun Heo 
1266bbb68dfaSTejun Heo 	local_irq_save(*flags);
1267bbb68dfaSTejun Heo 
126836e227d2STejun Heo 	/* try to steal the timer if it exists */
126936e227d2STejun Heo 	if (is_dwork) {
127036e227d2STejun Heo 		struct delayed_work *dwork = to_delayed_work(work);
127136e227d2STejun Heo 
1272e0aecdd8STejun Heo 		/*
1273e0aecdd8STejun Heo 		 * dwork->timer is irqsafe.  If del_timer() fails, it's
1274e0aecdd8STejun Heo 		 * guaranteed that the timer is not queued anywhere and not
1275e0aecdd8STejun Heo 		 * running on the local CPU.
1276e0aecdd8STejun Heo 		 */
127736e227d2STejun Heo 		if (likely(del_timer(&dwork->timer)))
127836e227d2STejun Heo 			return 1;
127936e227d2STejun Heo 	}
128036e227d2STejun Heo 
128136e227d2STejun Heo 	/* try to claim PENDING the normal way */
1282bf4ede01STejun Heo 	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)))
1283bf4ede01STejun Heo 		return 0;
1284bf4ede01STejun Heo 
128524acfb71SThomas Gleixner 	rcu_read_lock();
1286bf4ede01STejun Heo 	/*
1287bf4ede01STejun Heo 	 * The queueing is in progress, or it is already queued. Try to
1288bf4ede01STejun Heo 	 * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
1289bf4ede01STejun Heo 	 */
1290d565ed63STejun Heo 	pool = get_work_pool(work);
1291d565ed63STejun Heo 	if (!pool)
1292bbb68dfaSTejun Heo 		goto fail;
1293bf4ede01STejun Heo 
1294a9b8a985SSebastian Andrzej Siewior 	raw_spin_lock(&pool->lock);
1295bf4ede01STejun Heo 	/*
1296112202d9STejun Heo 	 * work->data is guaranteed to point to pwq only while the work
1297112202d9STejun Heo 	 * item is queued on pwq->wq, and both updating work->data to point
1298112202d9STejun Heo 	 * to pwq on queueing and to pool on dequeueing are done under
1299112202d9STejun Heo 	 * pwq->pool->lock.  This in turn guarantees that, if work->data
1300112202d9STejun Heo 	 * points to pwq which is associated with a locked pool, the work
13010b3dae68SLai Jiangshan 	 * item is currently queued on that pool.
1302bf4ede01STejun Heo 	 */
1303112202d9STejun Heo 	pwq = get_work_pwq(work);
1304112202d9STejun Heo 	if (pwq && pwq->pool == pool) {
1305bf4ede01STejun Heo 		debug_work_deactivate(work);
13063aa62497SLai Jiangshan 
13073aa62497SLai Jiangshan 		/*
1308018f3a13SLai Jiangshan 		 * A cancelable inactive work item must be in the
1309018f3a13SLai Jiangshan 		 * pwq->inactive_works since a queued barrier can't be
1310018f3a13SLai Jiangshan 		 * canceled (see the comments in insert_wq_barrier()).
1311018f3a13SLai Jiangshan 		 *
1312f97a4a1aSLai Jiangshan 		 * An inactive work item cannot be grabbed directly because
1313d812796eSLai Jiangshan 		 * it might have linked barrier work items which, if left
1314f97a4a1aSLai Jiangshan 		 * on the inactive_works list, will confuse pwq->nr_active
131516062836STejun Heo 		 * management later on and cause a stall.  Make sure the work
131616062836STejun Heo 		 * item is activated before grabbing.
13173aa62497SLai Jiangshan 		 */
1318f97a4a1aSLai Jiangshan 		if (*work_data_bits(work) & WORK_STRUCT_INACTIVE)
1319f97a4a1aSLai Jiangshan 			pwq_activate_inactive_work(work);
13203aa62497SLai Jiangshan 
1321bf4ede01STejun Heo 		list_del_init(&work->entry);
1322c4560c2cSLai Jiangshan 		pwq_dec_nr_in_flight(pwq, *work_data_bits(work));
132336e227d2STejun Heo 
1324112202d9STejun Heo 		/* work->data points to pwq iff queued, point to pool */
13254468a00fSLai Jiangshan 		set_work_pool_and_keep_pending(work, pool->id);
13264468a00fSLai Jiangshan 
1327a9b8a985SSebastian Andrzej Siewior 		raw_spin_unlock(&pool->lock);
132824acfb71SThomas Gleixner 		rcu_read_unlock();
132936e227d2STejun Heo 		return 1;
1330bf4ede01STejun Heo 	}
1331a9b8a985SSebastian Andrzej Siewior 	raw_spin_unlock(&pool->lock);
1332bbb68dfaSTejun Heo fail:
133324acfb71SThomas Gleixner 	rcu_read_unlock();
1334bbb68dfaSTejun Heo 	local_irq_restore(*flags);
1335bbb68dfaSTejun Heo 	if (work_is_canceling(work))
1336bbb68dfaSTejun Heo 		return -ENOENT;
1337bbb68dfaSTejun Heo 	cpu_relax();
133836e227d2STejun Heo 	return -EAGAIN;
1339bf4ede01STejun Heo }
1340bf4ede01STejun Heo 
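/*
 * Canonical caller pattern (a sketch; mod_delayed_work_on() below is a
 * live instance): busy-retry on -EAGAIN, after which the caller either
 * owns PENDING or knows a cancel is in flight.
 *
 *	unsigned long flags;
 *	int ret;
 *
 *	do {
 *		ret = try_to_grab_pending(work, is_dwork, &flags);
 *	} while (unlikely(ret == -EAGAIN));
 *
 *	if (ret >= 0) {
 *		... PENDING is owned and @work is off all queues ...
 *		local_irq_restore(flags);
 *	}
 */
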
1341bf4ede01STejun Heo /**
1342706026c2STejun Heo  * insert_work - insert a work into a pool
1343112202d9STejun Heo  * @pwq: pwq @work belongs to
13444690c4abSTejun Heo  * @work: work to insert
13454690c4abSTejun Heo  * @head: insertion point
13464690c4abSTejun Heo  * @extra_flags: extra WORK_STRUCT_* flags to set
13474690c4abSTejun Heo  *
1348112202d9STejun Heo  * Insert @work which belongs to @pwq after @head.  @extra_flags is OR'd
1349706026c2STejun Heo  * into the work_struct flags.
13504690c4abSTejun Heo  *
13514690c4abSTejun Heo  * CONTEXT:
1352a9b8a985SSebastian Andrzej Siewior  * raw_spin_lock_irq(pool->lock).
1353365970a1SDavid Howells  */
1354112202d9STejun Heo static void insert_work(struct pool_workqueue *pwq, struct work_struct *work,
1355112202d9STejun Heo 			struct list_head *head, unsigned int extra_flags)
1356b89deed3SOleg Nesterov {
1357112202d9STejun Heo 	struct worker_pool *pool = pwq->pool;
1358e1d8aa9fSFrederic Weisbecker 
1359e89a85d6SWalter Wu 	/* record the work call stack in order to print it in KASAN reports */
1360f70da745SMarco Elver 	kasan_record_aux_stack_noalloc(work);
1361e89a85d6SWalter Wu 
13624690c4abSTejun Heo 	/* we own @work, set data and link */
1363112202d9STejun Heo 	set_work_pwq(work, pwq, extra_flags);
13641a4d9b0aSOleg Nesterov 	list_add_tail(&work->entry, head);
13658864b4e5STejun Heo 	get_pwq(pwq);
1366e22bee78STejun Heo 
136763d95a91STejun Heo 	if (__need_more_worker(pool))
136863d95a91STejun Heo 		wake_up_worker(pool);
1369b89deed3SOleg Nesterov }
1370b89deed3SOleg Nesterov 
1371c8efcc25STejun Heo /*
1372c8efcc25STejun Heo  * Test whether a work item is being queued from another work item
13738d03ecfeSTejun Heo  * executing on the same workqueue @wq.
1374c8efcc25STejun Heo  */
1375c8efcc25STejun Heo static bool is_chained_work(struct workqueue_struct *wq)
1376c8efcc25STejun Heo {
1377c8efcc25STejun Heo 	struct worker *worker;
1378c8efcc25STejun Heo 
13798d03ecfeSTejun Heo 	worker = current_wq_worker();
1380c8efcc25STejun Heo 	/*
1381bf393fd4SBart Van Assche 	 * Return %true iff I'm a worker executing a work item on @wq.  If
13828d03ecfeSTejun Heo 	 * I'm @worker, it's safe to dereference it without locking.
1383c8efcc25STejun Heo 	 */
1384112202d9STejun Heo 	return worker && worker->current_pwq->wq == wq;
1385c8efcc25STejun Heo }
1386c8efcc25STejun Heo 
1387ef557180SMike Galbraith /*
1388ef557180SMike Galbraith  * When queueing an unbound work item to a wq, prefer local CPU if allowed
1389ef557180SMike Galbraith  * by wq_unbound_cpumask.  Otherwise, round robin among the allowed ones to
1390ef557180SMike Galbraith  * avoid perturbing sensitive tasks.
1391ef557180SMike Galbraith  */
1392ef557180SMike Galbraith static int wq_select_unbound_cpu(int cpu)
1393ef557180SMike Galbraith {
1394f303fccbSTejun Heo 	static bool printed_dbg_warning;
1395ef557180SMike Galbraith 	int new_cpu;
1396ef557180SMike Galbraith 
1397f303fccbSTejun Heo 	if (likely(!wq_debug_force_rr_cpu)) {
1398ef557180SMike Galbraith 		if (cpumask_test_cpu(cpu, wq_unbound_cpumask))
1399ef557180SMike Galbraith 			return cpu;
1400f303fccbSTejun Heo 	} else if (!printed_dbg_warning) {
1401f303fccbSTejun Heo 		pr_warn("workqueue: round-robin CPU selection forced, expect performance impact\n");
1402f303fccbSTejun Heo 		printed_dbg_warning = true;
1403f303fccbSTejun Heo 	}
1404f303fccbSTejun Heo 
1405ef557180SMike Galbraith 	if (cpumask_empty(wq_unbound_cpumask))
1406ef557180SMike Galbraith 		return cpu;
1407ef557180SMike Galbraith 
1408ef557180SMike Galbraith 	new_cpu = __this_cpu_read(wq_rr_cpu_last);
1409ef557180SMike Galbraith 	new_cpu = cpumask_next_and(new_cpu, wq_unbound_cpumask, cpu_online_mask);
1410ef557180SMike Galbraith 	if (unlikely(new_cpu >= nr_cpu_ids)) {
1411ef557180SMike Galbraith 		new_cpu = cpumask_first_and(wq_unbound_cpumask, cpu_online_mask);
1412ef557180SMike Galbraith 		if (unlikely(new_cpu >= nr_cpu_ids))
1413ef557180SMike Galbraith 			return cpu;
1414ef557180SMike Galbraith 	}
1415ef557180SMike Galbraith 	__this_cpu_write(wq_rr_cpu_last, new_cpu);
1416ef557180SMike Galbraith 
1417ef557180SMike Galbraith 	return new_cpu;
1418ef557180SMike Galbraith }
1419ef557180SMike Galbraith 
1420d84ff051STejun Heo static void __queue_work(int cpu, struct workqueue_struct *wq,
14211da177e4SLinus Torvalds 			 struct work_struct *work)
14221da177e4SLinus Torvalds {
1423112202d9STejun Heo 	struct pool_workqueue *pwq;
1424c9178087STejun Heo 	struct worker_pool *last_pool;
14251e19ffc6STejun Heo 	struct list_head *worklist;
14268a2e8e5dSTejun Heo 	unsigned int work_flags;
1427b75cac93SJoonsoo Kim 	unsigned int req_cpu = cpu;
14288930cabaSTejun Heo 
14298930cabaSTejun Heo 	/*
14308930cabaSTejun Heo 	 * While a work item is PENDING && off queue, a task trying to
14318930cabaSTejun Heo 	 * steal the PENDING will busy-loop waiting for it to either get
14328930cabaSTejun Heo 	 * queued or lose PENDING.  Grabbing PENDING and queueing should
14338930cabaSTejun Heo 	 * happen with IRQ disabled.
14348930cabaSTejun Heo 	 */
14358e8eb730SFrederic Weisbecker 	lockdep_assert_irqs_disabled();
14361da177e4SLinus Torvalds 
143833e3f0a3SRichard Clark 	/*
143933e3f0a3SRichard Clark 	 * For a draining wq, only work items from the same workqueue are
144033e3f0a3SRichard Clark 	 * allowed. The __WQ_DESTROYING flag helps to spot the case where a
144133e3f0a3SRichard Clark 	 * new work item is queued to a wq after destroy_workqueue(wq).
144233e3f0a3SRichard Clark 	 */
144333e3f0a3SRichard Clark 	if (unlikely(wq->flags & (__WQ_DESTROYING | __WQ_DRAINING) &&
144433e3f0a3SRichard Clark 		     WARN_ON_ONCE(!is_chained_work(wq))))
1445e41e704bSTejun Heo 		return;
144624acfb71SThomas Gleixner 	rcu_read_lock();
14479e8cd2f5STejun Heo retry:
1448aa202f1fSHillf Danton 	/* pwq which will be used unless @work is executing elsewhere */
1449aa202f1fSHillf Danton 	if (wq->flags & WQ_UNBOUND) {
1450df2d5ae4STejun Heo 		if (req_cpu == WORK_CPU_UNBOUND)
1451ef557180SMike Galbraith 			cpu = wq_select_unbound_cpu(raw_smp_processor_id());
1452df2d5ae4STejun Heo 		pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu));
1453aa202f1fSHillf Danton 	} else {
1454aa202f1fSHillf Danton 		if (req_cpu == WORK_CPU_UNBOUND)
1455aa202f1fSHillf Danton 			cpu = raw_smp_processor_id();
1456aa202f1fSHillf Danton 		pwq = per_cpu_ptr(wq->cpu_pwqs, cpu);
1457aa202f1fSHillf Danton 	}
1458f3421797STejun Heo 
145918aa9effSTejun Heo 	/*
1460c9178087STejun Heo 	 * If @work was previously on a different pool, it might still be
1461c9178087STejun Heo 	 * running there, in which case the work needs to be queued on that
1462c9178087STejun Heo 	 * pool to guarantee non-reentrancy.
146318aa9effSTejun Heo 	 */
1464c9e7cf27STejun Heo 	last_pool = get_work_pool(work);
1465112202d9STejun Heo 	if (last_pool && last_pool != pwq->pool) {
146618aa9effSTejun Heo 		struct worker *worker;
146718aa9effSTejun Heo 
1468a9b8a985SSebastian Andrzej Siewior 		raw_spin_lock(&last_pool->lock);
146918aa9effSTejun Heo 
1470c9e7cf27STejun Heo 		worker = find_worker_executing_work(last_pool, work);
147118aa9effSTejun Heo 
1472112202d9STejun Heo 		if (worker && worker->current_pwq->wq == wq) {
1473c9178087STejun Heo 			pwq = worker->current_pwq;
14748594fadeSLai Jiangshan 		} else {
147518aa9effSTejun Heo 			/* meh... not running there, queue here */
1476a9b8a985SSebastian Andrzej Siewior 			raw_spin_unlock(&last_pool->lock);
1477a9b8a985SSebastian Andrzej Siewior 			raw_spin_lock(&pwq->pool->lock);
147818aa9effSTejun Heo 		}
14798930cabaSTejun Heo 	} else {
1480a9b8a985SSebastian Andrzej Siewior 		raw_spin_lock(&pwq->pool->lock);
14818930cabaSTejun Heo 	}
1482502ca9d8STejun Heo 
14839e8cd2f5STejun Heo 	/*
14849e8cd2f5STejun Heo 	 * pwq is determined and locked.  For unbound pools, we could have
14859e8cd2f5STejun Heo 	 * raced with pwq release and it could already be dead.  If its
14869e8cd2f5STejun Heo 	 * refcnt is zero, repeat pwq selection.  Note that a pwq never dies
1487df2d5ae4STejun Heo 	 * without another pwq replacing it in the numa_pwq_tbl or while
1488df2d5ae4STejun Heo 	 * work items are executing on it, so retrying is guaranteed to
14899e8cd2f5STejun Heo 	 * make forward progress.
14909e8cd2f5STejun Heo 	 */
14919e8cd2f5STejun Heo 	if (unlikely(!pwq->refcnt)) {
14929e8cd2f5STejun Heo 		if (wq->flags & WQ_UNBOUND) {
1493a9b8a985SSebastian Andrzej Siewior 			raw_spin_unlock(&pwq->pool->lock);
14949e8cd2f5STejun Heo 			cpu_relax();
14959e8cd2f5STejun Heo 			goto retry;
14969e8cd2f5STejun Heo 		}
14979e8cd2f5STejun Heo 		/* oops */
14989e8cd2f5STejun Heo 		WARN_ONCE(true, "workqueue: per-cpu pwq for %s on cpu%d has 0 refcnt",
14999e8cd2f5STejun Heo 			  wq->name, cpu);
15009e8cd2f5STejun Heo 	}
15019e8cd2f5STejun Heo 
1502112202d9STejun Heo 	/* pwq determined, queue */
1503112202d9STejun Heo 	trace_workqueue_queue_work(req_cpu, pwq, work);
1504502ca9d8STejun Heo 
150524acfb71SThomas Gleixner 	if (WARN_ON(!list_empty(&work->entry)))
150624acfb71SThomas Gleixner 		goto out;
15071e19ffc6STejun Heo 
1508112202d9STejun Heo 	pwq->nr_in_flight[pwq->work_color]++;
1509112202d9STejun Heo 	work_flags = work_color_to_flags(pwq->work_color);
15101e19ffc6STejun Heo 
1511112202d9STejun Heo 	if (likely(pwq->nr_active < pwq->max_active)) {
1512cdadf009STejun Heo 		trace_workqueue_activate_work(work);
1513112202d9STejun Heo 		pwq->nr_active++;
1514112202d9STejun Heo 		worklist = &pwq->pool->worklist;
151582607adcSTejun Heo 		if (list_empty(worklist))
151682607adcSTejun Heo 			pwq->pool->watchdog_ts = jiffies;
15178a2e8e5dSTejun Heo 	} else {
1518f97a4a1aSLai Jiangshan 		work_flags |= WORK_STRUCT_INACTIVE;
1519f97a4a1aSLai Jiangshan 		worklist = &pwq->inactive_works;
15208a2e8e5dSTejun Heo 	}
15211e19ffc6STejun Heo 
15220687c66bSZqiang 	debug_work_activate(work);
1523112202d9STejun Heo 	insert_work(pwq, work, worklist, work_flags);
15241e19ffc6STejun Heo 
152524acfb71SThomas Gleixner out:
1526a9b8a985SSebastian Andrzej Siewior 	raw_spin_unlock(&pwq->pool->lock);
152724acfb71SThomas Gleixner 	rcu_read_unlock();
15281da177e4SLinus Torvalds }
15291da177e4SLinus Torvalds 
15300fcb78c2SRolf Eike Beer /**
1531c1a220e7SZhang Rui  * queue_work_on - queue work on specific cpu
1532c1a220e7SZhang Rui  * @cpu: CPU number to execute work on
1533c1a220e7SZhang Rui  * @wq: workqueue to use
1534c1a220e7SZhang Rui  * @work: work to queue
1535c1a220e7SZhang Rui  *
1536c1a220e7SZhang Rui  * We queue the work to a specific CPU; the caller must ensure it
1537443378f0SPaul E. McKenney  * can't go away.  If the caller fails to keep the specified CPU
1538443378f0SPaul E. McKenney  * from going away, the work will execute on a randomly chosen CPU.
1539d185af30SYacine Belkadi  *
1540d185af30SYacine Belkadi  * Return: %false if @work was already on a queue, %true otherwise.
1541c1a220e7SZhang Rui  */
1542d4283e93STejun Heo bool queue_work_on(int cpu, struct workqueue_struct *wq,
1543d4283e93STejun Heo 		   struct work_struct *work)
1544c1a220e7SZhang Rui {
1545d4283e93STejun Heo 	bool ret = false;
15468930cabaSTejun Heo 	unsigned long flags;
15478930cabaSTejun Heo 
15488930cabaSTejun Heo 	local_irq_save(flags);
1549c1a220e7SZhang Rui 
155022df02bbSTejun Heo 	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
15514690c4abSTejun Heo 		__queue_work(cpu, wq, work);
1552d4283e93STejun Heo 		ret = true;
1553c1a220e7SZhang Rui 	}
15548930cabaSTejun Heo 
15558930cabaSTejun Heo 	local_irq_restore(flags);
1556c1a220e7SZhang Rui 	return ret;
1557c1a220e7SZhang Rui }
1558ad7b1f84SMarc Dionne EXPORT_SYMBOL(queue_work_on);
1559c1a220e7SZhang Rui 
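/*
 * Example usage (a minimal sketch; my_work_fn and my_work are
 * hypothetical, the rest is the real API):
 *
 *	static void my_work_fn(struct work_struct *work)
 *	{
 *		pr_info("running on CPU %d\n", raw_smp_processor_id());
 *	}
 *	static DECLARE_WORK(my_work, my_work_fn);
 *
 *	cpus_read_lock();	one way to keep CPU 2 from going away
 *	if (cpu_online(2))
 *		queue_work_on(2, system_wq, &my_work);
 *	cpus_read_unlock();
 */
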
15608204e0c1SAlexander Duyck /**
15618204e0c1SAlexander Duyck  * workqueue_select_cpu_near - Select a CPU based on NUMA node
15628204e0c1SAlexander Duyck  * @node: NUMA node ID that we want to select a CPU from
15638204e0c1SAlexander Duyck  *
15648204e0c1SAlexander Duyck  * This function will attempt to find a "random" CPU available on a given
15658204e0c1SAlexander Duyck  * node. If there are no CPUs available on the given node it will return
15668204e0c1SAlexander Duyck  * WORK_CPU_UNBOUND, indicating that the work should simply be scheduled
15678204e0c1SAlexander Duyck  * on any available CPU.
15688204e0c1SAlexander Duyck  */
15698204e0c1SAlexander Duyck static int workqueue_select_cpu_near(int node)
15708204e0c1SAlexander Duyck {
15718204e0c1SAlexander Duyck 	int cpu;
15728204e0c1SAlexander Duyck 
15738204e0c1SAlexander Duyck 	/* No point in doing this if NUMA isn't enabled for workqueues */
15748204e0c1SAlexander Duyck 	if (!wq_numa_enabled)
15758204e0c1SAlexander Duyck 		return WORK_CPU_UNBOUND;
15768204e0c1SAlexander Duyck 
15778204e0c1SAlexander Duyck 	/* Delay binding to CPU if node is not valid or online */
15788204e0c1SAlexander Duyck 	if (node < 0 || node >= MAX_NUMNODES || !node_online(node))
15798204e0c1SAlexander Duyck 		return WORK_CPU_UNBOUND;
15808204e0c1SAlexander Duyck 
15818204e0c1SAlexander Duyck 	/* Use local node/cpu if we are already there */
15828204e0c1SAlexander Duyck 	cpu = raw_smp_processor_id();
15838204e0c1SAlexander Duyck 	if (node == cpu_to_node(cpu))
15848204e0c1SAlexander Duyck 		return cpu;
15858204e0c1SAlexander Duyck 
15868204e0c1SAlexander Duyck 	/* Use "random", otherwise known as "first", online CPU of node */
15878204e0c1SAlexander Duyck 	cpu = cpumask_any_and(cpumask_of_node(node), cpu_online_mask);
15888204e0c1SAlexander Duyck 
15898204e0c1SAlexander Duyck 	/* If CPU is valid return that, otherwise just defer */
15908204e0c1SAlexander Duyck 	return cpu < nr_cpu_ids ? cpu : WORK_CPU_UNBOUND;
15918204e0c1SAlexander Duyck }
15928204e0c1SAlexander Duyck 
15938204e0c1SAlexander Duyck /**
15948204e0c1SAlexander Duyck  * queue_work_node - queue work on a "random" cpu for a given NUMA node
15958204e0c1SAlexander Duyck  * @node: NUMA node that we are targeting the work for
15968204e0c1SAlexander Duyck  * @wq: workqueue to use
15978204e0c1SAlexander Duyck  * @work: work to queue
15988204e0c1SAlexander Duyck  *
15998204e0c1SAlexander Duyck  * We queue the work to a "random" CPU within a given NUMA node. The basic
16008204e0c1SAlexander Duyck  * idea here is to provide a best-effort way to associate work with a
16018204e0c1SAlexander Duyck  * given NUMA node.
16028204e0c1SAlexander Duyck  *
16038204e0c1SAlexander Duyck  * This function will only make a best effort attempt at getting this onto
16048204e0c1SAlexander Duyck  * the right NUMA node. If no node is requested or the requested node is
16058204e0c1SAlexander Duyck  * offline then we just fall back to standard queue_work behavior.
16068204e0c1SAlexander Duyck  *
16078204e0c1SAlexander Duyck  * Currently the "random" CPU ends up being the first available CPU in the
16088204e0c1SAlexander Duyck  * intersection of cpu_online_mask and the cpumask of the node, unless we
16098204e0c1SAlexander Duyck  * are running on the node. In that case we just use the current CPU.
16108204e0c1SAlexander Duyck  *
16118204e0c1SAlexander Duyck  * Return: %false if @work was already on a queue, %true otherwise.
16128204e0c1SAlexander Duyck  */
16138204e0c1SAlexander Duyck bool queue_work_node(int node, struct workqueue_struct *wq,
16148204e0c1SAlexander Duyck 		     struct work_struct *work)
16158204e0c1SAlexander Duyck {
16168204e0c1SAlexander Duyck 	unsigned long flags;
16178204e0c1SAlexander Duyck 	bool ret = false;
16188204e0c1SAlexander Duyck 
16198204e0c1SAlexander Duyck 	/*
16208204e0c1SAlexander Duyck 	 * This current implementation is specific to unbound workqueues.
16218204e0c1SAlexander Duyck 	 * Specifically we only return the first available CPU for a given
16228204e0c1SAlexander Duyck 	 * node instead of cycling through individual CPUs within the node.
16238204e0c1SAlexander Duyck 	 *
16248204e0c1SAlexander Duyck 	 * If this is used with a per-cpu workqueue then the logic in
16258204e0c1SAlexander Duyck 	 * workqueue_select_cpu_near would need to be updated to allow for
16268204e0c1SAlexander Duyck 	 * some round robin type logic.
16278204e0c1SAlexander Duyck 	 */
16288204e0c1SAlexander Duyck 	WARN_ON_ONCE(!(wq->flags & WQ_UNBOUND));
16298204e0c1SAlexander Duyck 
16308204e0c1SAlexander Duyck 	local_irq_save(flags);
16318204e0c1SAlexander Duyck 
16328204e0c1SAlexander Duyck 	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
16338204e0c1SAlexander Duyck 		int cpu = workqueue_select_cpu_near(node);
16348204e0c1SAlexander Duyck 
16358204e0c1SAlexander Duyck 		__queue_work(cpu, wq, work);
16368204e0c1SAlexander Duyck 		ret = true;
16378204e0c1SAlexander Duyck 	}
16388204e0c1SAlexander Duyck 
16398204e0c1SAlexander Duyck 	local_irq_restore(flags);
16408204e0c1SAlexander Duyck 	return ret;
16418204e0c1SAlexander Duyck }
16428204e0c1SAlexander Duyck EXPORT_SYMBOL_GPL(queue_work_node);
16438204e0c1SAlexander Duyck 
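/*
 * Example usage (a sketch; dev and my_work are hypothetical, and per
 * the WARN_ON_ONCE above the workqueue must be WQ_UNBOUND):
 *
 *	static DECLARE_WORK(my_work, my_work_fn);
 *
 *	queue_work_node(dev_to_node(dev), system_unbound_wq, &my_work);
 *
 * which keeps the work near the memory of the device that produced it.
 */
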
16448c20feb6SKees Cook void delayed_work_timer_fn(struct timer_list *t)
16451da177e4SLinus Torvalds {
16468c20feb6SKees Cook 	struct delayed_work *dwork = from_timer(dwork, t, timer);
16471da177e4SLinus Torvalds 
1648e0aecdd8STejun Heo 	/* should have been called from irqsafe timer with irq already off */
164960c057bcSLai Jiangshan 	__queue_work(dwork->cpu, dwork->wq, &dwork->work);
16501da177e4SLinus Torvalds }
16511438ade5SKonstantin Khlebnikov EXPORT_SYMBOL(delayed_work_timer_fn);
16521da177e4SLinus Torvalds 
16537beb2edfSTejun Heo static void __queue_delayed_work(int cpu, struct workqueue_struct *wq,
165452bad64dSDavid Howells 				struct delayed_work *dwork, unsigned long delay)
16551da177e4SLinus Torvalds {
16567beb2edfSTejun Heo 	struct timer_list *timer = &dwork->timer;
16577beb2edfSTejun Heo 	struct work_struct *work = &dwork->work;
16581da177e4SLinus Torvalds 
1659637fdbaeSTejun Heo 	WARN_ON_ONCE(!wq);
16604b243563SSami Tolvanen 	WARN_ON_ONCE(timer->function != delayed_work_timer_fn);
1661fc4b514fSTejun Heo 	WARN_ON_ONCE(timer_pending(timer));
1662fc4b514fSTejun Heo 	WARN_ON_ONCE(!list_empty(&work->entry));
16637beb2edfSTejun Heo 
16648852aac2STejun Heo 	/*
16658852aac2STejun Heo 	 * If @delay is 0, queue @dwork->work immediately.  This is for
16668852aac2STejun Heo 	 * both optimization and correctness.  The earliest @timer can
16678852aac2STejun Heo 	 * expire is on the closest next tick, and delayed_work users depend
16688852aac2STejun Heo 	 * on there being no such delay when @delay is 0.
16698852aac2STejun Heo 	 */
16708852aac2STejun Heo 	if (!delay) {
16718852aac2STejun Heo 		__queue_work(cpu, wq, &dwork->work);
16728852aac2STejun Heo 		return;
16738852aac2STejun Heo 	}
16748852aac2STejun Heo 
167560c057bcSLai Jiangshan 	dwork->wq = wq;
16761265057fSTejun Heo 	dwork->cpu = cpu;
16777beb2edfSTejun Heo 	timer->expires = jiffies + delay;
16787beb2edfSTejun Heo 
1679041bd12eSTejun Heo 	if (unlikely(cpu != WORK_CPU_UNBOUND))
16807beb2edfSTejun Heo 		add_timer_on(timer, cpu);
1681041bd12eSTejun Heo 	else
1682041bd12eSTejun Heo 		add_timer(timer);
16837beb2edfSTejun Heo }
16841da177e4SLinus Torvalds 
16850fcb78c2SRolf Eike Beer /**
16860fcb78c2SRolf Eike Beer  * queue_delayed_work_on - queue work on specific CPU after delay
16870fcb78c2SRolf Eike Beer  * @cpu: CPU number to execute work on
16880fcb78c2SRolf Eike Beer  * @wq: workqueue to use
1689af9997e4SRandy Dunlap  * @dwork: work to queue
16900fcb78c2SRolf Eike Beer  * @delay: number of jiffies to wait before queueing
16910fcb78c2SRolf Eike Beer  *
1692d185af30SYacine Belkadi  * Return: %false if @work was already on a queue, %true otherwise.  If
1693715f1300STejun Heo  * @delay is zero and @dwork is idle, it will be scheduled for immediate
1694715f1300STejun Heo  * execution.
16950fcb78c2SRolf Eike Beer  */
1696d4283e93STejun Heo bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
169752bad64dSDavid Howells 			   struct delayed_work *dwork, unsigned long delay)
16987a6bc1cdSVenkatesh Pallipadi {
169952bad64dSDavid Howells 	struct work_struct *work = &dwork->work;
1700d4283e93STejun Heo 	bool ret = false;
17018930cabaSTejun Heo 	unsigned long flags;
17028930cabaSTejun Heo 
17038930cabaSTejun Heo 	/* read the comment in __queue_work() */
17048930cabaSTejun Heo 	local_irq_save(flags);
17057a6bc1cdSVenkatesh Pallipadi 
170622df02bbSTejun Heo 	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
17077beb2edfSTejun Heo 		__queue_delayed_work(cpu, wq, dwork, delay);
1708d4283e93STejun Heo 		ret = true;
17097a6bc1cdSVenkatesh Pallipadi 	}
17108930cabaSTejun Heo 
17118930cabaSTejun Heo 	local_irq_restore(flags);
17127a6bc1cdSVenkatesh Pallipadi 	return ret;
17137a6bc1cdSVenkatesh Pallipadi }
1714ad7b1f84SMarc Dionne EXPORT_SYMBOL(queue_delayed_work_on);
17151da177e4SLinus Torvalds 
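/*
 * Example usage (a minimal sketch; my_dwork_fn and my_dwork are
 * hypothetical):
 *
 *	static void my_dwork_fn(struct work_struct *work)
 *	{
 *		struct delayed_work *dwork = to_delayed_work(work);
 *		...
 *	}
 *	static DECLARE_DELAYED_WORK(my_dwork, my_dwork_fn);
 *
 *	run roughly 100ms from now on whichever CPU is convenient:
 *	queue_delayed_work(system_wq, &my_dwork, msecs_to_jiffies(100));
 */
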
1716c8e55f36STejun Heo /**
17178376fe22STejun Heo  * mod_delayed_work_on - modify delay of or queue a delayed work on specific CPU
17188376fe22STejun Heo  * @cpu: CPU number to execute work on
17198376fe22STejun Heo  * @wq: workqueue to use
17208376fe22STejun Heo  * @dwork: work to queue
17218376fe22STejun Heo  * @delay: number of jiffies to wait before queueing
17228376fe22STejun Heo  *
17238376fe22STejun Heo  * If @dwork is idle, equivalent to queue_delayed_work_on(); otherwise,
17248376fe22STejun Heo  * modify @dwork's timer so that it expires after @delay.  If @delay is
17258376fe22STejun Heo  * zero, @work is guaranteed to be scheduled immediately regardless of its
17268376fe22STejun Heo  * current state.
17278376fe22STejun Heo  *
1728d185af30SYacine Belkadi  * Return: %false if @dwork was idle and queued, %true if @dwork was
17298376fe22STejun Heo  * pending and its timer was modified.
17308376fe22STejun Heo  *
1731e0aecdd8STejun Heo  * This function is safe to call from any context including IRQ handler.
17328376fe22STejun Heo  * See try_to_grab_pending() for details.
17338376fe22STejun Heo  */
17348376fe22STejun Heo bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
17358376fe22STejun Heo 			 struct delayed_work *dwork, unsigned long delay)
17368376fe22STejun Heo {
17378376fe22STejun Heo 	unsigned long flags;
17388376fe22STejun Heo 	int ret;
17398376fe22STejun Heo 
17408376fe22STejun Heo 	do {
17418376fe22STejun Heo 		ret = try_to_grab_pending(&dwork->work, true, &flags);
17428376fe22STejun Heo 	} while (unlikely(ret == -EAGAIN));
17438376fe22STejun Heo 
17448376fe22STejun Heo 	if (likely(ret >= 0)) {
17458376fe22STejun Heo 		__queue_delayed_work(cpu, wq, dwork, delay);
17468376fe22STejun Heo 		local_irq_restore(flags);
17478376fe22STejun Heo 	}
17488376fe22STejun Heo 
17498376fe22STejun Heo 	/* -ENOENT from try_to_grab_pending() becomes %true */
17508376fe22STejun Heo 	return ret;
17518376fe22STejun Heo }
17528376fe22STejun Heo EXPORT_SYMBOL_GPL(mod_delayed_work_on);
17538376fe22STejun Heo 
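/*
 * Example: the debounce pattern this function enables (a sketch;
 * my_dwork and on_event() are hypothetical).  Each event pushes the
 * deadline back, so the handler runs only once things have been quiet
 * for 200ms:
 *
 *	static void on_event(void)
 *	{
 *		mod_delayed_work(system_wq, &my_dwork,
 *				 msecs_to_jiffies(200));
 *	}
 */
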
175405f0fe6bSTejun Heo static void rcu_work_rcufn(struct rcu_head *rcu)
175505f0fe6bSTejun Heo {
175605f0fe6bSTejun Heo 	struct rcu_work *rwork = container_of(rcu, struct rcu_work, rcu);
175705f0fe6bSTejun Heo 
175805f0fe6bSTejun Heo 	/* read the comment in __queue_work() */
175905f0fe6bSTejun Heo 	local_irq_disable();
176005f0fe6bSTejun Heo 	__queue_work(WORK_CPU_UNBOUND, rwork->wq, &rwork->work);
176105f0fe6bSTejun Heo 	local_irq_enable();
176205f0fe6bSTejun Heo }
176305f0fe6bSTejun Heo 
176405f0fe6bSTejun Heo /**
176505f0fe6bSTejun Heo  * queue_rcu_work - queue work after an RCU grace period
176605f0fe6bSTejun Heo  * @wq: workqueue to use
176705f0fe6bSTejun Heo  * @rwork: work to queue
176805f0fe6bSTejun Heo  *
176905f0fe6bSTejun Heo  * Return: %false if @rwork was already pending, %true otherwise.  Note
177005f0fe6bSTejun Heo  * that a full RCU grace period is guaranteed only after a %true return.
1771bf393fd4SBart Van Assche  * While @rwork is guaranteed to be executed after a %false return, the
177205f0fe6bSTejun Heo  * execution may happen before a full RCU grace period has passed.
177305f0fe6bSTejun Heo  */
177405f0fe6bSTejun Heo bool queue_rcu_work(struct workqueue_struct *wq, struct rcu_work *rwork)
177505f0fe6bSTejun Heo {
177605f0fe6bSTejun Heo 	struct work_struct *work = &rwork->work;
177705f0fe6bSTejun Heo 
177805f0fe6bSTejun Heo 	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
177905f0fe6bSTejun Heo 		rwork->wq = wq;
1780a7e30c0eSUladzislau Rezki 		call_rcu_hurry(&rwork->rcu, rcu_work_rcufn);
178105f0fe6bSTejun Heo 		return true;
178205f0fe6bSTejun Heo 	}
178305f0fe6bSTejun Heo 
178405f0fe6bSTejun Heo 	return false;
178505f0fe6bSTejun Heo }
178605f0fe6bSTejun Heo EXPORT_SYMBOL(queue_rcu_work);
178705f0fe6bSTejun Heo 
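/*
 * Example usage (a sketch; struct foo and foo_reclaim() are
 * hypothetical):
 *
 *	struct foo {
 *		struct rcu_work rwork;
 *		...
 *	};
 *
 *	static void foo_reclaim(struct work_struct *work)
 *	{
 *		struct rcu_work *rwork = to_rcu_work(work);
 *
 *		kfree(container_of(rwork, struct foo, rwork));
 *	}
 *
 *	INIT_RCU_WORK(&foo->rwork, foo_reclaim);
 *	queue_rcu_work(system_unbound_wq, &foo->rwork);
 *
 * The object is freed in process context after a full grace period,
 * which helps when the reclaim path may sleep and thus can't run
 * directly from call_rcu().
 */
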
17888376fe22STejun Heo /**
1789c8e55f36STejun Heo  * worker_enter_idle - enter idle state
1790c8e55f36STejun Heo  * @worker: worker which is entering idle state
1791c8e55f36STejun Heo  *
1792c8e55f36STejun Heo  * @worker is entering idle state.  Update stats and idle timer if
1793c8e55f36STejun Heo  * necessary.
1794c8e55f36STejun Heo  *
1795c8e55f36STejun Heo  * LOCKING:
1796a9b8a985SSebastian Andrzej Siewior  * raw_spin_lock_irq(pool->lock).
1797c8e55f36STejun Heo  */
1798c8e55f36STejun Heo static void worker_enter_idle(struct worker *worker)
17991da177e4SLinus Torvalds {
1800bd7bdd43STejun Heo 	struct worker_pool *pool = worker->pool;
1801c8e55f36STejun Heo 
18026183c009STejun Heo 	if (WARN_ON_ONCE(worker->flags & WORKER_IDLE) ||
18036183c009STejun Heo 	    WARN_ON_ONCE(!list_empty(&worker->entry) &&
18046183c009STejun Heo 			 (worker->hentry.next || worker->hentry.pprev)))
18056183c009STejun Heo 		return;
1806c8e55f36STejun Heo 
1807051e1850SLai Jiangshan 	/* can't use worker_set_flags(), also called from create_worker() */
1808cb444766STejun Heo 	worker->flags |= WORKER_IDLE;
1809bd7bdd43STejun Heo 	pool->nr_idle++;
1810e22bee78STejun Heo 	worker->last_active = jiffies;
1811c8e55f36STejun Heo 
1812c8e55f36STejun Heo 	/* idle_list is LIFO */
1813bd7bdd43STejun Heo 	list_add(&worker->entry, &pool->idle_list);
1814db7bccf4STejun Heo 
181563d95a91STejun Heo 	if (too_many_workers(pool) && !timer_pending(&pool->idle_timer))
1816628c78e7STejun Heo 		mod_timer(&pool->idle_timer, jiffies + IDLE_WORKER_TIMEOUT);
1817cb444766STejun Heo 
1818989442d7SLai Jiangshan 	/* Sanity check nr_running. */
1819bc35f7efSLai Jiangshan 	WARN_ON_ONCE(pool->nr_workers == pool->nr_idle && pool->nr_running);
1820c8e55f36STejun Heo }
1821c8e55f36STejun Heo 
1822c8e55f36STejun Heo /**
1823c8e55f36STejun Heo  * worker_leave_idle - leave idle state
1824c8e55f36STejun Heo  * @worker: worker which is leaving idle state
1825c8e55f36STejun Heo  *
1826c8e55f36STejun Heo  * @worker is leaving idle state.  Update stats.
1827c8e55f36STejun Heo  *
1828c8e55f36STejun Heo  * LOCKING:
1829a9b8a985SSebastian Andrzej Siewior  * raw_spin_lock_irq(pool->lock).
1830c8e55f36STejun Heo  */
1831c8e55f36STejun Heo static void worker_leave_idle(struct worker *worker)
1832c8e55f36STejun Heo {
1833bd7bdd43STejun Heo 	struct worker_pool *pool = worker->pool;
1834c8e55f36STejun Heo 
18356183c009STejun Heo 	if (WARN_ON_ONCE(!(worker->flags & WORKER_IDLE)))
18366183c009STejun Heo 		return;
1837d302f017STejun Heo 	worker_clr_flags(worker, WORKER_IDLE);
1838bd7bdd43STejun Heo 	pool->nr_idle--;
1839c8e55f36STejun Heo 	list_del_init(&worker->entry);
1840c8e55f36STejun Heo }
1841c8e55f36STejun Heo 
1842f7537df5SLai Jiangshan static struct worker *alloc_worker(int node)
1843c34056a3STejun Heo {
1844c34056a3STejun Heo 	struct worker *worker;
1845c34056a3STejun Heo 
1846f7537df5SLai Jiangshan 	worker = kzalloc_node(sizeof(*worker), GFP_KERNEL, node);
1847c8e55f36STejun Heo 	if (worker) {
1848c8e55f36STejun Heo 		INIT_LIST_HEAD(&worker->entry);
1849affee4b2STejun Heo 		INIT_LIST_HEAD(&worker->scheduled);
1850da028469SLai Jiangshan 		INIT_LIST_HEAD(&worker->node);
1851e22bee78STejun Heo 		/* on creation a worker is in !idle && prep state */
1852e22bee78STejun Heo 		worker->flags = WORKER_PREP;
1853c8e55f36STejun Heo 	}
1854c34056a3STejun Heo 	return worker;
1855c34056a3STejun Heo }
1856c34056a3STejun Heo 
1857c34056a3STejun Heo /**
18584736cbf7SLai Jiangshan  * worker_attach_to_pool() - attach a worker to a pool
18594736cbf7SLai Jiangshan  * @worker: worker to be attached
18604736cbf7SLai Jiangshan  * @pool: the target pool
18614736cbf7SLai Jiangshan  *
18624736cbf7SLai Jiangshan  * Attach @worker to @pool.  Once attached, the %WORKER_UNBOUND flag and
18634736cbf7SLai Jiangshan  * cpu-binding of @worker are kept coordinated with the pool across
18644736cbf7SLai Jiangshan  * cpu-[un]hotplugs.
18654736cbf7SLai Jiangshan  */
18664736cbf7SLai Jiangshan static void worker_attach_to_pool(struct worker *worker,
18674736cbf7SLai Jiangshan 				   struct worker_pool *pool)
18684736cbf7SLai Jiangshan {
18691258fae7STejun Heo 	mutex_lock(&wq_pool_attach_mutex);
18704736cbf7SLai Jiangshan 
18714736cbf7SLai Jiangshan 	/*
18721258fae7STejun Heo 	 * The wq_pool_attach_mutex ensures %POOL_DISASSOCIATED remains
18731258fae7STejun Heo 	 * stable across this function.  See the comments above the flag
18741258fae7STejun Heo 	 * definition for details.
18754736cbf7SLai Jiangshan 	 */
18764736cbf7SLai Jiangshan 	if (pool->flags & POOL_DISASSOCIATED)
18774736cbf7SLai Jiangshan 		worker->flags |= WORKER_UNBOUND;
18785c25b5ffSPeter Zijlstra 	else
18795c25b5ffSPeter Zijlstra 		kthread_set_per_cpu(worker->task, pool->cpu);
18804736cbf7SLai Jiangshan 
1881640f17c8SPeter Zijlstra 	if (worker->rescue_wq)
1882640f17c8SPeter Zijlstra 		set_cpus_allowed_ptr(worker->task, pool->attrs->cpumask);
1883640f17c8SPeter Zijlstra 
18844736cbf7SLai Jiangshan 	list_add_tail(&worker->node, &pool->workers);
1885a2d812a2STejun Heo 	worker->pool = pool;
18864736cbf7SLai Jiangshan 
18871258fae7STejun Heo 	mutex_unlock(&wq_pool_attach_mutex);
18884736cbf7SLai Jiangshan }
18894736cbf7SLai Jiangshan 
18904736cbf7SLai Jiangshan /**
189160f5a4bcSLai Jiangshan  * worker_detach_from_pool() - detach a worker from its pool
189260f5a4bcSLai Jiangshan  * @worker: worker which is attached to its pool
189360f5a4bcSLai Jiangshan  *
18944736cbf7SLai Jiangshan  * Undo the attaching which had been done in worker_attach_to_pool().  The
18954736cbf7SLai Jiangshan  * caller worker shouldn't access the pool after being detached unless it
18964736cbf7SLai Jiangshan  * holds another reference to the pool.
189760f5a4bcSLai Jiangshan  */
1898a2d812a2STejun Heo static void worker_detach_from_pool(struct worker *worker)
189960f5a4bcSLai Jiangshan {
1900a2d812a2STejun Heo 	struct worker_pool *pool = worker->pool;
190160f5a4bcSLai Jiangshan 	struct completion *detach_completion = NULL;
190260f5a4bcSLai Jiangshan 
19031258fae7STejun Heo 	mutex_lock(&wq_pool_attach_mutex);
1904a2d812a2STejun Heo 
19055c25b5ffSPeter Zijlstra 	kthread_set_per_cpu(worker->task, -1);
1906da028469SLai Jiangshan 	list_del(&worker->node);
1907a2d812a2STejun Heo 	worker->pool = NULL;
1908a2d812a2STejun Heo 
1909da028469SLai Jiangshan 	if (list_empty(&pool->workers))
191060f5a4bcSLai Jiangshan 		detach_completion = pool->detach_completion;
19111258fae7STejun Heo 	mutex_unlock(&wq_pool_attach_mutex);
191260f5a4bcSLai Jiangshan 
1913b62c0751SLai Jiangshan 	/* clear leftover flags without pool->lock after it is detached */
1914b62c0751SLai Jiangshan 	worker->flags &= ~(WORKER_UNBOUND | WORKER_REBOUND);
1915b62c0751SLai Jiangshan 
191660f5a4bcSLai Jiangshan 	if (detach_completion)
191760f5a4bcSLai Jiangshan 		complete(detach_completion);
191860f5a4bcSLai Jiangshan }
191960f5a4bcSLai Jiangshan 
192060f5a4bcSLai Jiangshan /**
1921c34056a3STejun Heo  * create_worker - create a new workqueue worker
192263d95a91STejun Heo  * @pool: pool the new worker will belong to
1923c34056a3STejun Heo  *
1924051e1850SLai Jiangshan  * Create and start a new worker which is attached to @pool.
1925c34056a3STejun Heo  *
1926c34056a3STejun Heo  * CONTEXT:
1927c34056a3STejun Heo  * Might sleep.  Does GFP_KERNEL allocations.
1928c34056a3STejun Heo  *
1929d185af30SYacine Belkadi  * Return:
1930c34056a3STejun Heo  * Pointer to the newly created worker, or %NULL on failure.
1931c34056a3STejun Heo  */
1932bc2ae0f5STejun Heo static struct worker *create_worker(struct worker_pool *pool)
1933c34056a3STejun Heo {
1934e441b56fSZhen Lei 	struct worker *worker;
1935e441b56fSZhen Lei 	int id;
1936e3c916a4STejun Heo 	char id_buf[16];
1937c34056a3STejun Heo 
19387cda9aaeSLai Jiangshan 	/* ID is needed to determine kthread name */
1939e441b56fSZhen Lei 	id = ida_alloc(&pool->worker_ida, GFP_KERNEL);
1940822d8405STejun Heo 	if (id < 0)
1941e441b56fSZhen Lei 		return NULL;
1942c34056a3STejun Heo 
1943f7537df5SLai Jiangshan 	worker = alloc_worker(pool->node);
1944c34056a3STejun Heo 	if (!worker)
1945c34056a3STejun Heo 		goto fail;
1946c34056a3STejun Heo 
1947c34056a3STejun Heo 	worker->id = id;
1948c34056a3STejun Heo 
194929c91e99STejun Heo 	if (pool->cpu >= 0)
1950e3c916a4STejun Heo 		snprintf(id_buf, sizeof(id_buf), "%d:%d%s", pool->cpu, id,
1951e3c916a4STejun Heo 			 pool->attrs->nice < 0  ? "H" : "");
1952f3421797STejun Heo 	else
1953e3c916a4STejun Heo 		snprintf(id_buf, sizeof(id_buf), "u%d:%d", pool->id, id);
1954e3c916a4STejun Heo 
1955f3f90ad4STejun Heo 	worker->task = kthread_create_on_node(worker_thread, worker, pool->node,
1956e3c916a4STejun Heo 					      "kworker/%s", id_buf);
1957c34056a3STejun Heo 	if (IS_ERR(worker->task))
1958c34056a3STejun Heo 		goto fail;
1959c34056a3STejun Heo 
196091151228SOleg Nesterov 	set_user_nice(worker->task, pool->attrs->nice);
196125834c73SPeter Zijlstra 	kthread_bind_mask(worker->task, pool->attrs->cpumask);
196291151228SOleg Nesterov 
1963da028469SLai Jiangshan 	/* successful, attach the worker to the pool */
19644736cbf7SLai Jiangshan 	worker_attach_to_pool(worker, pool);
1965822d8405STejun Heo 
1966051e1850SLai Jiangshan 	/* start the newly created worker */
1967a9b8a985SSebastian Andrzej Siewior 	raw_spin_lock_irq(&pool->lock);
1968051e1850SLai Jiangshan 	worker->pool->nr_workers++;
1969051e1850SLai Jiangshan 	worker_enter_idle(worker);
1970051e1850SLai Jiangshan 	wake_up_process(worker->task);
1971a9b8a985SSebastian Andrzej Siewior 	raw_spin_unlock_irq(&pool->lock);
1972051e1850SLai Jiangshan 
1973c34056a3STejun Heo 	return worker;
1974822d8405STejun Heo 
1975c34056a3STejun Heo fail:
1976e441b56fSZhen Lei 	ida_free(&pool->worker_ida, id);
1977c34056a3STejun Heo 	kfree(worker);
1978c34056a3STejun Heo 	return NULL;
1979c34056a3STejun Heo }
1980c34056a3STejun Heo 
1981793777bcSValentin Schneider static void unbind_worker(struct worker *worker)
1982793777bcSValentin Schneider {
1983793777bcSValentin Schneider 	lockdep_assert_held(&wq_pool_attach_mutex);
1984793777bcSValentin Schneider 
1985793777bcSValentin Schneider 	kthread_set_per_cpu(worker->task, -1);
1986793777bcSValentin Schneider 	if (cpumask_intersects(wq_unbound_cpumask, cpu_active_mask))
1987793777bcSValentin Schneider 		WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, wq_unbound_cpumask) < 0);
1988793777bcSValentin Schneider 	else
1989793777bcSValentin Schneider 		WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, cpu_possible_mask) < 0);
1990793777bcSValentin Schneider }
1991793777bcSValentin Schneider 
1992793777bcSValentin Schneider static void rebind_worker(struct worker *worker, struct worker_pool *pool)
1993793777bcSValentin Schneider {
1994793777bcSValentin Schneider 	kthread_set_per_cpu(worker->task, pool->cpu);
1995793777bcSValentin Schneider 	WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, pool->attrs->cpumask) < 0);
1996793777bcSValentin Schneider }
1997793777bcSValentin Schneider 
1998c34056a3STejun Heo /**
1999c34056a3STejun Heo  * destroy_worker - destroy a workqueue worker
2000c34056a3STejun Heo  * @worker: worker to be destroyed
2001c34056a3STejun Heo  *
200273eb7fe7SLai Jiangshan  * Destroy @worker and adjust @pool stats accordingly.  The worker should
200373eb7fe7SLai Jiangshan  * be idle.
2004c8e55f36STejun Heo  *
2005c8e55f36STejun Heo  * CONTEXT:
2006a9b8a985SSebastian Andrzej Siewior  * raw_spin_lock_irq(pool->lock).
2007c34056a3STejun Heo  */
2008c34056a3STejun Heo static void destroy_worker(struct worker *worker)
2009c34056a3STejun Heo {
2010bd7bdd43STejun Heo 	struct worker_pool *pool = worker->pool;
2011c34056a3STejun Heo 
2012cd549687STejun Heo 	lockdep_assert_held(&pool->lock);
2013cd549687STejun Heo 
2014c34056a3STejun Heo 	/* sanity check frenzy */
20156183c009STejun Heo 	if (WARN_ON(worker->current_work) ||
201673eb7fe7SLai Jiangshan 	    WARN_ON(!list_empty(&worker->scheduled)) ||
201773eb7fe7SLai Jiangshan 	    WARN_ON(!(worker->flags & WORKER_IDLE)))
20186183c009STejun Heo 		return;
2019c34056a3STejun Heo 
2020bd7bdd43STejun Heo 	pool->nr_workers--;
2021bd7bdd43STejun Heo 	pool->nr_idle--;
2022c8e55f36STejun Heo 
2023c8e55f36STejun Heo 	list_del_init(&worker->entry);
2024cb444766STejun Heo 	worker->flags |= WORKER_DIE;
202560f5a4bcSLai Jiangshan 	wake_up_process(worker->task);
2026c34056a3STejun Heo }
2027c34056a3STejun Heo 
2028*3f959aa3SValentin Schneider /**
2029*3f959aa3SValentin Schneider  * idle_worker_timeout - check if some idle workers can now be deleted.
2030*3f959aa3SValentin Schneider  * @t: The pool's idle_timer that just expired
2031*3f959aa3SValentin Schneider  *
2032*3f959aa3SValentin Schneider  * The timer is armed in worker_enter_idle(). Note that it isn't disarmed in
2033*3f959aa3SValentin Schneider  * worker_leave_idle(), as a worker flicking between idle and active while its
2034*3f959aa3SValentin Schneider  * pool is at the too_many_workers() tipping point would cause too much timer
2035*3f959aa3SValentin Schneider  * housekeeping overhead. Since IDLE_WORKER_TIMEOUT is long enough, we just let
2036*3f959aa3SValentin Schneider  * it expire and re-evaluate things from there.
2037*3f959aa3SValentin Schneider  */
203832a6c723SKees Cook static void idle_worker_timeout(struct timer_list *t)
2039e22bee78STejun Heo {
204032a6c723SKees Cook 	struct worker_pool *pool = from_timer(pool, t, idle_timer);
2041*3f959aa3SValentin Schneider 	bool do_cull = false;
2042*3f959aa3SValentin Schneider 
2043*3f959aa3SValentin Schneider 	if (work_pending(&pool->idle_cull_work))
2044*3f959aa3SValentin Schneider 		return;
2045*3f959aa3SValentin Schneider 
2046*3f959aa3SValentin Schneider 	raw_spin_lock_irq(&pool->lock);
2047*3f959aa3SValentin Schneider 
2048*3f959aa3SValentin Schneider 	if (too_many_workers(pool)) {
2049*3f959aa3SValentin Schneider 		struct worker *worker;
2050*3f959aa3SValentin Schneider 		unsigned long expires;
2051*3f959aa3SValentin Schneider 
2052*3f959aa3SValentin Schneider 		/* idle_list is kept in LIFO order, check the last one */
2053*3f959aa3SValentin Schneider 		worker = list_entry(pool->idle_list.prev, struct worker, entry);
2054*3f959aa3SValentin Schneider 		expires = worker->last_active + IDLE_WORKER_TIMEOUT;
2055*3f959aa3SValentin Schneider 		do_cull = !time_before(jiffies, expires);
2056*3f959aa3SValentin Schneider 
2057*3f959aa3SValentin Schneider 		if (!do_cull)
2058*3f959aa3SValentin Schneider 			mod_timer(&pool->idle_timer, expires);
2059*3f959aa3SValentin Schneider 	}
2060*3f959aa3SValentin Schneider 	raw_spin_unlock_irq(&pool->lock);
2061*3f959aa3SValentin Schneider 
2062*3f959aa3SValentin Schneider 	if (do_cull)
2063*3f959aa3SValentin Schneider 		queue_work(system_unbound_wq, &pool->idle_cull_work);
2064*3f959aa3SValentin Schneider }
2065*3f959aa3SValentin Schneider 
2066*3f959aa3SValentin Schneider /**
2067*3f959aa3SValentin Schneider  * idle_cull_fn - cull workers that have been idle for too long.
2068*3f959aa3SValentin Schneider  * @work: the pool's work for handling these idle workers
2069*3f959aa3SValentin Schneider  *
2070*3f959aa3SValentin Schneider  * This goes through a pool's idle workers and gets rid of those that have been
2071*3f959aa3SValentin Schneider  * idle for at least IDLE_WORKER_TIMEOUT (a jiffies interval, five minutes).
2072*3f959aa3SValentin Schneider  */
2073*3f959aa3SValentin Schneider static void idle_cull_fn(struct work_struct *work)
2074*3f959aa3SValentin Schneider {
2075*3f959aa3SValentin Schneider 	struct worker_pool *pool = container_of(work, struct worker_pool, idle_cull_work);
2076e22bee78STejun Heo 
2077a9b8a985SSebastian Andrzej Siewior 	raw_spin_lock_irq(&pool->lock);
2078e22bee78STejun Heo 
20793347fc9fSLai Jiangshan 	while (too_many_workers(pool)) {
2080e22bee78STejun Heo 		struct worker *worker;
2081e22bee78STejun Heo 		unsigned long expires;
2082e22bee78STejun Heo 
208363d95a91STejun Heo 		worker = list_entry(pool->idle_list.prev, struct worker, entry);
2084e22bee78STejun Heo 		expires = worker->last_active + IDLE_WORKER_TIMEOUT;
2085e22bee78STejun Heo 
20863347fc9fSLai Jiangshan 		if (time_before(jiffies, expires)) {
208763d95a91STejun Heo 			mod_timer(&pool->idle_timer, expires);
20883347fc9fSLai Jiangshan 			break;
2089e22bee78STejun Heo 		}
20903347fc9fSLai Jiangshan 
20913347fc9fSLai Jiangshan 		destroy_worker(worker);
2092e22bee78STejun Heo 	}
2093e22bee78STejun Heo 
2094a9b8a985SSebastian Andrzej Siewior 	raw_spin_unlock_irq(&pool->lock);
2095e22bee78STejun Heo }
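
/*
 * Illustrative sketch (editorial example, not part of the kernel source):
 * the detect-in-timer, act-in-work-item split used by idle_worker_timeout()
 * and idle_cull_fn() above.  Timer callbacks run in atomic context, so the
 * cheap expiry check happens there and anything heavier is punted to a work
 * item running in process context.  All identifiers below (my_dev,
 * my_timer_fn, ...) are hypothetical; assumes <linux/timer.h>,
 * <linux/workqueue.h> and <linux/mutex.h>.
 */
struct my_dev {
	struct timer_list	timer;
	struct work_struct	reap_work;
	struct mutex		teardown_mutex;
	bool			needs_reaping;
};

static void my_timer_fn(struct timer_list *t)
{
	struct my_dev *dev = from_timer(dev, t, timer);

	/* cheap, atomic-context-safe check, as in idle_worker_timeout() */
	if (READ_ONCE(dev->needs_reaping))
		queue_work(system_unbound_wq, &dev->reap_work);
	else
		mod_timer(&dev->timer, jiffies + HZ);	/* re-check later */
}

static void my_reap_fn(struct work_struct *work)
{
	struct my_dev *dev = container_of(work, struct my_dev, reap_work);

	/* process context: sleeping locks are fine here, as in idle_cull_fn() */
	mutex_lock(&dev->teardown_mutex);
	/* ... tear down the expensive state ... */
	mutex_unlock(&dev->teardown_mutex);
}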
2096e22bee78STejun Heo 
2097493a1724STejun Heo static void send_mayday(struct work_struct *work)
2098e22bee78STejun Heo {
2099112202d9STejun Heo 	struct pool_workqueue *pwq = get_work_pwq(work);
2100112202d9STejun Heo 	struct workqueue_struct *wq = pwq->wq;
2101493a1724STejun Heo 
21022e109a28STejun Heo 	lockdep_assert_held(&wq_mayday_lock);
2103e22bee78STejun Heo 
2104493008a8STejun Heo 	if (!wq->rescuer)
2105493a1724STejun Heo 		return;
2106e22bee78STejun Heo 
2107e22bee78STejun Heo 	/* mayday mayday mayday */
2108493a1724STejun Heo 	if (list_empty(&pwq->mayday_node)) {
210977668c8bSLai Jiangshan 		/*
211077668c8bSLai Jiangshan 		 * If @pwq is for an unbound wq, its base ref may be put at
211177668c8bSLai Jiangshan 		 * any time due to an attribute change.  Pin @pwq until the
211277668c8bSLai Jiangshan 		 * rescuer is done with it.
211377668c8bSLai Jiangshan 		 */
211477668c8bSLai Jiangshan 		get_pwq(pwq);
2115493a1724STejun Heo 		list_add_tail(&pwq->mayday_node, &wq->maydays);
2116e22bee78STejun Heo 		wake_up_process(wq->rescuer->task);
2117493a1724STejun Heo 	}
2118e22bee78STejun Heo }
2119e22bee78STejun Heo 
212032a6c723SKees Cook static void pool_mayday_timeout(struct timer_list *t)
2121e22bee78STejun Heo {
212232a6c723SKees Cook 	struct worker_pool *pool = from_timer(pool, t, mayday_timer);
2123e22bee78STejun Heo 	struct work_struct *work;
2124e22bee78STejun Heo 
2125a9b8a985SSebastian Andrzej Siewior 	raw_spin_lock_irq(&pool->lock);
2126a9b8a985SSebastian Andrzej Siewior 	raw_spin_lock(&wq_mayday_lock);		/* for wq->maydays */
2127e22bee78STejun Heo 
212863d95a91STejun Heo 	if (need_to_create_worker(pool)) {
2129e22bee78STejun Heo 		/*
2130e22bee78STejun Heo 		 * We've been trying to create a new worker but
2131e22bee78STejun Heo 		 * haven't been successful.  We might be hitting an
2132e22bee78STejun Heo 		 * allocation deadlock.  Send distress signals to
2133e22bee78STejun Heo 		 * rescuers.
2134e22bee78STejun Heo 		 */
213563d95a91STejun Heo 		list_for_each_entry(work, &pool->worklist, entry)
2136e22bee78STejun Heo 			send_mayday(work);
2137e22bee78STejun Heo 	}
2138e22bee78STejun Heo 
2139a9b8a985SSebastian Andrzej Siewior 	raw_spin_unlock(&wq_mayday_lock);
2140a9b8a985SSebastian Andrzej Siewior 	raw_spin_unlock_irq(&pool->lock);
2141e22bee78STejun Heo 
214263d95a91STejun Heo 	mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INTERVAL);
2143e22bee78STejun Heo }
2144e22bee78STejun Heo 
2145e22bee78STejun Heo /**
2146e22bee78STejun Heo  * maybe_create_worker - create a new worker if necessary
214763d95a91STejun Heo  * @pool: pool to create a new worker for
2148e22bee78STejun Heo  *
214963d95a91STejun Heo  * Create a new worker for @pool if necessary.  @pool is guaranteed to
2150e22bee78STejun Heo  * have at least one idle worker on return from this function.  If
2151e22bee78STejun Heo  * creating a new worker takes longer than MAYDAY_INTERVAL, mayday is
215263d95a91STejun Heo  * sent to all rescuers with works scheduled on @pool to resolve
2153e22bee78STejun Heo  * possible allocation deadlock.
2154e22bee78STejun Heo  *
2155c5aa87bbSTejun Heo  * On return, need_to_create_worker() is guaranteed to be %false and
2156c5aa87bbSTejun Heo  * may_start_working() %true.
2157e22bee78STejun Heo  *
2158e22bee78STejun Heo  * LOCKING:
2159a9b8a985SSebastian Andrzej Siewior  * raw_spin_lock_irq(pool->lock) which may be released and regrabbed
2160e22bee78STejun Heo  * multiple times.  Does GFP_KERNEL allocations.  Called only from
2161e22bee78STejun Heo  * manager.
2162e22bee78STejun Heo  */
216329187a9eSTejun Heo static void maybe_create_worker(struct worker_pool *pool)
2164d565ed63STejun Heo __releases(&pool->lock)
2165d565ed63STejun Heo __acquires(&pool->lock)
2166e22bee78STejun Heo {
2167e22bee78STejun Heo restart:
2168a9b8a985SSebastian Andrzej Siewior 	raw_spin_unlock_irq(&pool->lock);
21699f9c2364STejun Heo 
2170e22bee78STejun Heo 	/* if we don't make progress in MAYDAY_INITIAL_TIMEOUT, call for help */
217163d95a91STejun Heo 	mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INITIAL_TIMEOUT);
2172e22bee78STejun Heo 
2173e22bee78STejun Heo 	while (true) {
2174051e1850SLai Jiangshan 		if (create_worker(pool) || !need_to_create_worker(pool))
2175e22bee78STejun Heo 			break;
2176e22bee78STejun Heo 
2177e212f361SLai Jiangshan 		schedule_timeout_interruptible(CREATE_COOLDOWN);
21789f9c2364STejun Heo 
217963d95a91STejun Heo 		if (!need_to_create_worker(pool))
2180e22bee78STejun Heo 			break;
2181e22bee78STejun Heo 	}
2182e22bee78STejun Heo 
218363d95a91STejun Heo 	del_timer_sync(&pool->mayday_timer);
2184a9b8a985SSebastian Andrzej Siewior 	raw_spin_lock_irq(&pool->lock);
2185051e1850SLai Jiangshan 	/*
2186051e1850SLai Jiangshan 	 * This is necessary even after a new worker was just successfully
2187051e1850SLai Jiangshan 	 * created as @pool->lock was dropped and the new worker might have
2188051e1850SLai Jiangshan 	 * already become busy.
2189051e1850SLai Jiangshan 	 */
219063d95a91STejun Heo 	if (need_to_create_worker(pool))
2191e22bee78STejun Heo 		goto restart;
2192e22bee78STejun Heo }
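
/*
 * Illustrative sketch (editorial example, not part of the kernel source):
 * the retry-with-escalation shape of maybe_create_worker() above.  A
 * fallback path (for the pool, the mayday timer waking rescuers) is armed
 * up front, and the slow path keeps retrying with a cooldown until it
 * succeeds or is no longer needed.  try_make_resource() and still_needed()
 * are hypothetical stubs.
 */
static bool try_make_resource(void);
static bool still_needed(void);

static void ensure_resource(struct timer_list *fallback_timer)
{
	/* escalate if no progress is made within the initial window */
	mod_timer(fallback_timer, jiffies + MAYDAY_INITIAL_TIMEOUT);

	while (still_needed()) {
		if (try_make_resource())
			break;
		/* don't hammer a failing allocator */
		schedule_timeout_interruptible(CREATE_COOLDOWN);
	}

	del_timer_sync(fallback_timer);
}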
2193e22bee78STejun Heo 
2194e22bee78STejun Heo /**
2195e22bee78STejun Heo  * manage_workers - manage worker pool
2196e22bee78STejun Heo  * @worker: self
2197e22bee78STejun Heo  *
2198706026c2STejun Heo  * Assume the manager role and manage the worker pool @worker belongs
2199e22bee78STejun Heo  * to.  At any given time, there can be at most one manager per
2200706026c2STejun Heo  * pool.  The exclusion is handled automatically by this function.
2201e22bee78STejun Heo  *
2202e22bee78STejun Heo  * The caller can safely start processing works on false return.  On
2203e22bee78STejun Heo  * true return, it's guaranteed that need_to_create_worker() is false
2204e22bee78STejun Heo  * and may_start_working() is true.
2205e22bee78STejun Heo  *
2206e22bee78STejun Heo  * CONTEXT:
2207a9b8a985SSebastian Andrzej Siewior  * raw_spin_lock_irq(pool->lock) which may be released and regrabbed
2208e22bee78STejun Heo  * multiple times.  Does GFP_KERNEL allocations.
2209e22bee78STejun Heo  *
2210d185af30SYacine Belkadi  * Return:
221129187a9eSTejun Heo  * %false if the pool doesn't need management and the caller can safely
221229187a9eSTejun Heo  * start processing works, %true if management function was performed and
221329187a9eSTejun Heo  * the conditions that the caller verified before calling the function may
221429187a9eSTejun Heo  * no longer be true.
2215e22bee78STejun Heo  */
2216e22bee78STejun Heo static bool manage_workers(struct worker *worker)
2217e22bee78STejun Heo {
221863d95a91STejun Heo 	struct worker_pool *pool = worker->pool;
2219e22bee78STejun Heo 
2220692b4825STejun Heo 	if (pool->flags & POOL_MANAGER_ACTIVE)
222129187a9eSTejun Heo 		return false;
2222692b4825STejun Heo 
2223692b4825STejun Heo 	pool->flags |= POOL_MANAGER_ACTIVE;
22242607d7a6STejun Heo 	pool->manager = worker;
2225e22bee78STejun Heo 
222629187a9eSTejun Heo 	maybe_create_worker(pool);
2227e22bee78STejun Heo 
22282607d7a6STejun Heo 	pool->manager = NULL;
2229692b4825STejun Heo 	pool->flags &= ~POOL_MANAGER_ACTIVE;
2230d8bb65abSSebastian Andrzej Siewior 	rcuwait_wake_up(&manager_wait);
223129187a9eSTejun Heo 	return true;
2232e22bee78STejun Heo }
2233e22bee78STejun Heo 
2234a62428c0STejun Heo /**
2235a62428c0STejun Heo  * process_one_work - process single work
2236c34056a3STejun Heo  * @worker: self
2237a62428c0STejun Heo  * @work: work to process
2238a62428c0STejun Heo  *
2239a62428c0STejun Heo  * Process @work.  This function contains all the logic necessary to
2240a62428c0STejun Heo  * process a single work item, including synchronization against and
2241a62428c0STejun Heo  * interaction with other workers on the same cpu, queueing and
2242a62428c0STejun Heo  * flushing.  As long as the context requirement is met, any worker can
2243a62428c0STejun Heo  * call this function to process a work item.
2244a62428c0STejun Heo  *
2245a62428c0STejun Heo  * CONTEXT:
2246a9b8a985SSebastian Andrzej Siewior  * raw_spin_lock_irq(pool->lock) which is released and regrabbed.
2247a62428c0STejun Heo  */
2248c34056a3STejun Heo static void process_one_work(struct worker *worker, struct work_struct *work)
2249d565ed63STejun Heo __releases(&pool->lock)
2250d565ed63STejun Heo __acquires(&pool->lock)
22511da177e4SLinus Torvalds {
2252112202d9STejun Heo 	struct pool_workqueue *pwq = get_work_pwq(work);
2253bd7bdd43STejun Heo 	struct worker_pool *pool = worker->pool;
2254112202d9STejun Heo 	bool cpu_intensive = pwq->wq->flags & WQ_CPU_INTENSIVE;
2255c4560c2cSLai Jiangshan 	unsigned long work_data;
22567e11629dSTejun Heo 	struct worker *collision;
22574e6045f1SJohannes Berg #ifdef CONFIG_LOCKDEP
22584e6045f1SJohannes Berg 	/*
2259a62428c0STejun Heo 	 * It is permissible to free the struct work_struct from
2260a62428c0STejun Heo 	 * inside the function that is called from it; lockdep needs to
2261a62428c0STejun Heo 	 * take this into account.  To avoid bogus "held
2262a62428c0STejun Heo 	 * lock freed" warnings as well as problems when looking into
2263a62428c0STejun Heo 	 * work->lockdep_map, make a copy and use that here.
22644e6045f1SJohannes Berg 	 */
22654d82a1deSPeter Zijlstra 	struct lockdep_map lockdep_map;
22664d82a1deSPeter Zijlstra 
22674d82a1deSPeter Zijlstra 	lockdep_copy_map(&lockdep_map, &work->lockdep_map);
22684e6045f1SJohannes Berg #endif
2269807407c0SLai Jiangshan 	/* ensure we're on the correct CPU */
227085327af6SLai Jiangshan 	WARN_ON_ONCE(!(pool->flags & POOL_DISASSOCIATED) &&
2271ec22ca5eSTejun Heo 		     raw_smp_processor_id() != pool->cpu);
227225511a47STejun Heo 
22737e11629dSTejun Heo 	/*
22747e11629dSTejun Heo 	 * A single work shouldn't be executed concurrently by
22757e11629dSTejun Heo 	 * multiple workers on a single cpu.  Check whether anyone is
22767e11629dSTejun Heo 	 * already processing the work.  If so, defer the work to the
22777e11629dSTejun Heo 	 * currently executing one.
22787e11629dSTejun Heo 	 */
2279c9e7cf27STejun Heo 	collision = find_worker_executing_work(pool, work);
22807e11629dSTejun Heo 	if (unlikely(collision)) {
22817e11629dSTejun Heo 		move_linked_works(work, &collision->scheduled, NULL);
22827e11629dSTejun Heo 		return;
22837e11629dSTejun Heo 	}
22841da177e4SLinus Torvalds 
22858930cabaSTejun Heo 	/* claim and dequeue */
22861da177e4SLinus Torvalds 	debug_work_deactivate(work);
2287c9e7cf27STejun Heo 	hash_add(pool->busy_hash, &worker->hentry, (unsigned long)work);
2288c34056a3STejun Heo 	worker->current_work = work;
2289a2c1c57bSTejun Heo 	worker->current_func = work->func;
2290112202d9STejun Heo 	worker->current_pwq = pwq;
2291c4560c2cSLai Jiangshan 	work_data = *work_data_bits(work);
2292d812796eSLai Jiangshan 	worker->current_color = get_work_color(work_data);
22937a22ad75STejun Heo 
22948bf89593STejun Heo 	/*
22958bf89593STejun Heo 	 * Record wq name for cmdline and debug reporting, may get
22968bf89593STejun Heo 	 * overridden through set_worker_desc().
22978bf89593STejun Heo 	 */
22988bf89593STejun Heo 	strscpy(worker->desc, pwq->wq->name, WORKER_DESC_LEN);
22998bf89593STejun Heo 
2300a62428c0STejun Heo 	list_del_init(&work->entry);
2301a62428c0STejun Heo 
2302649027d7STejun Heo 	/*
2303228f1d00SLai Jiangshan 	 * CPU intensive works don't participate in concurrency management.
2304228f1d00SLai Jiangshan 	 * They're the scheduler's responsibility.  This takes @worker out
2305228f1d00SLai Jiangshan 	 * of concurrency management and the next code block will chain
2306228f1d00SLai Jiangshan 	 * execution of the pending work items.
2307fb0e7bebSTejun Heo 	 */
2308fb0e7bebSTejun Heo 	if (unlikely(cpu_intensive))
2309228f1d00SLai Jiangshan 		worker_set_flags(worker, WORKER_CPU_INTENSIVE);
2310fb0e7bebSTejun Heo 
2311974271c4STejun Heo 	/*
2312a489a03eSLai Jiangshan 	 * Wake up another worker if necessary.  The condition is always
2313a489a03eSLai Jiangshan 	 * false for normal per-cpu workers since nr_running would always
2314a489a03eSLai Jiangshan 	 * be >= 1 at this point.  This is used to chain execution of the
2315a489a03eSLai Jiangshan 	 * pending work items for WORKER_NOT_RUNNING workers such as the
2316228f1d00SLai Jiangshan 	 * UNBOUND and CPU_INTENSIVE ones.
2317974271c4STejun Heo 	 */
2318a489a03eSLai Jiangshan 	if (need_more_worker(pool))
231963d95a91STejun Heo 		wake_up_worker(pool);
2320974271c4STejun Heo 
23218930cabaSTejun Heo 	/*
23227c3eed5cSTejun Heo 	 * Record the last pool and clear PENDING which should be the last
2323d565ed63STejun Heo 	 * update to @work.  Also, do this inside @pool->lock so that
232423657bb1STejun Heo 	 * PENDING and queued state changes happen together while IRQ is
232523657bb1STejun Heo 	 * disabled.
23268930cabaSTejun Heo 	 */
23277c3eed5cSTejun Heo 	set_work_pool_and_clear_pending(work, pool->id);
23281da177e4SLinus Torvalds 
2329a9b8a985SSebastian Andrzej Siewior 	raw_spin_unlock_irq(&pool->lock);
2330365970a1SDavid Howells 
2331a1d14934SPeter Zijlstra 	lock_map_acquire(&pwq->wq->lockdep_map);
23323295f0efSIngo Molnar 	lock_map_acquire(&lockdep_map);
2333e6f3faa7SPeter Zijlstra 	/*
2334f52be570SPeter Zijlstra 	 * Strictly speaking we should mark the invariant state without holding
2335f52be570SPeter Zijlstra 	 * any locks, that is, before these two lock_map_acquire()'s.
2336e6f3faa7SPeter Zijlstra 	 *
2337e6f3faa7SPeter Zijlstra 	 * However, that would result in:
2338e6f3faa7SPeter Zijlstra 	 *
2339e6f3faa7SPeter Zijlstra 	 *   A(W1)
2340e6f3faa7SPeter Zijlstra 	 *   WFC(C)
2341e6f3faa7SPeter Zijlstra 	 *		A(W1)
2342e6f3faa7SPeter Zijlstra 	 *		C(C)
2343e6f3faa7SPeter Zijlstra 	 *
2344e6f3faa7SPeter Zijlstra 	 * Which would create W1->C->W1 dependencies, even though there is no
2345e6f3faa7SPeter Zijlstra 	 * actual deadlock possible. There are two solutions: use a
2346e6f3faa7SPeter Zijlstra 	 * read-recursive acquire on the work(queue) 'locks', which would then
2347f52be570SPeter Zijlstra 	 * hit the lockdep limitation on recursive locks, or simply discard
2348e6f3faa7SPeter Zijlstra 	 * these locks.
2349e6f3faa7SPeter Zijlstra 	 *
2350e6f3faa7SPeter Zijlstra 	 * AFAICT there is no possible deadlock scenario between the
2351e6f3faa7SPeter Zijlstra 	 * flush_work() and complete() primitives (except for single-threaded
2352e6f3faa7SPeter Zijlstra 	 * workqueues), so hiding them isn't a problem.
2353e6f3faa7SPeter Zijlstra 	 */
2354f52be570SPeter Zijlstra 	lockdep_invariant_state(true);
2355e36c886aSArjan van de Ven 	trace_workqueue_execute_start(work);
2356a2c1c57bSTejun Heo 	worker->current_func(work);
2357e36c886aSArjan van de Ven 	/*
2358e36c886aSArjan van de Ven 	 * While we must be careful not to use "work" after this, the trace
2359e36c886aSArjan van de Ven 	 * point will only record its address.
2360e36c886aSArjan van de Ven 	 */
23611c5da0ecSDaniel Jordan 	trace_workqueue_execute_end(work, worker->current_func);
23623295f0efSIngo Molnar 	lock_map_release(&lockdep_map);
2363112202d9STejun Heo 	lock_map_release(&pwq->wq->lockdep_map);
23641da177e4SLinus Torvalds 
2365d5abe669SPeter Zijlstra 	if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
2366044c782cSValentin Ilie 		pr_err("BUG: workqueue leaked lock or atomic: %s/0x%08x/%d\n"
2367d75f773cSSakari Ailus 		       "     last function: %ps\n",
2368a2c1c57bSTejun Heo 		       current->comm, preempt_count(), task_pid_nr(current),
2369a2c1c57bSTejun Heo 		       worker->current_func);
2370d5abe669SPeter Zijlstra 		debug_show_held_locks(current);
2371d5abe669SPeter Zijlstra 		dump_stack();
2372d5abe669SPeter Zijlstra 	}
2373d5abe669SPeter Zijlstra 
2374b22ce278STejun Heo 	/*
2375025f50f3SSebastian Andrzej Siewior 	 * The following prevents a kworker from hogging CPU on !PREEMPTION
2376b22ce278STejun Heo 	 * kernels, where a requeueing work item waiting for something to
2377b22ce278STejun Heo 	 * happen could deadlock with stop_machine as such work item could
2378b22ce278STejun Heo 	 * indefinitely requeue itself while all other CPUs are trapped in
2379789cbbecSJoe Lawrence 	 * stop_machine. At the same time, report a quiescent RCU state so
2380789cbbecSJoe Lawrence 	 * the same condition doesn't freeze RCU.
2381b22ce278STejun Heo 	 */
2382a7e6425eSPaul E. McKenney 	cond_resched();
2383b22ce278STejun Heo 
2384a9b8a985SSebastian Andrzej Siewior 	raw_spin_lock_irq(&pool->lock);
2385a62428c0STejun Heo 
2386fb0e7bebSTejun Heo 	/* clear cpu intensive status */
2387fb0e7bebSTejun Heo 	if (unlikely(cpu_intensive))
2388fb0e7bebSTejun Heo 		worker_clr_flags(worker, WORKER_CPU_INTENSIVE);
2389fb0e7bebSTejun Heo 
23901b69ac6bSJohannes Weiner 	/* tag the worker for identification in schedule() */
23911b69ac6bSJohannes Weiner 	worker->last_func = worker->current_func;
23921b69ac6bSJohannes Weiner 
2393a62428c0STejun Heo 	/* we're done with it, release */
239442f8570fSSasha Levin 	hash_del(&worker->hentry);
2395c34056a3STejun Heo 	worker->current_work = NULL;
2396a2c1c57bSTejun Heo 	worker->current_func = NULL;
2397112202d9STejun Heo 	worker->current_pwq = NULL;
2398d812796eSLai Jiangshan 	worker->current_color = INT_MAX;
2399c4560c2cSLai Jiangshan 	pwq_dec_nr_in_flight(pwq, work_data);
24001da177e4SLinus Torvalds }
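
/*
 * Illustrative sketch (editorial example, not part of the kernel source):
 * how the WQ_CPU_INTENSIVE handling in process_one_work() above is
 * requested from the user side.  Work items queued on such a workqueue are
 * excluded from concurrency management, so a long-running job doesn't
 * stall other per-cpu work items.  "my_crypto", crypto_wq and my_hash_fn()
 * are hypothetical; assumes <linux/workqueue.h> and <linux/init.h>.
 */
static struct workqueue_struct *crypto_wq;

static void my_hash_fn(struct work_struct *work);
static DECLARE_WORK(my_hash_work, my_hash_fn);

static int __init my_crypto_init(void)
{
	crypto_wq = alloc_workqueue("my_crypto", WQ_CPU_INTENSIVE, 0);
	if (!crypto_wq)
		return -ENOMEM;

	/* runs with WORKER_CPU_INTENSIVE set for the duration of the item */
	queue_work(crypto_wq, &my_hash_work);
	return 0;
}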
24011da177e4SLinus Torvalds 
2402affee4b2STejun Heo /**
2403affee4b2STejun Heo  * process_scheduled_works - process scheduled works
2404affee4b2STejun Heo  * @worker: self
2405affee4b2STejun Heo  *
2406affee4b2STejun Heo  * Process all scheduled works.  Please note that the scheduled list
2407affee4b2STejun Heo  * may change while processing a work, so this function repeatedly
2408affee4b2STejun Heo  * fetches a work from the top and executes it.
2409affee4b2STejun Heo  *
2410affee4b2STejun Heo  * CONTEXT:
2411a9b8a985SSebastian Andrzej Siewior  * raw_spin_lock_irq(pool->lock) which may be released and regrabbed
2412affee4b2STejun Heo  * multiple times.
2413affee4b2STejun Heo  */
2414affee4b2STejun Heo static void process_scheduled_works(struct worker *worker)
24151da177e4SLinus Torvalds {
2416affee4b2STejun Heo 	while (!list_empty(&worker->scheduled)) {
2417affee4b2STejun Heo 		struct work_struct *work = list_first_entry(&worker->scheduled,
2418a62428c0STejun Heo 						struct work_struct, entry);
2419c34056a3STejun Heo 		process_one_work(worker, work);
2420a62428c0STejun Heo 	}
24211da177e4SLinus Torvalds }
24221da177e4SLinus Torvalds 
2423197f6accSTejun Heo static void set_pf_worker(bool val)
2424197f6accSTejun Heo {
2425197f6accSTejun Heo 	mutex_lock(&wq_pool_attach_mutex);
2426197f6accSTejun Heo 	if (val)
2427197f6accSTejun Heo 		current->flags |= PF_WQ_WORKER;
2428197f6accSTejun Heo 	else
2429197f6accSTejun Heo 		current->flags &= ~PF_WQ_WORKER;
2430197f6accSTejun Heo 	mutex_unlock(&wq_pool_attach_mutex);
2431197f6accSTejun Heo }
2432197f6accSTejun Heo 
24334690c4abSTejun Heo /**
24344690c4abSTejun Heo  * worker_thread - the worker thread function
2435c34056a3STejun Heo  * @__worker: self
24364690c4abSTejun Heo  *
2437c5aa87bbSTejun Heo  * The worker thread function.  All workers belong to a worker_pool -
2438c5aa87bbSTejun Heo  * either a per-cpu one or dynamic unbound one.  These workers process all
2439c5aa87bbSTejun Heo  * work items regardless of their specific target workqueue.  The only
2440c5aa87bbSTejun Heo  * exception is work items which belong to workqueues with a rescuer, which
2441c5aa87bbSTejun Heo  * will be explained in rescuer_thread().
2442d185af30SYacine Belkadi  *
2443d185af30SYacine Belkadi  * Return: 0
24444690c4abSTejun Heo  */
2445c34056a3STejun Heo static int worker_thread(void *__worker)
24461da177e4SLinus Torvalds {
2447c34056a3STejun Heo 	struct worker *worker = __worker;
2448bd7bdd43STejun Heo 	struct worker_pool *pool = worker->pool;
24491da177e4SLinus Torvalds 
2450e22bee78STejun Heo 	/* tell the scheduler that this is a workqueue worker */
2451197f6accSTejun Heo 	set_pf_worker(true);
2452c8e55f36STejun Heo woke_up:
2453a9b8a985SSebastian Andrzej Siewior 	raw_spin_lock_irq(&pool->lock);
2454affee4b2STejun Heo 
2455a9ab775bSTejun Heo 	/* am I supposed to die? */
2456a9ab775bSTejun Heo 	if (unlikely(worker->flags & WORKER_DIE)) {
2457a9b8a985SSebastian Andrzej Siewior 		raw_spin_unlock_irq(&pool->lock);
2458a9ab775bSTejun Heo 		WARN_ON_ONCE(!list_empty(&worker->entry));
2459197f6accSTejun Heo 		set_pf_worker(false);
246060f5a4bcSLai Jiangshan 
246160f5a4bcSLai Jiangshan 		set_task_comm(worker->task, "kworker/dying");
2462e441b56fSZhen Lei 		ida_free(&pool->worker_ida, worker->id);
2463a2d812a2STejun Heo 		worker_detach_from_pool(worker);
246460f5a4bcSLai Jiangshan 		kfree(worker);
2465c8e55f36STejun Heo 		return 0;
2466c8e55f36STejun Heo 	}
2467c8e55f36STejun Heo 
2468c8e55f36STejun Heo 	worker_leave_idle(worker);
2469db7bccf4STejun Heo recheck:
2470e22bee78STejun Heo 	/* no more worker necessary? */
247163d95a91STejun Heo 	if (!need_more_worker(pool))
2472e22bee78STejun Heo 		goto sleep;
2473e22bee78STejun Heo 
2474e22bee78STejun Heo 	/* do we need to manage? */
247563d95a91STejun Heo 	if (unlikely(!may_start_working(pool)) && manage_workers(worker))
2476e22bee78STejun Heo 		goto recheck;
2477e22bee78STejun Heo 
2478c8e55f36STejun Heo 	/*
2479c8e55f36STejun Heo 	 * ->scheduled list can only be filled while a worker is
2480c8e55f36STejun Heo 	 * preparing to process a work or actually processing it.
2481c8e55f36STejun Heo 	 * Make sure nobody diddled with it while I was sleeping.
2482c8e55f36STejun Heo 	 */
24836183c009STejun Heo 	WARN_ON_ONCE(!list_empty(&worker->scheduled));
2484c8e55f36STejun Heo 
2485e22bee78STejun Heo 	/*
2486a9ab775bSTejun Heo 	 * Finish PREP stage.  We're guaranteed to have at least one idle
2487a9ab775bSTejun Heo 	 * worker or that someone else has already assumed the manager
2488a9ab775bSTejun Heo 	 * role.  This is where @worker starts participating in concurrency
2489a9ab775bSTejun Heo 	 * management if applicable and concurrency management is restored
2490a9ab775bSTejun Heo 	 * after being rebound.  See rebind_workers() for details.
2491e22bee78STejun Heo 	 */
2492a9ab775bSTejun Heo 	worker_clr_flags(worker, WORKER_PREP | WORKER_REBOUND);
2493e22bee78STejun Heo 
2494e22bee78STejun Heo 	do {
2495affee4b2STejun Heo 		struct work_struct *work =
2496bd7bdd43STejun Heo 			list_first_entry(&pool->worklist,
2497affee4b2STejun Heo 					 struct work_struct, entry);
2498affee4b2STejun Heo 
249982607adcSTejun Heo 		pool->watchdog_ts = jiffies;
250082607adcSTejun Heo 
2501c8e55f36STejun Heo 		if (likely(!(*work_data_bits(work) & WORK_STRUCT_LINKED))) {
2502affee4b2STejun Heo 			/* optimization path, not strictly necessary */
2503affee4b2STejun Heo 			process_one_work(worker, work);
2504affee4b2STejun Heo 			if (unlikely(!list_empty(&worker->scheduled)))
2505affee4b2STejun Heo 				process_scheduled_works(worker);
2506affee4b2STejun Heo 		} else {
2507c8e55f36STejun Heo 			move_linked_works(work, &worker->scheduled, NULL);
2508affee4b2STejun Heo 			process_scheduled_works(worker);
2509affee4b2STejun Heo 		}
251063d95a91STejun Heo 	} while (keep_working(pool));
2511affee4b2STejun Heo 
2512228f1d00SLai Jiangshan 	worker_set_flags(worker, WORKER_PREP);
2513d313dd85STejun Heo sleep:
2514c8e55f36STejun Heo 	/*
2515d565ed63STejun Heo 	 * pool->lock is held and there's no work to process and no need to
2516d565ed63STejun Heo 	 * manage, sleep.  Workers are woken up only while holding
2517d565ed63STejun Heo 	 * pool->lock or from local cpu, so setting the current state
2518d565ed63STejun Heo 	 * before releasing pool->lock is enough to prevent losing any
2519d565ed63STejun Heo 	 * event.
2520c8e55f36STejun Heo 	 */
2521c8e55f36STejun Heo 	worker_enter_idle(worker);
2522c5a94a61SPeter Zijlstra 	__set_current_state(TASK_IDLE);
2523a9b8a985SSebastian Andrzej Siewior 	raw_spin_unlock_irq(&pool->lock);
25241da177e4SLinus Torvalds 	schedule();
2525c8e55f36STejun Heo 	goto woke_up;
25261da177e4SLinus Torvalds }
25271da177e4SLinus Torvalds 
2528e22bee78STejun Heo /**
2529e22bee78STejun Heo  * rescuer_thread - the rescuer thread function
2530111c225aSTejun Heo  * @__rescuer: self
2531e22bee78STejun Heo  *
2532e22bee78STejun Heo  * Workqueue rescuer thread function.  There's one rescuer for each
2533493008a8STejun Heo  * workqueue which has WQ_MEM_RECLAIM set.
2534e22bee78STejun Heo  *
2535706026c2STejun Heo  * Regular work processing on a pool may block trying to create a new
2536e22bee78STejun Heo  * worker, which uses a GFP_KERNEL allocation.  That has a slight chance
2537e22bee78STejun Heo  * of developing into a deadlock if some work items currently on the same
2538e22bee78STejun Heo  * queue need to be processed to satisfy the GFP_KERNEL allocation.  This
2539e22bee78STejun Heo  * is the problem the rescuer solves.
2540e22bee78STejun Heo  *
2541706026c2STejun Heo  * When such a condition is possible, the pool summons the rescuers of all
2542706026c2STejun Heo  * workqueues which have work items queued on the pool and lets them process
2543e22bee78STejun Heo  * those work items so that forward progress can be guaranteed.
2544e22bee78STejun Heo  *
2545e22bee78STejun Heo  * This should happen rarely.
2546d185af30SYacine Belkadi  *
2547d185af30SYacine Belkadi  * Return: 0
2548e22bee78STejun Heo  */
2549111c225aSTejun Heo static int rescuer_thread(void *__rescuer)
2550e22bee78STejun Heo {
2551111c225aSTejun Heo 	struct worker *rescuer = __rescuer;
2552111c225aSTejun Heo 	struct workqueue_struct *wq = rescuer->rescue_wq;
2553e22bee78STejun Heo 	struct list_head *scheduled = &rescuer->scheduled;
25544d595b86SLai Jiangshan 	bool should_stop;
2555e22bee78STejun Heo 
2556e22bee78STejun Heo 	set_user_nice(current, RESCUER_NICE_LEVEL);
2557111c225aSTejun Heo 
2558111c225aSTejun Heo 	/*
2559111c225aSTejun Heo 	 * Mark rescuer as worker too.  As WORKER_PREP is never cleared, it
2560111c225aSTejun Heo 	 * doesn't participate in concurrency management.
2561111c225aSTejun Heo 	 */
2562197f6accSTejun Heo 	set_pf_worker(true);
2563e22bee78STejun Heo repeat:
2564c5a94a61SPeter Zijlstra 	set_current_state(TASK_IDLE);
25651da177e4SLinus Torvalds 
25664d595b86SLai Jiangshan 	/*
25674d595b86SLai Jiangshan 	 * By the time the rescuer is requested to stop, the workqueue
25684d595b86SLai Jiangshan 	 * shouldn't have any work pending, but @wq->maydays may still have
25694d595b86SLai Jiangshan 	 * pwq(s) queued.  This can happen when non-rescuer workers consume
25704d595b86SLai Jiangshan 	 * all the work items before the rescuer gets to them.  Go through
25714d595b86SLai Jiangshan 	 * @wq->maydays processing before acting on should_stop so that the
25724d595b86SLai Jiangshan 	 * list is always empty on exit.
25734d595b86SLai Jiangshan 	 */
25744d595b86SLai Jiangshan 	should_stop = kthread_should_stop();
25751da177e4SLinus Torvalds 
2576493a1724STejun Heo 	/* see whether any pwq is asking for help */
2577a9b8a985SSebastian Andrzej Siewior 	raw_spin_lock_irq(&wq_mayday_lock);
2578493a1724STejun Heo 
2579493a1724STejun Heo 	while (!list_empty(&wq->maydays)) {
2580493a1724STejun Heo 		struct pool_workqueue *pwq = list_first_entry(&wq->maydays,
2581493a1724STejun Heo 					struct pool_workqueue, mayday_node);
2582112202d9STejun Heo 		struct worker_pool *pool = pwq->pool;
2583e22bee78STejun Heo 		struct work_struct *work, *n;
258482607adcSTejun Heo 		bool first = true;
2585e22bee78STejun Heo 
2586e22bee78STejun Heo 		__set_current_state(TASK_RUNNING);
2587493a1724STejun Heo 		list_del_init(&pwq->mayday_node);
2588493a1724STejun Heo 
2589a9b8a985SSebastian Andrzej Siewior 		raw_spin_unlock_irq(&wq_mayday_lock);
2590e22bee78STejun Heo 
259151697d39SLai Jiangshan 		worker_attach_to_pool(rescuer, pool);
259251697d39SLai Jiangshan 
2593a9b8a985SSebastian Andrzej Siewior 		raw_spin_lock_irq(&pool->lock);
2594e22bee78STejun Heo 
2595e22bee78STejun Heo 		/*
2596e22bee78STejun Heo 		 * Slurp in all works issued via this workqueue and
2597e22bee78STejun Heo 		 * process'em.
2598e22bee78STejun Heo 		 */
25990479c8c5STejun Heo 		WARN_ON_ONCE(!list_empty(scheduled));
260082607adcSTejun Heo 		list_for_each_entry_safe(work, n, &pool->worklist, entry) {
260182607adcSTejun Heo 			if (get_work_pwq(work) == pwq) {
260282607adcSTejun Heo 				if (first)
260382607adcSTejun Heo 					pool->watchdog_ts = jiffies;
2604e22bee78STejun Heo 				move_linked_works(work, scheduled, &n);
260582607adcSTejun Heo 			}
260682607adcSTejun Heo 			first = false;
260782607adcSTejun Heo 		}
2608e22bee78STejun Heo 
2609008847f6SNeilBrown 		if (!list_empty(scheduled)) {
2610e22bee78STejun Heo 			process_scheduled_works(rescuer);
26117576958aSTejun Heo 
26127576958aSTejun Heo 			/*
2613008847f6SNeilBrown 			 * The above execution of rescued work items could
2614008847f6SNeilBrown 			 * have created more to rescue through
2615f97a4a1aSLai Jiangshan 			 * pwq_activate_first_inactive() or chained
2616008847f6SNeilBrown 			 * queueing.  Let's put @pwq back on the mayday list so
2617008847f6SNeilBrown 			 * that such back-to-back work items, which may be
2618008847f6SNeilBrown 			 * being used to relieve memory pressure, don't
2619008847f6SNeilBrown 			 * incur a MAYDAY_INTERVAL delay in between.
2620008847f6SNeilBrown 			 */
26214f3f4cf3SLai Jiangshan 			if (pwq->nr_active && need_to_create_worker(pool)) {
2622a9b8a985SSebastian Andrzej Siewior 				raw_spin_lock(&wq_mayday_lock);
2623e66b39afSTejun Heo 				/*
2624e66b39afSTejun Heo 				 * Queue iff we aren't racing destruction
2625e66b39afSTejun Heo 				 * and somebody else hasn't queued it already.
2626e66b39afSTejun Heo 				 */
2627e66b39afSTejun Heo 				if (wq->rescuer && list_empty(&pwq->mayday_node)) {
2628008847f6SNeilBrown 					get_pwq(pwq);
2629e66b39afSTejun Heo 					list_add_tail(&pwq->mayday_node, &wq->maydays);
2630e66b39afSTejun Heo 				}
2631a9b8a985SSebastian Andrzej Siewior 				raw_spin_unlock(&wq_mayday_lock);
2632008847f6SNeilBrown 			}
2633008847f6SNeilBrown 		}
2634008847f6SNeilBrown 
2635008847f6SNeilBrown 		/*
263677668c8bSLai Jiangshan 		 * Put the reference grabbed by send_mayday().  @pool won't
263713b1d625SLai Jiangshan 		 * go away while we're still attached to it.
263877668c8bSLai Jiangshan 		 */
263977668c8bSLai Jiangshan 		put_pwq(pwq);
264077668c8bSLai Jiangshan 
264177668c8bSLai Jiangshan 		/*
2642d8ca83e6SLai Jiangshan 		 * Leave this pool.  If need_more_worker() is %true, notify a
26437576958aSTejun Heo 		 * regular worker; otherwise, we end up with 0 concurrency
26447576958aSTejun Heo 		 * and stalling the execution.
26457576958aSTejun Heo 		 */
2646d8ca83e6SLai Jiangshan 		if (need_more_worker(pool))
264763d95a91STejun Heo 			wake_up_worker(pool);
26487576958aSTejun Heo 
2649a9b8a985SSebastian Andrzej Siewior 		raw_spin_unlock_irq(&pool->lock);
265013b1d625SLai Jiangshan 
2651a2d812a2STejun Heo 		worker_detach_from_pool(rescuer);
265213b1d625SLai Jiangshan 
2653a9b8a985SSebastian Andrzej Siewior 		raw_spin_lock_irq(&wq_mayday_lock);
26541da177e4SLinus Torvalds 	}
26551da177e4SLinus Torvalds 
2656a9b8a985SSebastian Andrzej Siewior 	raw_spin_unlock_irq(&wq_mayday_lock);
2657493a1724STejun Heo 
26584d595b86SLai Jiangshan 	if (should_stop) {
26594d595b86SLai Jiangshan 		__set_current_state(TASK_RUNNING);
2660197f6accSTejun Heo 		set_pf_worker(false);
26614d595b86SLai Jiangshan 		return 0;
26624d595b86SLai Jiangshan 	}
26634d595b86SLai Jiangshan 
2664111c225aSTejun Heo 	/* rescuers should never participate in concurrency management */
2665111c225aSTejun Heo 	WARN_ON_ONCE(!(rescuer->flags & WORKER_NOT_RUNNING));
2666e22bee78STejun Heo 	schedule();
2667e22bee78STejun Heo 	goto repeat;
26681da177e4SLinus Torvalds }
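
/*
 * Illustrative sketch (editorial example, not part of the kernel source):
 * the user-side contract behind rescuer_thread() above.  A workqueue that
 * sits on a memory-reclaim path must be allocated with WQ_MEM_RECLAIM so it
 * gets a rescuer guaranteeing forward progress even when worker creation is
 * stuck.  "my_writeback" and writeback_wq are hypothetical.
 */
static struct workqueue_struct *writeback_wq;

static int __init my_block_driver_init(void)
{
	/* a rescuer kthread is created for this wq; see rescuer_thread() */
	writeback_wq = alloc_workqueue("my_writeback", WQ_MEM_RECLAIM, 0);
	return writeback_wq ? 0 : -ENOMEM;
}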
26691da177e4SLinus Torvalds 
2670fca839c0STejun Heo /**
2671fca839c0STejun Heo  * check_flush_dependency - check for flush dependency sanity
2672fca839c0STejun Heo  * @target_wq: workqueue being flushed
2673fca839c0STejun Heo  * @target_work: work item being flushed (NULL for workqueue flushes)
2674fca839c0STejun Heo  *
2675fca839c0STejun Heo  * %current is trying to flush the whole @target_wq or @target_work on it.
2676fca839c0STejun Heo  * If @target_wq doesn't have %WQ_MEM_RECLAIM, verify that %current is not
2677fca839c0STejun Heo  * reclaiming memory or running on a workqueue which doesn't have
2678fca839c0STejun Heo  * %WQ_MEM_RECLAIM as that can break forward-progress guarantee leading to
2679fca839c0STejun Heo  * a deadlock.
2680fca839c0STejun Heo  */
2681fca839c0STejun Heo static void check_flush_dependency(struct workqueue_struct *target_wq,
2682fca839c0STejun Heo 				   struct work_struct *target_work)
2683fca839c0STejun Heo {
2684fca839c0STejun Heo 	work_func_t target_func = target_work ? target_work->func : NULL;
2685fca839c0STejun Heo 	struct worker *worker;
2686fca839c0STejun Heo 
2687fca839c0STejun Heo 	if (target_wq->flags & WQ_MEM_RECLAIM)
2688fca839c0STejun Heo 		return;
2689fca839c0STejun Heo 
2690fca839c0STejun Heo 	worker = current_wq_worker();
2691fca839c0STejun Heo 
2692fca839c0STejun Heo 	WARN_ONCE(current->flags & PF_MEMALLOC,
2693d75f773cSSakari Ailus 		  "workqueue: PF_MEMALLOC task %d(%s) is flushing !WQ_MEM_RECLAIM %s:%ps",
2694fca839c0STejun Heo 		  current->pid, current->comm, target_wq->name, target_func);
269523d11a58STejun Heo 	WARN_ONCE(worker && ((worker->current_pwq->wq->flags &
269623d11a58STejun Heo 			      (WQ_MEM_RECLAIM | __WQ_LEGACY)) == WQ_MEM_RECLAIM),
2697d75f773cSSakari Ailus 		  "workqueue: WQ_MEM_RECLAIM %s:%ps is flushing !WQ_MEM_RECLAIM %s:%ps",
2698fca839c0STejun Heo 		  worker->current_pwq->wq->name, worker->current_func,
2699fca839c0STejun Heo 		  target_wq->name, target_func);
2700fca839c0STejun Heo }
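
/*
 * Illustrative sketch (editorial example, not part of the kernel source):
 * the dependency that check_flush_dependency() above warns about.  A
 * WQ_MEM_RECLAIM work item flushing a !WQ_MEM_RECLAIM workqueue may wait
 * forever under memory pressure because the flushed workqueue has no
 * rescuer.  reclaim_wq, plain_wq and bad_reclaim_fn() are hypothetical.
 */
static struct workqueue_struct *reclaim_wq;	/* allocated WQ_MEM_RECLAIM */
static struct workqueue_struct *plain_wq;	/* no WQ_MEM_RECLAIM */

static void bad_reclaim_fn(struct work_struct *work)
{
	/*
	 * BAD if queued on reclaim_wq: triggers the WARN_ONCE() above,
	 * since plain_wq may never make progress while memory is tight.
	 */
	flush_workqueue(plain_wq);
}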
2701fca839c0STejun Heo 
2702fc2e4d70SOleg Nesterov struct wq_barrier {
2703fc2e4d70SOleg Nesterov 	struct work_struct	work;
2704fc2e4d70SOleg Nesterov 	struct completion	done;
27052607d7a6STejun Heo 	struct task_struct	*task;	/* purely informational */
2706fc2e4d70SOleg Nesterov };
2707fc2e4d70SOleg Nesterov 
2708fc2e4d70SOleg Nesterov static void wq_barrier_func(struct work_struct *work)
2709fc2e4d70SOleg Nesterov {
2710fc2e4d70SOleg Nesterov 	struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
2711fc2e4d70SOleg Nesterov 	complete(&barr->done);
2712fc2e4d70SOleg Nesterov }
2713fc2e4d70SOleg Nesterov 
27144690c4abSTejun Heo /**
27154690c4abSTejun Heo  * insert_wq_barrier - insert a barrier work
2716112202d9STejun Heo  * @pwq: pwq to insert barrier into
27174690c4abSTejun Heo  * @barr: wq_barrier to insert
2718affee4b2STejun Heo  * @target: target work to attach @barr to
2719affee4b2STejun Heo  * @worker: worker currently executing @target, NULL if @target is not executing
27204690c4abSTejun Heo  *
2721affee4b2STejun Heo  * @barr is linked to @target such that @barr is completed only after
2722affee4b2STejun Heo  * @target finishes execution.  Please note that the ordering
2723affee4b2STejun Heo  * guarantee is observed only with respect to @target and on the local
2724affee4b2STejun Heo  * cpu.
2725affee4b2STejun Heo  *
2726affee4b2STejun Heo  * Currently, a queued barrier can't be canceled.  This is because
2727affee4b2STejun Heo  * try_to_grab_pending() can't determine whether the work to be
2728affee4b2STejun Heo  * grabbed is at the head of the queue and thus can't clear the LINKED
2729affee4b2STejun Heo  * flag of the previous work, while there must be a valid next work
2730affee4b2STejun Heo  * after a work with the LINKED flag set.
2731affee4b2STejun Heo  *
2732affee4b2STejun Heo  * Note that when @worker is non-NULL, @target may be modified
2733112202d9STejun Heo  * underneath us, so we can't reliably determine pwq from @target.
27344690c4abSTejun Heo  *
27354690c4abSTejun Heo  * CONTEXT:
2736a9b8a985SSebastian Andrzej Siewior  * raw_spin_lock_irq(pool->lock).
27374690c4abSTejun Heo  */
2738112202d9STejun Heo static void insert_wq_barrier(struct pool_workqueue *pwq,
2739affee4b2STejun Heo 			      struct wq_barrier *barr,
2740affee4b2STejun Heo 			      struct work_struct *target, struct worker *worker)
2741fc2e4d70SOleg Nesterov {
2742d812796eSLai Jiangshan 	unsigned int work_flags = 0;
2743d812796eSLai Jiangshan 	unsigned int work_color;
2744affee4b2STejun Heo 	struct list_head *head;
2745affee4b2STejun Heo 
2746dc186ad7SThomas Gleixner 	/*
2747d565ed63STejun Heo 	 * debugobject calls are safe here even with pool->lock locked
2748dc186ad7SThomas Gleixner 	 * as we know for sure that this will not trigger any of the
2749dc186ad7SThomas Gleixner 	 * checks and call back into the fixup functions where we
2750dc186ad7SThomas Gleixner 	 * might deadlock.
2751dc186ad7SThomas Gleixner 	 */
2752ca1cab37SAndrew Morton 	INIT_WORK_ONSTACK(&barr->work, wq_barrier_func);
275322df02bbSTejun Heo 	__set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work));
275452fa5bc5SBoqun Feng 
2755fd1a5b04SByungchul Park 	init_completion_map(&barr->done, &target->lockdep_map);
2756fd1a5b04SByungchul Park 
27572607d7a6STejun Heo 	barr->task = current;
275883c22520SOleg Nesterov 
2759018f3a13SLai Jiangshan 	/* The barrier work item does not participate in pwq->nr_active. */
2760018f3a13SLai Jiangshan 	work_flags |= WORK_STRUCT_INACTIVE;
2761018f3a13SLai Jiangshan 
2762affee4b2STejun Heo 	/*
2763affee4b2STejun Heo 	 * If @target is currently being executed, schedule the
2764affee4b2STejun Heo 	 * barrier to the worker; otherwise, put it after @target.
2765affee4b2STejun Heo 	 */
2766d812796eSLai Jiangshan 	if (worker) {
2767affee4b2STejun Heo 		head = worker->scheduled.next;
2768d812796eSLai Jiangshan 		work_color = worker->current_color;
2769d812796eSLai Jiangshan 	} else {
2770affee4b2STejun Heo 		unsigned long *bits = work_data_bits(target);
2771affee4b2STejun Heo 
2772affee4b2STejun Heo 		head = target->entry.next;
2773affee4b2STejun Heo 		/* there can already be other linked works, inherit and set */
2774d21cece0SLai Jiangshan 		work_flags |= *bits & WORK_STRUCT_LINKED;
2775d812796eSLai Jiangshan 		work_color = get_work_color(*bits);
2776affee4b2STejun Heo 		__set_bit(WORK_STRUCT_LINKED_BIT, bits);
2777affee4b2STejun Heo 	}
2778affee4b2STejun Heo 
2779d812796eSLai Jiangshan 	pwq->nr_in_flight[work_color]++;
2780d812796eSLai Jiangshan 	work_flags |= work_color_to_flags(work_color);
2781d812796eSLai Jiangshan 
2782dc186ad7SThomas Gleixner 	debug_work_activate(&barr->work);
2783d21cece0SLai Jiangshan 	insert_work(pwq, &barr->work, head, work_flags);
2784fc2e4d70SOleg Nesterov }
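
/*
 * Illustrative sketch (editorial example, not part of the kernel source):
 * the on-stack work + completion idiom that struct wq_barrier above is
 * built from.  The stack frame must outlive the work item, hence the
 * unconditional wait_for_completion().  my_sync and friends are
 * hypothetical; assumes <linux/completion.h>.
 */
struct my_sync {
	struct work_struct	work;
	struct completion	done;
};

static void my_sync_fn(struct work_struct *work)
{
	struct my_sync *s = container_of(work, struct my_sync, work);

	complete(&s->done);
}

static void run_on_wq_and_wait(struct workqueue_struct *wq)
{
	struct my_sync s;

	INIT_WORK_ONSTACK(&s.work, my_sync_fn);
	init_completion(&s.done);
	queue_work(wq, &s.work);
	wait_for_completion(&s.done);
	destroy_work_on_stack(&s.work);
}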
2785fc2e4d70SOleg Nesterov 
278673f53c4aSTejun Heo /**
2787112202d9STejun Heo  * flush_workqueue_prep_pwqs - prepare pwqs for workqueue flushing
278873f53c4aSTejun Heo  * @wq: workqueue being flushed
278973f53c4aSTejun Heo  * @flush_color: new flush color, < 0 for no-op
279073f53c4aSTejun Heo  * @work_color: new work color, < 0 for no-op
279173f53c4aSTejun Heo  *
2792112202d9STejun Heo  * Prepare pwqs for workqueue flushing.
279373f53c4aSTejun Heo  *
2794112202d9STejun Heo  * If @flush_color is non-negative, flush_color on all pwqs should be
2795112202d9STejun Heo  * -1.  If no pwq has in-flight commands at the specified color, all
2796112202d9STejun Heo  * pwq->flush_color's stay at -1 and %false is returned.  If any pwq
2797112202d9STejun Heo  * has in flight commands, its pwq->flush_color is set to
2798112202d9STejun Heo  * @flush_color, @wq->nr_pwqs_to_flush is updated accordingly, pwq
279973f53c4aSTejun Heo  * wakeup logic is armed and %true is returned.
280073f53c4aSTejun Heo  *
280173f53c4aSTejun Heo  * The caller should have initialized @wq->first_flusher prior to
280273f53c4aSTejun Heo  * calling this function with non-negative @flush_color.  If
280373f53c4aSTejun Heo  * @flush_color is negative, no flush color update is done and %false
280473f53c4aSTejun Heo  * is returned.
280573f53c4aSTejun Heo  *
2806112202d9STejun Heo  * If @work_color is non-negative, all pwqs should have the same
280773f53c4aSTejun Heo  * work_color which is previous to @work_color and all will be
280873f53c4aSTejun Heo  * advanced to @work_color.
280973f53c4aSTejun Heo  *
281073f53c4aSTejun Heo  * CONTEXT:
28113c25a55dSLai Jiangshan  * mutex_lock(wq->mutex).
281273f53c4aSTejun Heo  *
2813d185af30SYacine Belkadi  * Return:
281473f53c4aSTejun Heo  * %true if @flush_color >= 0 and there's something to flush.  %false
281573f53c4aSTejun Heo  * otherwise.
281673f53c4aSTejun Heo  */
2817112202d9STejun Heo static bool flush_workqueue_prep_pwqs(struct workqueue_struct *wq,
281873f53c4aSTejun Heo 				      int flush_color, int work_color)
28191da177e4SLinus Torvalds {
282073f53c4aSTejun Heo 	bool wait = false;
282149e3cf44STejun Heo 	struct pool_workqueue *pwq;
28221da177e4SLinus Torvalds 
282373f53c4aSTejun Heo 	if (flush_color >= 0) {
28246183c009STejun Heo 		WARN_ON_ONCE(atomic_read(&wq->nr_pwqs_to_flush));
2825112202d9STejun Heo 		atomic_set(&wq->nr_pwqs_to_flush, 1);
2826dc186ad7SThomas Gleixner 	}
282714441960SOleg Nesterov 
282849e3cf44STejun Heo 	for_each_pwq(pwq, wq) {
2829112202d9STejun Heo 		struct worker_pool *pool = pwq->pool;
28301da177e4SLinus Torvalds 
2831a9b8a985SSebastian Andrzej Siewior 		raw_spin_lock_irq(&pool->lock);
283273f53c4aSTejun Heo 
283373f53c4aSTejun Heo 		if (flush_color >= 0) {
28346183c009STejun Heo 			WARN_ON_ONCE(pwq->flush_color != -1);
283573f53c4aSTejun Heo 
2836112202d9STejun Heo 			if (pwq->nr_in_flight[flush_color]) {
2837112202d9STejun Heo 				pwq->flush_color = flush_color;
2838112202d9STejun Heo 				atomic_inc(&wq->nr_pwqs_to_flush);
283973f53c4aSTejun Heo 				wait = true;
28401da177e4SLinus Torvalds 			}
284173f53c4aSTejun Heo 		}
284273f53c4aSTejun Heo 
284373f53c4aSTejun Heo 		if (work_color >= 0) {
28446183c009STejun Heo 			WARN_ON_ONCE(work_color != work_next_color(pwq->work_color));
2845112202d9STejun Heo 			pwq->work_color = work_color;
284673f53c4aSTejun Heo 		}
284773f53c4aSTejun Heo 
2848a9b8a985SSebastian Andrzej Siewior 		raw_spin_unlock_irq(&pool->lock);
28491da177e4SLinus Torvalds 	}
28501da177e4SLinus Torvalds 
2851112202d9STejun Heo 	if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_pwqs_to_flush))
285273f53c4aSTejun Heo 		complete(&wq->first_flusher->done);
285373f53c4aSTejun Heo 
285473f53c4aSTejun Heo 	return wait;
285583c22520SOleg Nesterov }
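
/*
 * Worked example (editorial illustration, not part of the kernel source):
 * suppose wq->flush_color == wq->work_color == 3 when __flush_workqueue()
 * runs.  The flusher takes flush_color 3 and advances wq->work_color to
 * work_next_color(3) == 4, so new work items are tagged with color 4 while
 * flush_workqueue_prep_pwqs() arms every pwq whose nr_in_flight[3] is
 * non-zero.  Once the last color-3 item retires, nr_pwqs_to_flush hits
 * zero, the flusher's completion fires and wq->flush_color advances to 4.
 */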
28561da177e4SLinus Torvalds 
28570fcb78c2SRolf Eike Beer /**
2858c4f135d6STetsuo Handa  * __flush_workqueue - ensure that any scheduled work has run to completion.
28590fcb78c2SRolf Eike Beer  * @wq: workqueue to flush
28601da177e4SLinus Torvalds  *
2861c5aa87bbSTejun Heo  * This function sleeps until all work items which were queued on entry
2862c5aa87bbSTejun Heo  * have finished execution, but it is not livelocked by new incoming ones.
28631da177e4SLinus Torvalds  */
2864c4f135d6STetsuo Handa void __flush_workqueue(struct workqueue_struct *wq)
28651da177e4SLinus Torvalds {
286673f53c4aSTejun Heo 	struct wq_flusher this_flusher = {
286773f53c4aSTejun Heo 		.list = LIST_HEAD_INIT(this_flusher.list),
286873f53c4aSTejun Heo 		.flush_color = -1,
2869fd1a5b04SByungchul Park 		.done = COMPLETION_INITIALIZER_ONSTACK_MAP(this_flusher.done, wq->lockdep_map),
287073f53c4aSTejun Heo 	};
287173f53c4aSTejun Heo 	int next_color;
2872b1f4ec17SOleg Nesterov 
28733347fa09STejun Heo 	if (WARN_ON(!wq_online))
28743347fa09STejun Heo 		return;
28753347fa09STejun Heo 
287687915adcSJohannes Berg 	lock_map_acquire(&wq->lockdep_map);
287787915adcSJohannes Berg 	lock_map_release(&wq->lockdep_map);
287887915adcSJohannes Berg 
28793c25a55dSLai Jiangshan 	mutex_lock(&wq->mutex);
288073f53c4aSTejun Heo 
288173f53c4aSTejun Heo 	/*
288273f53c4aSTejun Heo 	 * Start-to-wait phase
288373f53c4aSTejun Heo 	 */
288473f53c4aSTejun Heo 	next_color = work_next_color(wq->work_color);
288573f53c4aSTejun Heo 
288673f53c4aSTejun Heo 	if (next_color != wq->flush_color) {
288773f53c4aSTejun Heo 		/*
288873f53c4aSTejun Heo 		 * Color space is not full.  The current work_color
288973f53c4aSTejun Heo 		 * becomes our flush_color and work_color is advanced
289073f53c4aSTejun Heo 		 * by one.
289173f53c4aSTejun Heo 		 */
28926183c009STejun Heo 		WARN_ON_ONCE(!list_empty(&wq->flusher_overflow));
289373f53c4aSTejun Heo 		this_flusher.flush_color = wq->work_color;
289473f53c4aSTejun Heo 		wq->work_color = next_color;
289573f53c4aSTejun Heo 
289673f53c4aSTejun Heo 		if (!wq->first_flusher) {
289773f53c4aSTejun Heo 			/* no flush in progress, become the first flusher */
28986183c009STejun Heo 			WARN_ON_ONCE(wq->flush_color != this_flusher.flush_color);
289973f53c4aSTejun Heo 
290073f53c4aSTejun Heo 			wq->first_flusher = &this_flusher;
290173f53c4aSTejun Heo 
2902112202d9STejun Heo 			if (!flush_workqueue_prep_pwqs(wq, wq->flush_color,
290373f53c4aSTejun Heo 						       wq->work_color)) {
290473f53c4aSTejun Heo 				/* nothing to flush, done */
290573f53c4aSTejun Heo 				wq->flush_color = next_color;
290673f53c4aSTejun Heo 				wq->first_flusher = NULL;
290773f53c4aSTejun Heo 				goto out_unlock;
290873f53c4aSTejun Heo 			}
290973f53c4aSTejun Heo 		} else {
291073f53c4aSTejun Heo 			/* wait in queue */
29116183c009STejun Heo 			WARN_ON_ONCE(wq->flush_color == this_flusher.flush_color);
291273f53c4aSTejun Heo 			list_add_tail(&this_flusher.list, &wq->flusher_queue);
2913112202d9STejun Heo 			flush_workqueue_prep_pwqs(wq, -1, wq->work_color);
291473f53c4aSTejun Heo 		}
291573f53c4aSTejun Heo 	} else {
291673f53c4aSTejun Heo 		/*
291773f53c4aSTejun Heo 		 * Oops, color space is full, wait on overflow queue.
291873f53c4aSTejun Heo 		 * The next flush completion will assign us
291973f53c4aSTejun Heo 		 * flush_color and transfer to flusher_queue.
292073f53c4aSTejun Heo 		 */
292173f53c4aSTejun Heo 		list_add_tail(&this_flusher.list, &wq->flusher_overflow);
292273f53c4aSTejun Heo 	}
292373f53c4aSTejun Heo 
2924fca839c0STejun Heo 	check_flush_dependency(wq, NULL);
2925fca839c0STejun Heo 
29263c25a55dSLai Jiangshan 	mutex_unlock(&wq->mutex);
292773f53c4aSTejun Heo 
292873f53c4aSTejun Heo 	wait_for_completion(&this_flusher.done);
292973f53c4aSTejun Heo 
293073f53c4aSTejun Heo 	/*
293173f53c4aSTejun Heo 	 * Wake-up-and-cascade phase
293273f53c4aSTejun Heo 	 *
293373f53c4aSTejun Heo 	 * First flushers are responsible for cascading flushes and
293473f53c4aSTejun Heo 	 * handling overflow.  Non-first flushers can simply return.
293573f53c4aSTejun Heo 	 */
293600d5d15bSChris Wilson 	if (READ_ONCE(wq->first_flusher) != &this_flusher)
293773f53c4aSTejun Heo 		return;
293873f53c4aSTejun Heo 
29393c25a55dSLai Jiangshan 	mutex_lock(&wq->mutex);
294073f53c4aSTejun Heo 
29414ce48b37STejun Heo 	/* we might have raced, check again with mutex held */
29424ce48b37STejun Heo 	if (wq->first_flusher != &this_flusher)
29434ce48b37STejun Heo 		goto out_unlock;
29444ce48b37STejun Heo 
294500d5d15bSChris Wilson 	WRITE_ONCE(wq->first_flusher, NULL);
294673f53c4aSTejun Heo 
29476183c009STejun Heo 	WARN_ON_ONCE(!list_empty(&this_flusher.list));
29486183c009STejun Heo 	WARN_ON_ONCE(wq->flush_color != this_flusher.flush_color);
294973f53c4aSTejun Heo 
295073f53c4aSTejun Heo 	while (true) {
295173f53c4aSTejun Heo 		struct wq_flusher *next, *tmp;
295273f53c4aSTejun Heo 
295373f53c4aSTejun Heo 		/* complete all the flushers sharing the current flush color */
295473f53c4aSTejun Heo 		list_for_each_entry_safe(next, tmp, &wq->flusher_queue, list) {
295573f53c4aSTejun Heo 			if (next->flush_color != wq->flush_color)
295673f53c4aSTejun Heo 				break;
295773f53c4aSTejun Heo 			list_del_init(&next->list);
295873f53c4aSTejun Heo 			complete(&next->done);
295973f53c4aSTejun Heo 		}
296073f53c4aSTejun Heo 
29616183c009STejun Heo 		WARN_ON_ONCE(!list_empty(&wq->flusher_overflow) &&
296273f53c4aSTejun Heo 			     wq->flush_color != work_next_color(wq->work_color));
296373f53c4aSTejun Heo 
296473f53c4aSTejun Heo 		/* this flush_color is finished, advance by one */
296573f53c4aSTejun Heo 		wq->flush_color = work_next_color(wq->flush_color);
296673f53c4aSTejun Heo 
296773f53c4aSTejun Heo 		/* one color has been freed, handle overflow queue */
296873f53c4aSTejun Heo 		if (!list_empty(&wq->flusher_overflow)) {
296973f53c4aSTejun Heo 			/*
297073f53c4aSTejun Heo 			 * Assign the same color to all overflowed
297173f53c4aSTejun Heo 			 * flushers, advance work_color and append to
297273f53c4aSTejun Heo 			 * flusher_queue.  This is the start-to-wait
297373f53c4aSTejun Heo 			 * phase for these overflowed flushers.
297473f53c4aSTejun Heo 			 */
297573f53c4aSTejun Heo 			list_for_each_entry(tmp, &wq->flusher_overflow, list)
297673f53c4aSTejun Heo 				tmp->flush_color = wq->work_color;
297773f53c4aSTejun Heo 
297873f53c4aSTejun Heo 			wq->work_color = work_next_color(wq->work_color);
297973f53c4aSTejun Heo 
298073f53c4aSTejun Heo 			list_splice_tail_init(&wq->flusher_overflow,
298173f53c4aSTejun Heo 					      &wq->flusher_queue);
2982112202d9STejun Heo 			flush_workqueue_prep_pwqs(wq, -1, wq->work_color);
298373f53c4aSTejun Heo 		}
298473f53c4aSTejun Heo 
298573f53c4aSTejun Heo 		if (list_empty(&wq->flusher_queue)) {
29866183c009STejun Heo 			WARN_ON_ONCE(wq->flush_color != wq->work_color);
298773f53c4aSTejun Heo 			break;
298873f53c4aSTejun Heo 		}
298973f53c4aSTejun Heo 
299073f53c4aSTejun Heo 		/*
299173f53c4aSTejun Heo 		 * Need to flush more colors.  Make the next flusher
2992112202d9STejun Heo 		 * the new first flusher and arm pwqs.
299373f53c4aSTejun Heo 		 */
29946183c009STejun Heo 		WARN_ON_ONCE(wq->flush_color == wq->work_color);
29956183c009STejun Heo 		WARN_ON_ONCE(wq->flush_color != next->flush_color);
299673f53c4aSTejun Heo 
299773f53c4aSTejun Heo 		list_del_init(&next->list);
299873f53c4aSTejun Heo 		wq->first_flusher = next;
299973f53c4aSTejun Heo 
3000112202d9STejun Heo 		if (flush_workqueue_prep_pwqs(wq, wq->flush_color, -1))
300173f53c4aSTejun Heo 			break;
300273f53c4aSTejun Heo 
300373f53c4aSTejun Heo 		/*
300473f53c4aSTejun Heo 		 * Meh... this color is already done, clear first
300573f53c4aSTejun Heo 		 * flusher and repeat cascading.
300673f53c4aSTejun Heo 		 */
300773f53c4aSTejun Heo 		wq->first_flusher = NULL;
300873f53c4aSTejun Heo 	}
300973f53c4aSTejun Heo 
301073f53c4aSTejun Heo out_unlock:
30113c25a55dSLai Jiangshan 	mutex_unlock(&wq->mutex);
30121da177e4SLinus Torvalds }
3013c4f135d6STetsuo Handa EXPORT_SYMBOL(__flush_workqueue);
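
/*
 * Illustrative usage sketch (editorial example, not part of the kernel
 * source): callers normally reach __flush_workqueue() through the
 * flush_workqueue() wrapper.  On return, everything queued before the call
 * has finished; items queued while flushing may still be pending.
 * my_quiesce() is hypothetical.
 */
static void my_quiesce(struct workqueue_struct *wq)
{
	/* wait out all work items that were queued before this point */
	flush_workqueue(wq);
}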
30141da177e4SLinus Torvalds 
30159c5a2ba7STejun Heo /**
30169c5a2ba7STejun Heo  * drain_workqueue - drain a workqueue
30179c5a2ba7STejun Heo  * @wq: workqueue to drain
30189c5a2ba7STejun Heo  *
30199c5a2ba7STejun Heo  * Wait until the workqueue becomes empty.  While draining is in progress,
30209c5a2ba7STejun Heo  * only chain queueing is allowed.  IOW, only currently pending or running
30219c5a2ba7STejun Heo  * work items on @wq can queue further work items on it.  @wq is flushed
3022b749b1b6SChen Hanxiao  * repeatedly until it becomes empty.  The number of flushes is determined
30239c5a2ba7STejun Heo  * by the depth of chaining and should be relatively short.  Whine if it
30249c5a2ba7STejun Heo  * takes too long.
30259c5a2ba7STejun Heo  */
30269c5a2ba7STejun Heo void drain_workqueue(struct workqueue_struct *wq)
30279c5a2ba7STejun Heo {
30289c5a2ba7STejun Heo 	unsigned int flush_cnt = 0;
302949e3cf44STejun Heo 	struct pool_workqueue *pwq;
30309c5a2ba7STejun Heo 
30319c5a2ba7STejun Heo 	/*
30329c5a2ba7STejun Heo 	 * __queue_work() needs to test whether there are drainers; it is much
30339c5a2ba7STejun Heo 	 * hotter than drain_workqueue() and already looks at @wq->flags.
3034618b01ebSTejun Heo 	 * Use __WQ_DRAINING so that queue doesn't have to check nr_drainers.
30359c5a2ba7STejun Heo 	 */
303687fc741eSLai Jiangshan 	mutex_lock(&wq->mutex);
30379c5a2ba7STejun Heo 	if (!wq->nr_drainers++)
3038618b01ebSTejun Heo 		wq->flags |= __WQ_DRAINING;
303987fc741eSLai Jiangshan 	mutex_unlock(&wq->mutex);
30409c5a2ba7STejun Heo reflush:
3041c4f135d6STetsuo Handa 	__flush_workqueue(wq);
30429c5a2ba7STejun Heo 
3043b09f4fd3SLai Jiangshan 	mutex_lock(&wq->mutex);
304476af4d93STejun Heo 
304549e3cf44STejun Heo 	for_each_pwq(pwq, wq) {
3046fa2563e4SThomas Tuttle 		bool drained;
30479c5a2ba7STejun Heo 
3048a9b8a985SSebastian Andrzej Siewior 		raw_spin_lock_irq(&pwq->pool->lock);
3049f97a4a1aSLai Jiangshan 		drained = !pwq->nr_active && list_empty(&pwq->inactive_works);
3050a9b8a985SSebastian Andrzej Siewior 		raw_spin_unlock_irq(&pwq->pool->lock);
3051fa2563e4SThomas Tuttle 
3052fa2563e4SThomas Tuttle 		if (drained)
30539c5a2ba7STejun Heo 			continue;
30549c5a2ba7STejun Heo 
30559c5a2ba7STejun Heo 		if (++flush_cnt == 10 ||
30569c5a2ba7STejun Heo 		    (flush_cnt % 100 == 0 && flush_cnt <= 1000))
3057e9ad2eb3SStephen Zhang 			pr_warn("workqueue %s: %s() isn't complete after %u tries\n",
3058e9ad2eb3SStephen Zhang 				wq->name, __func__, flush_cnt);
305976af4d93STejun Heo 
3060b09f4fd3SLai Jiangshan 		mutex_unlock(&wq->mutex);
30619c5a2ba7STejun Heo 		goto reflush;
30629c5a2ba7STejun Heo 	}
30639c5a2ba7STejun Heo 
30649c5a2ba7STejun Heo 	if (!--wq->nr_drainers)
3065618b01ebSTejun Heo 		wq->flags &= ~__WQ_DRAINING;
306687fc741eSLai Jiangshan 	mutex_unlock(&wq->mutex);
30679c5a2ba7STejun Heo }
30689c5a2ba7STejun Heo EXPORT_SYMBOL_GPL(drain_workqueue);
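
/*
 * Illustrative usage sketch (editorial example, not part of the kernel
 * source): drain_workqueue() suits teardown of self-requeueing work items,
 * which a single flush_workqueue() cannot terminate.  my_poll_wq,
 * my_poll_fn() and my_hw_busy() are hypothetical.
 */
static struct workqueue_struct *my_poll_wq;
static bool my_hw_busy(void);

static void my_poll_fn(struct work_struct *work)
{
	if (my_hw_busy())
		queue_work(my_poll_wq, work);	/* chain queueing is allowed */
}

static void my_poll_teardown(void)
{
	/* returns only once my_poll_fn() has stopped requeueing itself */
	drain_workqueue(my_poll_wq);
	destroy_workqueue(my_poll_wq);
}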
30699c5a2ba7STejun Heo 
3070d6e89786SJohannes Berg static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
3071d6e89786SJohannes Berg 			     bool from_cancel)
3072baf59022STejun Heo {
3073baf59022STejun Heo 	struct worker *worker = NULL;
3074c9e7cf27STejun Heo 	struct worker_pool *pool;
3075112202d9STejun Heo 	struct pool_workqueue *pwq;
3076baf59022STejun Heo 
3077baf59022STejun Heo 	might_sleep();
3078baf59022STejun Heo 
307924acfb71SThomas Gleixner 	rcu_read_lock();
3080fa1b54e6STejun Heo 	pool = get_work_pool(work);
3081fa1b54e6STejun Heo 	if (!pool) {
308224acfb71SThomas Gleixner 		rcu_read_unlock();
3083fa1b54e6STejun Heo 		return false;
3084fa1b54e6STejun Heo 	}
3085fa1b54e6STejun Heo 
3086a9b8a985SSebastian Andrzej Siewior 	raw_spin_lock_irq(&pool->lock);
30870b3dae68SLai Jiangshan 	/* see the comment in try_to_grab_pending() with the same code */
3088112202d9STejun Heo 	pwq = get_work_pwq(work);
3089112202d9STejun Heo 	if (pwq) {
3090112202d9STejun Heo 		if (unlikely(pwq->pool != pool))
3091baf59022STejun Heo 			goto already_gone;
3092606a5020STejun Heo 	} else {
3093c9e7cf27STejun Heo 		worker = find_worker_executing_work(pool, work);
3094baf59022STejun Heo 		if (!worker)
3095baf59022STejun Heo 			goto already_gone;
3096112202d9STejun Heo 		pwq = worker->current_pwq;
3097606a5020STejun Heo 	}
3098baf59022STejun Heo 
3099fca839c0STejun Heo 	check_flush_dependency(pwq->wq, work);
3100fca839c0STejun Heo 
3101112202d9STejun Heo 	insert_wq_barrier(pwq, barr, work, worker);
3102a9b8a985SSebastian Andrzej Siewior 	raw_spin_unlock_irq(&pool->lock);
3103baf59022STejun Heo 
3104e159489bSTejun Heo 	/*
3105a1d14934SPeter Zijlstra 	 * Force a lock recursion deadlock when using flush_work() inside a
3106a1d14934SPeter Zijlstra 	 * single-threaded or rescuer equipped workqueue.
3107a1d14934SPeter Zijlstra 	 *
3108a1d14934SPeter Zijlstra 	 * For single threaded workqueues the deadlock happens when the work
3109a1d14934SPeter Zijlstra 	 * is after the work issuing the flush_work(). For rescuer equipped
3110a1d14934SPeter Zijlstra 	 * workqueues the deadlock happens when the rescuer stalls, blocking
3111a1d14934SPeter Zijlstra 	 * forward progress.
3112e159489bSTejun Heo 	 */
3113d6e89786SJohannes Berg 	if (!from_cancel &&
3114d6e89786SJohannes Berg 	    (pwq->wq->saved_max_active == 1 || pwq->wq->rescuer)) {
3115112202d9STejun Heo 		lock_map_acquire(&pwq->wq->lockdep_map);
3116112202d9STejun Heo 		lock_map_release(&pwq->wq->lockdep_map);
3117a1d14934SPeter Zijlstra 	}
311824acfb71SThomas Gleixner 	rcu_read_unlock();
3119baf59022STejun Heo 	return true;
3120baf59022STejun Heo already_gone:
3121a9b8a985SSebastian Andrzej Siewior 	raw_spin_unlock_irq(&pool->lock);
312224acfb71SThomas Gleixner 	rcu_read_unlock();
3123baf59022STejun Heo 	return false;
3124baf59022STejun Heo }
3125baf59022STejun Heo 
3126d6e89786SJohannes Berg static bool __flush_work(struct work_struct *work, bool from_cancel)
3127d6e89786SJohannes Berg {
3128d6e89786SJohannes Berg 	struct wq_barrier barr;
3129d6e89786SJohannes Berg 
3130d6e89786SJohannes Berg 	if (WARN_ON(!wq_online))
3131d6e89786SJohannes Berg 		return false;
3132d6e89786SJohannes Berg 
31334d43d395STetsuo Handa 	if (WARN_ON(!work->func))
31344d43d395STetsuo Handa 		return false;
31354d43d395STetsuo Handa 
313687915adcSJohannes Berg 	lock_map_acquire(&work->lockdep_map);
313787915adcSJohannes Berg 	lock_map_release(&work->lockdep_map);
313887915adcSJohannes Berg 
3139d6e89786SJohannes Berg 	if (start_flush_work(work, &barr, from_cancel)) {
3140d6e89786SJohannes Berg 		wait_for_completion(&barr.done);
3141d6e89786SJohannes Berg 		destroy_work_on_stack(&barr.work);
3142d6e89786SJohannes Berg 		return true;
3143d6e89786SJohannes Berg 	} else {
3144d6e89786SJohannes Berg 		return false;
3145d6e89786SJohannes Berg 	}
3146d6e89786SJohannes Berg }
3147d6e89786SJohannes Berg 
3148db700897SOleg Nesterov /**
3149401a8d04STejun Heo  * flush_work - wait for a work to finish executing the last queueing instance
3150401a8d04STejun Heo  * @work: the work to flush
3151db700897SOleg Nesterov  *
3152606a5020STejun Heo  * Wait until @work has finished execution.  @work is guaranteed to be idle
3153606a5020STejun Heo  * on return if it hasn't been requeued since flush started.
3154401a8d04STejun Heo  *
3155d185af30SYacine Belkadi  * Return:
3156401a8d04STejun Heo  * %true if flush_work() waited for the work to finish execution,
3157401a8d04STejun Heo  * %false if it was already idle.
3158db700897SOleg Nesterov  */
3159401a8d04STejun Heo bool flush_work(struct work_struct *work)
3160db700897SOleg Nesterov {
3161d6e89786SJohannes Berg 	return __flush_work(work, false);
3162606a5020STejun Heo }
3163db700897SOleg Nesterov EXPORT_SYMBOL_GPL(flush_work);
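/*
 * Illustrative sketch, not part of this file: a typical flush_work()
 * call site.  "my_dev" and "io_work" are made-up names standing in for
 * a driver's own structures.
 *
 *	struct my_dev {
 *		struct work_struct	io_work;
 *	};
 *
 *	static void my_dev_quiesce(struct my_dev *dev)
 *	{
 *		flush_work(&dev->io_work);
 *		// io_work's callback is now idle unless the work was
 *		// requeued after the flush started
 *	}
 */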
3164db700897SOleg Nesterov 
31658603e1b3STejun Heo struct cwt_wait {
3166ac6424b9SIngo Molnar 	wait_queue_entry_t		wait;
31678603e1b3STejun Heo 	struct work_struct	*work;
31688603e1b3STejun Heo };
31698603e1b3STejun Heo 
3170ac6424b9SIngo Molnar static int cwt_wakefn(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
31718603e1b3STejun Heo {
31728603e1b3STejun Heo 	struct cwt_wait *cwait = container_of(wait, struct cwt_wait, wait);
31738603e1b3STejun Heo 
31748603e1b3STejun Heo 	if (cwait->work != key)
31758603e1b3STejun Heo 		return 0;
31768603e1b3STejun Heo 	return autoremove_wake_function(wait, mode, sync, key);
31778603e1b3STejun Heo }
31788603e1b3STejun Heo 
317936e227d2STejun Heo static bool __cancel_work_timer(struct work_struct *work, bool is_dwork)
3180401a8d04STejun Heo {
31818603e1b3STejun Heo 	static DECLARE_WAIT_QUEUE_HEAD(cancel_waitq);
3182bbb68dfaSTejun Heo 	unsigned long flags;
31831f1f642eSOleg Nesterov 	int ret;
31841f1f642eSOleg Nesterov 
31851f1f642eSOleg Nesterov 	do {
3186bbb68dfaSTejun Heo 		ret = try_to_grab_pending(work, is_dwork, &flags);
3187bbb68dfaSTejun Heo 		/*
31888603e1b3STejun Heo 		 * If someone else is already canceling, wait for it to
31898603e1b3STejun Heo 		 * finish.  flush_work() doesn't work for PREEMPT_NONE
31908603e1b3STejun Heo 		 * because we may get scheduled between @work's completion
31918603e1b3STejun Heo 		 * and the other canceling task resuming and clearing
31928603e1b3STejun Heo 		 * CANCELING - flush_work() will return false immediately
31938603e1b3STejun Heo 		 * as @work is no longer busy, try_to_grab_pending() will
31948603e1b3STejun Heo 		 * return -ENOENT as @work is still being canceled and the
31958603e1b3STejun Heo 		 * other canceling task won't be able to clear CANCELING as
31968603e1b3STejun Heo 		 * we're hogging the CPU.
31978603e1b3STejun Heo 		 *
31988603e1b3STejun Heo 		 * Let's wait for completion using a waitqueue.  As this
31998603e1b3STejun Heo 		 * may lead to the thundering herd problem, use a custom
32008603e1b3STejun Heo 		 * wake function which matches @work along with exclusive
32018603e1b3STejun Heo 		 * wait and wakeup.
3202bbb68dfaSTejun Heo 		 */
32038603e1b3STejun Heo 		if (unlikely(ret == -ENOENT)) {
32048603e1b3STejun Heo 			struct cwt_wait cwait;
32058603e1b3STejun Heo 
32068603e1b3STejun Heo 			init_wait(&cwait.wait);
32078603e1b3STejun Heo 			cwait.wait.func = cwt_wakefn;
32088603e1b3STejun Heo 			cwait.work = work;
32098603e1b3STejun Heo 
32108603e1b3STejun Heo 			prepare_to_wait_exclusive(&cancel_waitq, &cwait.wait,
32118603e1b3STejun Heo 						  TASK_UNINTERRUPTIBLE);
32128603e1b3STejun Heo 			if (work_is_canceling(work))
32138603e1b3STejun Heo 				schedule();
32148603e1b3STejun Heo 			finish_wait(&cancel_waitq, &cwait.wait);
32158603e1b3STejun Heo 		}
32161f1f642eSOleg Nesterov 	} while (unlikely(ret < 0));
32171f1f642eSOleg Nesterov 
3218bbb68dfaSTejun Heo 	/* tell other tasks trying to grab @work to back off */
3219bbb68dfaSTejun Heo 	mark_work_canceling(work);
3220bbb68dfaSTejun Heo 	local_irq_restore(flags);
3221bbb68dfaSTejun Heo 
32223347fa09STejun Heo 	/*
32233347fa09STejun Heo 	 * This allows canceling during early boot.  We know that @work
32243347fa09STejun Heo 	 * isn't executing.
32253347fa09STejun Heo 	 */
32263347fa09STejun Heo 	if (wq_online)
3227d6e89786SJohannes Berg 		__flush_work(work, true);
32283347fa09STejun Heo 
32297a22ad75STejun Heo 	clear_work_data(work);
32308603e1b3STejun Heo 
32318603e1b3STejun Heo 	/*
32328603e1b3STejun Heo 	 * Paired with prepare_to_wait() above so that either
32338603e1b3STejun Heo 	 * waitqueue_active() is visible here or !work_is_canceling() is
32348603e1b3STejun Heo 	 * visible there.
32358603e1b3STejun Heo 	 */
32368603e1b3STejun Heo 	smp_mb();
32378603e1b3STejun Heo 	if (waitqueue_active(&cancel_waitq))
32388603e1b3STejun Heo 		__wake_up(&cancel_waitq, TASK_NORMAL, 1, work);
32398603e1b3STejun Heo 
32401f1f642eSOleg Nesterov 	return ret;
32411f1f642eSOleg Nesterov }
32421f1f642eSOleg Nesterov 
32436e84d644SOleg Nesterov /**
3244401a8d04STejun Heo  * cancel_work_sync - cancel a work and wait for it to finish
3245401a8d04STejun Heo  * @work: the work to cancel
32466e84d644SOleg Nesterov  *
3247401a8d04STejun Heo  * Cancel @work and wait for its execution to finish.  This function
3248401a8d04STejun Heo  * can be used even if the work re-queues itself or migrates to
3249401a8d04STejun Heo  * another workqueue.  On return from this function, @work is
3250401a8d04STejun Heo  * guaranteed to be not pending or executing on any CPU.
32511f1f642eSOleg Nesterov  *
3252401a8d04STejun Heo  * cancel_work_sync(&delayed_work->work) must not be used for
3253401a8d04STejun Heo  * delayed work items.  Use cancel_delayed_work_sync() instead.
32546e84d644SOleg Nesterov  *
3255401a8d04STejun Heo  * The caller must ensure that the workqueue on which @work was last
32566e84d644SOleg Nesterov  * queued can't be destroyed before this function returns.
3257401a8d04STejun Heo  *
3258d185af30SYacine Belkadi  * Return:
3259401a8d04STejun Heo  * %true if @work was pending, %false otherwise.
32606e84d644SOleg Nesterov  */
3261401a8d04STejun Heo bool cancel_work_sync(struct work_struct *work)
32626e84d644SOleg Nesterov {
326336e227d2STejun Heo 	return __cancel_work_timer(work, false);
3264b89deed3SOleg Nesterov }
326528e53bddSOleg Nesterov EXPORT_SYMBOL_GPL(cancel_work_sync);
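/*
 * Illustrative sketch (hypothetical "my_dev"): canceling synchronously
 * before freeing the object that embeds the work item, so the callback
 * can no longer touch freed memory.
 *
 *	static void my_dev_release(struct my_dev *dev)
 *	{
 *		cancel_work_sync(&dev->io_work);	// not pending, not running
 *		kfree(dev);				// safe to free now
 *	}
 */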
3266b89deed3SOleg Nesterov 
32676e84d644SOleg Nesterov /**
3268401a8d04STejun Heo  * flush_delayed_work - wait for a dwork to finish executing the last queueing
3269401a8d04STejun Heo  * @dwork: the delayed work to flush
32706e84d644SOleg Nesterov  *
3271401a8d04STejun Heo  * Delayed timer is cancelled and the pending work is queued for
3272401a8d04STejun Heo  * immediate execution.  Like flush_work(), this function only
3273401a8d04STejun Heo  * considers the last queueing instance of @dwork.
32741f1f642eSOleg Nesterov  *
3275d185af30SYacine Belkadi  * Return:
3276401a8d04STejun Heo  * %true if flush_work() waited for the work to finish execution,
3277401a8d04STejun Heo  * %false if it was already idle.
32786e84d644SOleg Nesterov  */
3279401a8d04STejun Heo bool flush_delayed_work(struct delayed_work *dwork)
3280401a8d04STejun Heo {
32818930cabaSTejun Heo 	local_irq_disable();
3282401a8d04STejun Heo 	if (del_timer_sync(&dwork->timer))
328360c057bcSLai Jiangshan 		__queue_work(dwork->cpu, dwork->wq, &dwork->work);
32848930cabaSTejun Heo 	local_irq_enable();
3285401a8d04STejun Heo 	return flush_work(&dwork->work);
3286401a8d04STejun Heo }
3287401a8d04STejun Heo EXPORT_SYMBOL(flush_delayed_work);
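/*
 * Illustrative sketch ("stats_dwork" and "stats_fn" are hypothetical):
 * pull a delayed work forward and wait for it, e.g. to emit one final
 * report before shutdown.
 *
 *	static DECLARE_DELAYED_WORK(stats_dwork, stats_fn);
 *
 *	static void stats_final_report(void)
 *	{
 *		// timer is cancelled, work runs immediately, then we wait
 *		flush_delayed_work(&stats_dwork);
 *	}
 */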
3288401a8d04STejun Heo 
328905f0fe6bSTejun Heo /**
329005f0fe6bSTejun Heo  * flush_rcu_work - wait for a rwork to finish executing the last queueing
329105f0fe6bSTejun Heo  * @rwork: the rcu work to flush
329205f0fe6bSTejun Heo  *
329305f0fe6bSTejun Heo  * Return:
329405f0fe6bSTejun Heo  * %true if flush_rcu_work() waited for the work to finish execution,
329505f0fe6bSTejun Heo  * %false if it was already idle.
329605f0fe6bSTejun Heo  */
329705f0fe6bSTejun Heo bool flush_rcu_work(struct rcu_work *rwork)
329805f0fe6bSTejun Heo {
329905f0fe6bSTejun Heo 	if (test_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&rwork->work))) {
330005f0fe6bSTejun Heo 		rcu_barrier();
330105f0fe6bSTejun Heo 		flush_work(&rwork->work);
330205f0fe6bSTejun Heo 		return true;
330305f0fe6bSTejun Heo 	} else {
330405f0fe6bSTejun Heo 		return flush_work(&rwork->work);
330505f0fe6bSTejun Heo 	}
330605f0fe6bSTejun Heo }
330705f0fe6bSTejun Heo EXPORT_SYMBOL(flush_rcu_work);
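/*
 * Illustrative sketch of the rcu_work pattern ("gc_rwork" and "gc_fn"
 * are hypothetical): queue work to run after an RCU grace period, then
 * flush it before tearing down.
 *
 *	static struct rcu_work gc_rwork;
 *
 *	INIT_RCU_WORK(&gc_rwork, gc_fn);
 *	queue_rcu_work(system_wq, &gc_rwork);	// runs after a grace period
 *	...
 *	flush_rcu_work(&gc_rwork);		// waits for GP + execution
 */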
330805f0fe6bSTejun Heo 
3309f72b8792SJens Axboe static bool __cancel_work(struct work_struct *work, bool is_dwork)
3310f72b8792SJens Axboe {
3311f72b8792SJens Axboe 	unsigned long flags;
3312f72b8792SJens Axboe 	int ret;
3313f72b8792SJens Axboe 
3314f72b8792SJens Axboe 	do {
3315f72b8792SJens Axboe 		ret = try_to_grab_pending(work, is_dwork, &flags);
3316f72b8792SJens Axboe 	} while (unlikely(ret == -EAGAIN));
3317f72b8792SJens Axboe 
3318f72b8792SJens Axboe 	if (unlikely(ret < 0))
3319f72b8792SJens Axboe 		return false;
3320f72b8792SJens Axboe 
3321f72b8792SJens Axboe 	set_work_pool_and_clear_pending(work, get_work_pool_id(work));
3322f72b8792SJens Axboe 	local_irq_restore(flags);
3323f72b8792SJens Axboe 	return ret;
3324f72b8792SJens Axboe }
3325f72b8792SJens Axboe 
332673b4b532SAndrey Grodzovsky /*
332773b4b532SAndrey Grodzovsky  * See cancel_delayed_work() - this is the equivalent for non-delayed work items.
332873b4b532SAndrey Grodzovsky  */
332973b4b532SAndrey Grodzovsky bool cancel_work(struct work_struct *work)
333073b4b532SAndrey Grodzovsky {
333173b4b532SAndrey Grodzovsky 	return __cancel_work(work, false);
333273b4b532SAndrey Grodzovsky }
333373b4b532SAndrey Grodzovsky EXPORT_SYMBOL(cancel_work);
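/*
 * Illustrative sketch (hypothetical "dev"): drop a pending work item
 * without waiting for a running instance, which makes this usable from
 * contexts that cannot sleep.
 *
 *	if (cancel_work(&dev->io_work))
 *		pr_debug("io_work was pending and has been canceled\n");
 *	// the callback may still be executing; cancel_work_sync() waits
 */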
333473b4b532SAndrey Grodzovsky 
3335401a8d04STejun Heo /**
333657b30ae7STejun Heo  * cancel_delayed_work - cancel a delayed work
333757b30ae7STejun Heo  * @dwork: delayed_work to cancel
333809383498STejun Heo  *
3339d185af30SYacine Belkadi  * Kill off a pending delayed_work.
3340d185af30SYacine Belkadi  *
3341d185af30SYacine Belkadi  * Return: %true if @dwork was pending and canceled; %false if it wasn't
3342d185af30SYacine Belkadi  * pending.
3343d185af30SYacine Belkadi  *
3344d185af30SYacine Belkadi  * Note:
3345d185af30SYacine Belkadi  * The work callback function may still be running on return unless
3346d185af30SYacine Belkadi  * this function returns %true and the work doesn't re-arm itself.
3347d185af30SYacine Belkadi  * Explicitly flush or use cancel_delayed_work_sync() to wait on it.
334809383498STejun Heo  *
334957b30ae7STejun Heo  * This function is safe to call from any context including IRQ handler.
335009383498STejun Heo  */
335157b30ae7STejun Heo bool cancel_delayed_work(struct delayed_work *dwork)
335209383498STejun Heo {
3353f72b8792SJens Axboe 	return __cancel_work(&dwork->work, true);
335409383498STejun Heo }
335557b30ae7STejun Heo EXPORT_SYMBOL(cancel_delayed_work);
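/*
 * Illustrative sketch ("my_irq" and "poll_dwork" are hypothetical):
 * stopping a polling dwork from hard-irq context, where the sleeping
 * cancel_delayed_work_sync() is not allowed.
 *
 *	static irqreturn_t my_irq(int irq, void *data)
 *	{
 *		struct my_dev *dev = data;
 *
 *		cancel_delayed_work(&dev->poll_dwork);	// never sleeps
 *		return IRQ_HANDLED;
 *	}
 */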
335609383498STejun Heo 
335709383498STejun Heo /**
3358401a8d04STejun Heo  * cancel_delayed_work_sync - cancel a delayed work and wait for it to finish
3359401a8d04STejun Heo  * @dwork: the delayed work to cancel
3360401a8d04STejun Heo  *
3361401a8d04STejun Heo  * This is cancel_work_sync() for delayed works.
3362401a8d04STejun Heo  *
3363d185af30SYacine Belkadi  * Return:
3364401a8d04STejun Heo  * %true if @dwork was pending, %false otherwise.
3365401a8d04STejun Heo  */
3366401a8d04STejun Heo bool cancel_delayed_work_sync(struct delayed_work *dwork)
33676e84d644SOleg Nesterov {
336836e227d2STejun Heo 	return __cancel_work_timer(&dwork->work, true);
33696e84d644SOleg Nesterov }
3370f5a421a4SOleg Nesterov EXPORT_SYMBOL(cancel_delayed_work_sync);
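/*
 * Illustrative sketch of the usual lifetime pairing ("dev" and
 * "poll_fn" are hypothetical):
 *
 *	INIT_DELAYED_WORK(&dev->poll_dwork, poll_fn);	// at probe
 *	schedule_delayed_work(&dev->poll_dwork, HZ);	// at runtime
 *	cancel_delayed_work_sync(&dev->poll_dwork);	// at remove; may sleep
 */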
33711da177e4SLinus Torvalds 
33720fcb78c2SRolf Eike Beer /**
337331ddd871STejun Heo  * schedule_on_each_cpu - execute a function synchronously on each online CPU
3374b6136773SAndrew Morton  * @func: the function to call
3375b6136773SAndrew Morton  *
337631ddd871STejun Heo  * schedule_on_each_cpu() executes @func on each online CPU using the
337731ddd871STejun Heo  * system workqueue and blocks until all CPUs have completed.
3378b6136773SAndrew Morton  * schedule_on_each_cpu() is very slow.
337931ddd871STejun Heo  *
3380d185af30SYacine Belkadi  * Return:
338131ddd871STejun Heo  * 0 on success, -errno on failure.
3382b6136773SAndrew Morton  */
338365f27f38SDavid Howells int schedule_on_each_cpu(work_func_t func)
338415316ba8SChristoph Lameter {
338515316ba8SChristoph Lameter 	int cpu;
338638f51568SNamhyung Kim 	struct work_struct __percpu *works;
338715316ba8SChristoph Lameter 
3388b6136773SAndrew Morton 	works = alloc_percpu(struct work_struct);
3389b6136773SAndrew Morton 	if (!works)
339015316ba8SChristoph Lameter 		return -ENOMEM;
3391b6136773SAndrew Morton 
3392ffd8bea8SSebastian Andrzej Siewior 	cpus_read_lock();
339393981800STejun Heo 
339415316ba8SChristoph Lameter 	for_each_online_cpu(cpu) {
33959bfb1839SIngo Molnar 		struct work_struct *work = per_cpu_ptr(works, cpu);
33969bfb1839SIngo Molnar 
33979bfb1839SIngo Molnar 		INIT_WORK(work, func);
33988de6d308SOleg Nesterov 		schedule_work_on(cpu, work);
339915316ba8SChristoph Lameter 	}
340093981800STejun Heo 
340193981800STejun Heo 	for_each_online_cpu(cpu)
34028616a89aSOleg Nesterov 		flush_work(per_cpu_ptr(works, cpu));
340393981800STejun Heo 
3404ffd8bea8SSebastian Andrzej Siewior 	cpus_read_unlock();
3405b6136773SAndrew Morton 	free_percpu(works);
340615316ba8SChristoph Lameter 	return 0;
340715316ba8SChristoph Lameter }
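/*
 * Illustrative sketch ("drain_local_cache" is hypothetical): run a
 * routine on every online CPU and wait for all of them.
 *
 *	static void drain_local_cache(struct work_struct *work)
 *	{
 *		// executes once per online CPU, in process context
 *	}
 *
 *	int ret = schedule_on_each_cpu(drain_local_cache);
 *	if (ret)
 *		pr_warn("per-cpu drain failed: %d\n", ret);
 */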
340815316ba8SChristoph Lameter 
3409eef6a7d5SAlan Stern /**
34101fa44ecaSJames Bottomley  * execute_in_process_context - reliably execute the routine with user context
34111fa44ecaSJames Bottomley  * @fn:		the function to execute
34121fa44ecaSJames Bottomley  * @ew:		guaranteed storage for the execute work structure (must
34131fa44ecaSJames Bottomley  *		be available when the work executes)
34141fa44ecaSJames Bottomley  *
34151fa44ecaSJames Bottomley  * Executes the function immediately if process context is available,
34161fa44ecaSJames Bottomley  * otherwise schedules the function for delayed execution.
34171fa44ecaSJames Bottomley  *
3418d185af30SYacine Belkadi  * Return:	0 - function was executed
34191fa44ecaSJames Bottomley  *		1 - function was scheduled for execution
34201fa44ecaSJames Bottomley  */
342165f27f38SDavid Howells int execute_in_process_context(work_func_t fn, struct execute_work *ew)
34221fa44ecaSJames Bottomley {
34231fa44ecaSJames Bottomley 	if (!in_interrupt()) {
342465f27f38SDavid Howells 		fn(&ew->work);
34251fa44ecaSJames Bottomley 		return 0;
34261fa44ecaSJames Bottomley 	}
34271fa44ecaSJames Bottomley 
342865f27f38SDavid Howells 	INIT_WORK(&ew->work, fn);
34291fa44ecaSJames Bottomley 	schedule_work(&ew->work);
34301fa44ecaSJames Bottomley 
34311fa44ecaSJames Bottomley 	return 1;
34321fa44ecaSJames Bottomley }
34331fa44ecaSJames Bottomley EXPORT_SYMBOL_GPL(execute_in_process_context);
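/*
 * Illustrative sketch ("release_fn" and "my_obj" are hypothetical):
 * run a routine inline when already in process context, deferring to
 * the system workqueue only from interrupt context.  @ew must stay
 * valid until the deferred call has run.
 *
 *	struct my_obj {
 *		struct execute_work	ew;
 *	};
 *
 *	// 0: release_fn() ran inline; 1: it was scheduled for later
 *	execute_in_process_context(release_fn, &obj->ew);
 */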
34341fa44ecaSJames Bottomley 
34357a4e344cSTejun Heo /**
34367a4e344cSTejun Heo  * free_workqueue_attrs - free a workqueue_attrs
34377a4e344cSTejun Heo  * @attrs: workqueue_attrs to free
34387a4e344cSTejun Heo  *
34397a4e344cSTejun Heo  * Undo alloc_workqueue_attrs().
34407a4e344cSTejun Heo  */
3441513c98d0SDaniel Jordan void free_workqueue_attrs(struct workqueue_attrs *attrs)
34427a4e344cSTejun Heo {
34437a4e344cSTejun Heo 	if (attrs) {
34447a4e344cSTejun Heo 		free_cpumask_var(attrs->cpumask);
34457a4e344cSTejun Heo 		kfree(attrs);
34467a4e344cSTejun Heo 	}
34477a4e344cSTejun Heo }
34487a4e344cSTejun Heo 
34497a4e344cSTejun Heo /**
34507a4e344cSTejun Heo  * alloc_workqueue_attrs - allocate a workqueue_attrs
34517a4e344cSTejun Heo  *
34527a4e344cSTejun Heo  * Allocate a new workqueue_attrs, initialize with default settings and
3453d185af30SYacine Belkadi  * return it.
3454d185af30SYacine Belkadi  *
3455d185af30SYacine Belkadi  * Return: The allocated new workqueue_attr on success. %NULL on failure.
34567a4e344cSTejun Heo  */
3457513c98d0SDaniel Jordan struct workqueue_attrs *alloc_workqueue_attrs(void)
34587a4e344cSTejun Heo {
34597a4e344cSTejun Heo 	struct workqueue_attrs *attrs;
34607a4e344cSTejun Heo 
3461be69d00dSThomas Gleixner 	attrs = kzalloc(sizeof(*attrs), GFP_KERNEL);
34627a4e344cSTejun Heo 	if (!attrs)
34637a4e344cSTejun Heo 		goto fail;
3464be69d00dSThomas Gleixner 	if (!alloc_cpumask_var(&attrs->cpumask, GFP_KERNEL))
34657a4e344cSTejun Heo 		goto fail;
34667a4e344cSTejun Heo 
346713e2e556STejun Heo 	cpumask_copy(attrs->cpumask, cpu_possible_mask);
34687a4e344cSTejun Heo 	return attrs;
34697a4e344cSTejun Heo fail:
34707a4e344cSTejun Heo 	free_workqueue_attrs(attrs);
34717a4e344cSTejun Heo 	return NULL;
34727a4e344cSTejun Heo }
34737a4e344cSTejun Heo 
347429c91e99STejun Heo static void copy_workqueue_attrs(struct workqueue_attrs *to,
347529c91e99STejun Heo 				 const struct workqueue_attrs *from)
347629c91e99STejun Heo {
347729c91e99STejun Heo 	to->nice = from->nice;
347829c91e99STejun Heo 	cpumask_copy(to->cpumask, from->cpumask);
34792865a8fbSShaohua Li 	/*
34802865a8fbSShaohua Li 	 * Unlike hash and equality test, this function doesn't ignore
34812865a8fbSShaohua Li 	 * ->no_numa as it is used for both pool and wq attrs.  Instead,
34822865a8fbSShaohua Li 	 * get_unbound_pool() explicitly clears ->no_numa after copying.
34832865a8fbSShaohua Li 	 */
34842865a8fbSShaohua Li 	to->no_numa = from->no_numa;
348529c91e99STejun Heo }
348629c91e99STejun Heo 
348729c91e99STejun Heo /* hash value of the content of @attr */
348829c91e99STejun Heo static u32 wqattrs_hash(const struct workqueue_attrs *attrs)
348929c91e99STejun Heo {
349029c91e99STejun Heo 	u32 hash = 0;
349129c91e99STejun Heo 
349229c91e99STejun Heo 	hash = jhash_1word(attrs->nice, hash);
349313e2e556STejun Heo 	hash = jhash(cpumask_bits(attrs->cpumask),
349413e2e556STejun Heo 		     BITS_TO_LONGS(nr_cpumask_bits) * sizeof(long), hash);
349529c91e99STejun Heo 	return hash;
349629c91e99STejun Heo }
349729c91e99STejun Heo 
349829c91e99STejun Heo /* content equality test */
349929c91e99STejun Heo static bool wqattrs_equal(const struct workqueue_attrs *a,
350029c91e99STejun Heo 			  const struct workqueue_attrs *b)
350129c91e99STejun Heo {
350229c91e99STejun Heo 	if (a->nice != b->nice)
350329c91e99STejun Heo 		return false;
350429c91e99STejun Heo 	if (!cpumask_equal(a->cpumask, b->cpumask))
350529c91e99STejun Heo 		return false;
350629c91e99STejun Heo 	return true;
350729c91e99STejun Heo }
350829c91e99STejun Heo 
35097a4e344cSTejun Heo /**
35107a4e344cSTejun Heo  * init_worker_pool - initialize a newly zalloc'd worker_pool
35117a4e344cSTejun Heo  * @pool: worker_pool to initialize
35127a4e344cSTejun Heo  *
3513402dd89dSShailendra Verma  * Initialize a newly zalloc'd @pool.  It also allocates @pool->attrs.
3514d185af30SYacine Belkadi  *
3515d185af30SYacine Belkadi  * Return: 0 on success, -errno on failure.  Even on failure, all fields
351629c91e99STejun Heo  * inside @pool proper are initialized and put_unbound_pool() can be called
351729c91e99STejun Heo  * on @pool safely to release it.
35187a4e344cSTejun Heo  */
35197a4e344cSTejun Heo static int init_worker_pool(struct worker_pool *pool)
35204e1a1f9aSTejun Heo {
3521a9b8a985SSebastian Andrzej Siewior 	raw_spin_lock_init(&pool->lock);
352229c91e99STejun Heo 	pool->id = -1;
352329c91e99STejun Heo 	pool->cpu = -1;
3524f3f90ad4STejun Heo 	pool->node = NUMA_NO_NODE;
35254e1a1f9aSTejun Heo 	pool->flags |= POOL_DISASSOCIATED;
352682607adcSTejun Heo 	pool->watchdog_ts = jiffies;
35274e1a1f9aSTejun Heo 	INIT_LIST_HEAD(&pool->worklist);
35284e1a1f9aSTejun Heo 	INIT_LIST_HEAD(&pool->idle_list);
35294e1a1f9aSTejun Heo 	hash_init(pool->busy_hash);
35304e1a1f9aSTejun Heo 
353132a6c723SKees Cook 	timer_setup(&pool->idle_timer, idle_worker_timeout, TIMER_DEFERRABLE);
3532*3f959aa3SValentin Schneider 	INIT_WORK(&pool->idle_cull_work, idle_cull_fn);
35334e1a1f9aSTejun Heo 
353432a6c723SKees Cook 	timer_setup(&pool->mayday_timer, pool_mayday_timeout, 0);
35354e1a1f9aSTejun Heo 
3536da028469SLai Jiangshan 	INIT_LIST_HEAD(&pool->workers);
35377a4e344cSTejun Heo 
35387cda9aaeSLai Jiangshan 	ida_init(&pool->worker_ida);
353929c91e99STejun Heo 	INIT_HLIST_NODE(&pool->hash_node);
354029c91e99STejun Heo 	pool->refcnt = 1;
354129c91e99STejun Heo 
354229c91e99STejun Heo 	/* shouldn't fail above this point */
3543be69d00dSThomas Gleixner 	pool->attrs = alloc_workqueue_attrs();
35447a4e344cSTejun Heo 	if (!pool->attrs)
35457a4e344cSTejun Heo 		return -ENOMEM;
35467a4e344cSTejun Heo 	return 0;
35474e1a1f9aSTejun Heo }
35484e1a1f9aSTejun Heo 
3549669de8bdSBart Van Assche #ifdef CONFIG_LOCKDEP
3550669de8bdSBart Van Assche static void wq_init_lockdep(struct workqueue_struct *wq)
3551669de8bdSBart Van Assche {
3552669de8bdSBart Van Assche 	char *lock_name;
3553669de8bdSBart Van Assche 
3554669de8bdSBart Van Assche 	lockdep_register_key(&wq->key);
3555669de8bdSBart Van Assche 	lock_name = kasprintf(GFP_KERNEL, "%s%s", "(wq_completion)", wq->name);
3556669de8bdSBart Van Assche 	if (!lock_name)
3557669de8bdSBart Van Assche 		lock_name = wq->name;
355869a106c0SQian Cai 
355969a106c0SQian Cai 	wq->lock_name = lock_name;
3560669de8bdSBart Van Assche 	lockdep_init_map(&wq->lockdep_map, lock_name, &wq->key, 0);
3561669de8bdSBart Van Assche }
3562669de8bdSBart Van Assche 
3563669de8bdSBart Van Assche static void wq_unregister_lockdep(struct workqueue_struct *wq)
3564669de8bdSBart Van Assche {
3565669de8bdSBart Van Assche 	lockdep_unregister_key(&wq->key);
3566669de8bdSBart Van Assche }
3567669de8bdSBart Van Assche 
3568669de8bdSBart Van Assche static void wq_free_lockdep(struct workqueue_struct *wq)
3569669de8bdSBart Van Assche {
3570669de8bdSBart Van Assche 	if (wq->lock_name != wq->name)
3571669de8bdSBart Van Assche 		kfree(wq->lock_name);
3572669de8bdSBart Van Assche }
3573669de8bdSBart Van Assche #else
3574669de8bdSBart Van Assche static void wq_init_lockdep(struct workqueue_struct *wq)
3575669de8bdSBart Van Assche {
3576669de8bdSBart Van Assche }
3577669de8bdSBart Van Assche 
3578669de8bdSBart Van Assche static void wq_unregister_lockdep(struct workqueue_struct *wq)
3579669de8bdSBart Van Assche {
3580669de8bdSBart Van Assche }
3581669de8bdSBart Van Assche 
3582669de8bdSBart Van Assche static void wq_free_lockdep(struct workqueue_struct *wq)
3583669de8bdSBart Van Assche {
3584669de8bdSBart Van Assche }
3585669de8bdSBart Van Assche #endif
3586669de8bdSBart Van Assche 
3587e2dca7adSTejun Heo static void rcu_free_wq(struct rcu_head *rcu)
3588e2dca7adSTejun Heo {
3589e2dca7adSTejun Heo 	struct workqueue_struct *wq =
3590e2dca7adSTejun Heo 		container_of(rcu, struct workqueue_struct, rcu);
3591e2dca7adSTejun Heo 
3592669de8bdSBart Van Assche 	wq_free_lockdep(wq);
3593669de8bdSBart Van Assche 
3594e2dca7adSTejun Heo 	if (!(wq->flags & WQ_UNBOUND))
3595e2dca7adSTejun Heo 		free_percpu(wq->cpu_pwqs);
3596e2dca7adSTejun Heo 	else
3597e2dca7adSTejun Heo 		free_workqueue_attrs(wq->unbound_attrs);
3598e2dca7adSTejun Heo 
3599e2dca7adSTejun Heo 	kfree(wq);
3600e2dca7adSTejun Heo }
3601e2dca7adSTejun Heo 
360229c91e99STejun Heo static void rcu_free_pool(struct rcu_head *rcu)
360329c91e99STejun Heo {
360429c91e99STejun Heo 	struct worker_pool *pool = container_of(rcu, struct worker_pool, rcu);
360529c91e99STejun Heo 
36067cda9aaeSLai Jiangshan 	ida_destroy(&pool->worker_ida);
360729c91e99STejun Heo 	free_workqueue_attrs(pool->attrs);
360829c91e99STejun Heo 	kfree(pool);
360929c91e99STejun Heo }
361029c91e99STejun Heo 
3611d8bb65abSSebastian Andrzej Siewior /* This returns with the lock held on success (pool manager is inactive). */
3612d8bb65abSSebastian Andrzej Siewior static bool wq_manager_inactive(struct worker_pool *pool)
3613d8bb65abSSebastian Andrzej Siewior {
3614a9b8a985SSebastian Andrzej Siewior 	raw_spin_lock_irq(&pool->lock);
3615d8bb65abSSebastian Andrzej Siewior 
3616d8bb65abSSebastian Andrzej Siewior 	if (pool->flags & POOL_MANAGER_ACTIVE) {
3617a9b8a985SSebastian Andrzej Siewior 		raw_spin_unlock_irq(&pool->lock);
3618d8bb65abSSebastian Andrzej Siewior 		return false;
3619d8bb65abSSebastian Andrzej Siewior 	}
3620d8bb65abSSebastian Andrzej Siewior 	return true;
3621d8bb65abSSebastian Andrzej Siewior }
3622d8bb65abSSebastian Andrzej Siewior 
362329c91e99STejun Heo /**
362429c91e99STejun Heo  * put_unbound_pool - put a worker_pool
362529c91e99STejun Heo  * @pool: worker_pool to put
362629c91e99STejun Heo  *
362724acfb71SThomas Gleixner  * Put @pool.  If its refcnt reaches zero, it gets destroyed in RCU
3628c5aa87bbSTejun Heo  * safe manner.  get_unbound_pool() calls this function on its failure path
3629c5aa87bbSTejun Heo  * and this function should be able to release pools which went through,
3630c5aa87bbSTejun Heo  * successfully or not, init_worker_pool().
3631a892caccSTejun Heo  *
3632a892caccSTejun Heo  * Should be called with wq_pool_mutex held.
363329c91e99STejun Heo  */
363429c91e99STejun Heo static void put_unbound_pool(struct worker_pool *pool)
363529c91e99STejun Heo {
363660f5a4bcSLai Jiangshan 	DECLARE_COMPLETION_ONSTACK(detach_completion);
363729c91e99STejun Heo 	struct worker *worker;
363829c91e99STejun Heo 
3639a892caccSTejun Heo 	lockdep_assert_held(&wq_pool_mutex);
3640a892caccSTejun Heo 
3641a892caccSTejun Heo 	if (--pool->refcnt)
364229c91e99STejun Heo 		return;
364329c91e99STejun Heo 
364429c91e99STejun Heo 	/* sanity checks */
364561d0fbb4SLai Jiangshan 	if (WARN_ON(!(pool->cpu < 0)) ||
3646a892caccSTejun Heo 	    WARN_ON(!list_empty(&pool->worklist)))
364729c91e99STejun Heo 		return;
364829c91e99STejun Heo 
364929c91e99STejun Heo 	/* release id and unhash */
365029c91e99STejun Heo 	if (pool->id >= 0)
365129c91e99STejun Heo 		idr_remove(&worker_pool_idr, pool->id);
365229c91e99STejun Heo 	hash_del(&pool->hash_node);
365329c91e99STejun Heo 
3654c5aa87bbSTejun Heo 	/*
3655692b4825STejun Heo 	 * Become the manager and destroy all workers.  This prevents
3656692b4825STejun Heo 	 * @pool's workers from blocking on wq_pool_attach_mutex.  We're the last
3657692b4825STejun Heo 	 * manager and @pool gets freed with the flag set.
3658d8bb65abSSebastian Andrzej Siewior 	 * Because of how wq_manager_inactive() works, we will hold the
3659d8bb65abSSebastian Andrzej Siewior 	 * spinlock after a successful wait.
3660c5aa87bbSTejun Heo 	 */
3661d8bb65abSSebastian Andrzej Siewior 	rcuwait_wait_event(&manager_wait, wq_manager_inactive(pool),
3662d8bb65abSSebastian Andrzej Siewior 			   TASK_UNINTERRUPTIBLE);
3663692b4825STejun Heo 	pool->flags |= POOL_MANAGER_ACTIVE;
3664692b4825STejun Heo 
36651037de36SLai Jiangshan 	while ((worker = first_idle_worker(pool)))
366629c91e99STejun Heo 		destroy_worker(worker);
366729c91e99STejun Heo 	WARN_ON(pool->nr_workers || pool->nr_idle);
3668a9b8a985SSebastian Andrzej Siewior 	raw_spin_unlock_irq(&pool->lock);
366960f5a4bcSLai Jiangshan 
36701258fae7STejun Heo 	mutex_lock(&wq_pool_attach_mutex);
3671da028469SLai Jiangshan 	if (!list_empty(&pool->workers))
367260f5a4bcSLai Jiangshan 		pool->detach_completion = &detach_completion;
36731258fae7STejun Heo 	mutex_unlock(&wq_pool_attach_mutex);
367460f5a4bcSLai Jiangshan 
367560f5a4bcSLai Jiangshan 	if (pool->detach_completion)
367660f5a4bcSLai Jiangshan 		wait_for_completion(pool->detach_completion);
367760f5a4bcSLai Jiangshan 
367829c91e99STejun Heo 	/* shut down the timers */
367929c91e99STejun Heo 	del_timer_sync(&pool->idle_timer);
3680*3f959aa3SValentin Schneider 	cancel_work_sync(&pool->idle_cull_work);
368129c91e99STejun Heo 	del_timer_sync(&pool->mayday_timer);
368229c91e99STejun Heo 
368324acfb71SThomas Gleixner 	/* RCU protected to allow dereferences from get_work_pool() */
368425b00775SPaul E. McKenney 	call_rcu(&pool->rcu, rcu_free_pool);
368529c91e99STejun Heo }
368629c91e99STejun Heo 
368729c91e99STejun Heo /**
368829c91e99STejun Heo  * get_unbound_pool - get a worker_pool with the specified attributes
368929c91e99STejun Heo  * @attrs: the attributes of the worker_pool to get
369029c91e99STejun Heo  *
369129c91e99STejun Heo  * Obtain a worker_pool which has the same attributes as @attrs, bump the
369229c91e99STejun Heo  * reference count and return it.  If there already is a matching
369329c91e99STejun Heo  * worker_pool, it will be used; otherwise, this function attempts to
3694d185af30SYacine Belkadi  * create a new one.
3695a892caccSTejun Heo  *
3696a892caccSTejun Heo  * Should be called with wq_pool_mutex held.
3697d185af30SYacine Belkadi  *
3698d185af30SYacine Belkadi  * Return: On success, a worker_pool with the same attributes as @attrs.
3699d185af30SYacine Belkadi  * On failure, %NULL.
370029c91e99STejun Heo  */
370129c91e99STejun Heo static struct worker_pool *get_unbound_pool(const struct workqueue_attrs *attrs)
370229c91e99STejun Heo {
370329c91e99STejun Heo 	u32 hash = wqattrs_hash(attrs);
370429c91e99STejun Heo 	struct worker_pool *pool;
3705f3f90ad4STejun Heo 	int node;
3706e2273584SXunlei Pang 	int target_node = NUMA_NO_NODE;
370729c91e99STejun Heo 
3708a892caccSTejun Heo 	lockdep_assert_held(&wq_pool_mutex);
370929c91e99STejun Heo 
371029c91e99STejun Heo 	/* do we already have a matching pool? */
371129c91e99STejun Heo 	hash_for_each_possible(unbound_pool_hash, pool, hash_node, hash) {
371229c91e99STejun Heo 		if (wqattrs_equal(pool->attrs, attrs)) {
371329c91e99STejun Heo 			pool->refcnt++;
37143fb1823cSLai Jiangshan 			return pool;
371529c91e99STejun Heo 		}
371629c91e99STejun Heo 	}
371729c91e99STejun Heo 
3718e2273584SXunlei Pang 	/* if cpumask is contained inside a NUMA node, we belong to that node */
3719e2273584SXunlei Pang 	if (wq_numa_enabled) {
3720e2273584SXunlei Pang 		for_each_node(node) {
3721e2273584SXunlei Pang 			if (cpumask_subset(attrs->cpumask,
3722e2273584SXunlei Pang 					   wq_numa_possible_cpumask[node])) {
3723e2273584SXunlei Pang 				target_node = node;
3724e2273584SXunlei Pang 				break;
3725e2273584SXunlei Pang 			}
3726e2273584SXunlei Pang 		}
3727e2273584SXunlei Pang 	}
3728e2273584SXunlei Pang 
372929c91e99STejun Heo 	/* nope, create a new one */
3730e2273584SXunlei Pang 	pool = kzalloc_node(sizeof(*pool), GFP_KERNEL, target_node);
373129c91e99STejun Heo 	if (!pool || init_worker_pool(pool) < 0)
373229c91e99STejun Heo 		goto fail;
373329c91e99STejun Heo 
37348864b4e5STejun Heo 	lockdep_set_subclass(&pool->lock, 1);	/* see put_pwq() */
373529c91e99STejun Heo 	copy_workqueue_attrs(pool->attrs, attrs);
3736e2273584SXunlei Pang 	pool->node = target_node;
373729c91e99STejun Heo 
37382865a8fbSShaohua Li 	/*
37392865a8fbSShaohua Li 	 * no_numa isn't a worker_pool attribute, always clear it.  See
37402865a8fbSShaohua Li 	 * 'struct workqueue_attrs' comments for detail.
37412865a8fbSShaohua Li 	 */
37422865a8fbSShaohua Li 	pool->attrs->no_numa = false;
37432865a8fbSShaohua Li 
374429c91e99STejun Heo 	if (worker_pool_assign_id(pool) < 0)
374529c91e99STejun Heo 		goto fail;
374629c91e99STejun Heo 
374729c91e99STejun Heo 	/* create and start the initial worker */
37483347fa09STejun Heo 	if (wq_online && !create_worker(pool))
374929c91e99STejun Heo 		goto fail;
375029c91e99STejun Heo 
375129c91e99STejun Heo 	/* install */
375229c91e99STejun Heo 	hash_add(unbound_pool_hash, &pool->hash_node, hash);
37533fb1823cSLai Jiangshan 
375429c91e99STejun Heo 	return pool;
375529c91e99STejun Heo fail:
375629c91e99STejun Heo 	if (pool)
375729c91e99STejun Heo 		put_unbound_pool(pool);
375829c91e99STejun Heo 	return NULL;
375929c91e99STejun Heo }
376029c91e99STejun Heo 
37618864b4e5STejun Heo static void rcu_free_pwq(struct rcu_head *rcu)
37628864b4e5STejun Heo {
37638864b4e5STejun Heo 	kmem_cache_free(pwq_cache,
37648864b4e5STejun Heo 			container_of(rcu, struct pool_workqueue, rcu));
37658864b4e5STejun Heo }
37668864b4e5STejun Heo 
37678864b4e5STejun Heo /*
37688864b4e5STejun Heo  * Scheduled on system_wq by put_pwq() when an unbound pwq hits zero refcnt
37698864b4e5STejun Heo  * and needs to be destroyed.
37708864b4e5STejun Heo  */
37718864b4e5STejun Heo static void pwq_unbound_release_workfn(struct work_struct *work)
37728864b4e5STejun Heo {
37738864b4e5STejun Heo 	struct pool_workqueue *pwq = container_of(work, struct pool_workqueue,
37748864b4e5STejun Heo 						  unbound_release_work);
37758864b4e5STejun Heo 	struct workqueue_struct *wq = pwq->wq;
37768864b4e5STejun Heo 	struct worker_pool *pool = pwq->pool;
3777b42b0bddSYang Yingliang 	bool is_last = false;
37788864b4e5STejun Heo 
3779b42b0bddSYang Yingliang 	/*
3780b42b0bddSYang Yingliang 	 * When @pwq is not linked, it doesn't hold any reference to @wq,
3781b42b0bddSYang Yingliang 	 * and @wq may already be invalid to access.
3782b42b0bddSYang Yingliang 	 */
3783b42b0bddSYang Yingliang 	if (!list_empty(&pwq->pwqs_node)) {
37848864b4e5STejun Heo 		if (WARN_ON_ONCE(!(wq->flags & WQ_UNBOUND)))
37858864b4e5STejun Heo 			return;
37868864b4e5STejun Heo 
37873c25a55dSLai Jiangshan 		mutex_lock(&wq->mutex);
37888864b4e5STejun Heo 		list_del_rcu(&pwq->pwqs_node);
3789bc0caf09STejun Heo 		is_last = list_empty(&wq->pwqs);
37903c25a55dSLai Jiangshan 		mutex_unlock(&wq->mutex);
3791b42b0bddSYang Yingliang 	}
37928864b4e5STejun Heo 
3793a892caccSTejun Heo 	mutex_lock(&wq_pool_mutex);
37948864b4e5STejun Heo 	put_unbound_pool(pool);
3795a892caccSTejun Heo 	mutex_unlock(&wq_pool_mutex);
3796a892caccSTejun Heo 
379725b00775SPaul E. McKenney 	call_rcu(&pwq->rcu, rcu_free_pwq);
37988864b4e5STejun Heo 
37998864b4e5STejun Heo 	/*
38008864b4e5STejun Heo 	 * If we're the last pwq going away, @wq is already dead and no one
3801e2dca7adSTejun Heo 	 * is gonna access it anymore.  Schedule RCU free.
38028864b4e5STejun Heo 	 */
3803669de8bdSBart Van Assche 	if (is_last) {
3804669de8bdSBart Van Assche 		wq_unregister_lockdep(wq);
380525b00775SPaul E. McKenney 		call_rcu(&wq->rcu, rcu_free_wq);
38066029a918STejun Heo 	}
3807669de8bdSBart Van Assche }
38088864b4e5STejun Heo 
38090fbd95aaSTejun Heo /**
3810699ce097STejun Heo  * pwq_adjust_max_active - update a pwq's max_active to the current setting
38110fbd95aaSTejun Heo  * @pwq: target pool_workqueue
38120fbd95aaSTejun Heo  *
3813699ce097STejun Heo  * If @pwq isn't freezing, set @pwq->max_active to the associated
3814f97a4a1aSLai Jiangshan  * workqueue's saved_max_active and activate inactive work items
3815699ce097STejun Heo  * accordingly.  If @pwq is freezing, clear @pwq->max_active to zero.
38160fbd95aaSTejun Heo  */
3817699ce097STejun Heo static void pwq_adjust_max_active(struct pool_workqueue *pwq)
38180fbd95aaSTejun Heo {
3819699ce097STejun Heo 	struct workqueue_struct *wq = pwq->wq;
3820699ce097STejun Heo 	bool freezable = wq->flags & WQ_FREEZABLE;
38213347fa09STejun Heo 	unsigned long flags;
3822699ce097STejun Heo 
3823699ce097STejun Heo 	/* for @wq->saved_max_active */
3824a357fc03SLai Jiangshan 	lockdep_assert_held(&wq->mutex);
3825699ce097STejun Heo 
3826699ce097STejun Heo 	/* fast exit for non-freezable wqs */
3827699ce097STejun Heo 	if (!freezable && pwq->max_active == wq->saved_max_active)
3828699ce097STejun Heo 		return;
3829699ce097STejun Heo 
38303347fa09STejun Heo 	/* this function can be called during early boot w/ irq disabled */
3831a9b8a985SSebastian Andrzej Siewior 	raw_spin_lock_irqsave(&pwq->pool->lock, flags);
3832699ce097STejun Heo 
383374b414eaSLai Jiangshan 	/*
383474b414eaSLai Jiangshan 	 * During [un]freezing, the caller is responsible for ensuring that
383574b414eaSLai Jiangshan 	 * this function is called at least once after @workqueue_freezing
383674b414eaSLai Jiangshan 	 * is updated and visible.
383774b414eaSLai Jiangshan 	 */
383874b414eaSLai Jiangshan 	if (!freezable || !workqueue_freezing) {
383901341fbdSYunfeng Ye 		bool kick = false;
384001341fbdSYunfeng Ye 
3841699ce097STejun Heo 		pwq->max_active = wq->saved_max_active;
38420fbd95aaSTejun Heo 
3843f97a4a1aSLai Jiangshan 		while (!list_empty(&pwq->inactive_works) &&
384401341fbdSYunfeng Ye 		       pwq->nr_active < pwq->max_active) {
3845f97a4a1aSLai Jiangshan 			pwq_activate_first_inactive(pwq);
384601341fbdSYunfeng Ye 			kick = true;
384701341fbdSYunfeng Ye 		}
3848951a078aSLai Jiangshan 
3849951a078aSLai Jiangshan 		/*
3850951a078aSLai Jiangshan 		 * Need to kick a worker after thawing or when an unbound wq's
385101341fbdSYunfeng Ye 		 * max_active is bumped. In realtime scenarios, always kicking a
385201341fbdSYunfeng Ye 		 * worker will cause interference on the isolated cpu cores, so
385301341fbdSYunfeng Ye 		 * let's kick iff work items were activated.
3854951a078aSLai Jiangshan 		 */
385501341fbdSYunfeng Ye 		if (kick)
3856951a078aSLai Jiangshan 			wake_up_worker(pwq->pool);
3857699ce097STejun Heo 	} else {
3858699ce097STejun Heo 		pwq->max_active = 0;
3859699ce097STejun Heo 	}
3860699ce097STejun Heo 
3861a9b8a985SSebastian Andrzej Siewior 	raw_spin_unlock_irqrestore(&pwq->pool->lock, flags);
38620fbd95aaSTejun Heo }
38630fbd95aaSTejun Heo 
386467dc8325SCai Huoqing /* initialize a newly allocated @pwq which is associated with @wq and @pool */
3865f147f29eSTejun Heo static void init_pwq(struct pool_workqueue *pwq, struct workqueue_struct *wq,
3866f147f29eSTejun Heo 		     struct worker_pool *pool)
3867d2c1d404STejun Heo {
3868d2c1d404STejun Heo 	BUG_ON((unsigned long)pwq & WORK_STRUCT_FLAG_MASK);
3869d2c1d404STejun Heo 
3870e50aba9aSTejun Heo 	memset(pwq, 0, sizeof(*pwq));
3871e50aba9aSTejun Heo 
3872d2c1d404STejun Heo 	pwq->pool = pool;
3873d2c1d404STejun Heo 	pwq->wq = wq;
3874d2c1d404STejun Heo 	pwq->flush_color = -1;
38758864b4e5STejun Heo 	pwq->refcnt = 1;
3876f97a4a1aSLai Jiangshan 	INIT_LIST_HEAD(&pwq->inactive_works);
38771befcf30STejun Heo 	INIT_LIST_HEAD(&pwq->pwqs_node);
3878d2c1d404STejun Heo 	INIT_LIST_HEAD(&pwq->mayday_node);
38798864b4e5STejun Heo 	INIT_WORK(&pwq->unbound_release_work, pwq_unbound_release_workfn);
3880f147f29eSTejun Heo }
3881d2c1d404STejun Heo 
3882f147f29eSTejun Heo /* sync @pwq with the current state of its associated wq and link it */
38831befcf30STejun Heo static void link_pwq(struct pool_workqueue *pwq)
3884f147f29eSTejun Heo {
3885f147f29eSTejun Heo 	struct workqueue_struct *wq = pwq->wq;
3886f147f29eSTejun Heo 
3887f147f29eSTejun Heo 	lockdep_assert_held(&wq->mutex);
388875ccf595STejun Heo 
38891befcf30STejun Heo 	/* may be called multiple times, ignore if already linked */
38901befcf30STejun Heo 	if (!list_empty(&pwq->pwqs_node))
38911befcf30STejun Heo 		return;
38921befcf30STejun Heo 
389329b1cb41SLai Jiangshan 	/* set the matching work_color */
389475ccf595STejun Heo 	pwq->work_color = wq->work_color;
3895983ca25eSTejun Heo 
3896983ca25eSTejun Heo 	/* sync max_active to the current setting */
3897983ca25eSTejun Heo 	pwq_adjust_max_active(pwq);
3898983ca25eSTejun Heo 
3899983ca25eSTejun Heo 	/* link in @pwq */
39009e8cd2f5STejun Heo 	list_add_rcu(&pwq->pwqs_node, &wq->pwqs);
3901df2d5ae4STejun Heo }
39026029a918STejun Heo 
3903f147f29eSTejun Heo /* obtain a pool matching @attr and create a pwq associating the pool and @wq */
3904f147f29eSTejun Heo static struct pool_workqueue *alloc_unbound_pwq(struct workqueue_struct *wq,
3905f147f29eSTejun Heo 					const struct workqueue_attrs *attrs)
3906f147f29eSTejun Heo {
3907f147f29eSTejun Heo 	struct worker_pool *pool;
3908f147f29eSTejun Heo 	struct pool_workqueue *pwq;
3909f147f29eSTejun Heo 
3910f147f29eSTejun Heo 	lockdep_assert_held(&wq_pool_mutex);
3911f147f29eSTejun Heo 
3912f147f29eSTejun Heo 	pool = get_unbound_pool(attrs);
3913f147f29eSTejun Heo 	if (!pool)
3914f147f29eSTejun Heo 		return NULL;
3915f147f29eSTejun Heo 
3916e50aba9aSTejun Heo 	pwq = kmem_cache_alloc_node(pwq_cache, GFP_KERNEL, pool->node);
3917f147f29eSTejun Heo 	if (!pwq) {
3918f147f29eSTejun Heo 		put_unbound_pool(pool);
3919f147f29eSTejun Heo 		return NULL;
3920f147f29eSTejun Heo 	}
3921f147f29eSTejun Heo 
3922f147f29eSTejun Heo 	init_pwq(pwq, wq, pool);
3923f147f29eSTejun Heo 	return pwq;
3924d2c1d404STejun Heo }
3925d2c1d404STejun Heo 
39264c16bd32STejun Heo /**
392730186c6fSGong Zhaogang  * wq_calc_node_cpumask - calculate a wq_attrs' cpumask for the specified node
3928042f7df1SLai Jiangshan  * @attrs: the wq_attrs of the default pwq of the target workqueue
39294c16bd32STejun Heo  * @node: the target NUMA node
39304c16bd32STejun Heo  * @cpu_going_down: if >= 0, the CPU to consider as offline
39314c16bd32STejun Heo  * @cpumask: outarg, the resulting cpumask
39324c16bd32STejun Heo  *
39334c16bd32STejun Heo  * Calculate the cpumask a workqueue with @attrs should use on @node.  If
39344c16bd32STejun Heo  * @cpu_going_down is >= 0, that cpu is considered offline during
3935d185af30SYacine Belkadi  * calculation.  The result is stored in @cpumask.
39364c16bd32STejun Heo  *
39374c16bd32STejun Heo  * If NUMA affinity is not enabled, @attrs->cpumask is always used.  If
39384c16bd32STejun Heo  * enabled and @node has online CPUs requested by @attrs, the returned
39394c16bd32STejun Heo  * cpumask is the intersection of the possible CPUs of @node and
39404c16bd32STejun Heo  * @attrs->cpumask.
39414c16bd32STejun Heo  *
39424c16bd32STejun Heo  * The caller is responsible for ensuring that the cpumask of @node stays
39434c16bd32STejun Heo  * stable.
3944d185af30SYacine Belkadi  *
3945d185af30SYacine Belkadi  * Return: %true if the resulting @cpumask is different from @attrs->cpumask,
3946d185af30SYacine Belkadi  * %false if equal.
39474c16bd32STejun Heo  */
39484c16bd32STejun Heo static bool wq_calc_node_cpumask(const struct workqueue_attrs *attrs, int node,
39494c16bd32STejun Heo 				 int cpu_going_down, cpumask_t *cpumask)
39504c16bd32STejun Heo {
3951d55262c4STejun Heo 	if (!wq_numa_enabled || attrs->no_numa)
39524c16bd32STejun Heo 		goto use_dfl;
39534c16bd32STejun Heo 
39544c16bd32STejun Heo 	/* does @node have any online CPUs @attrs wants? */
39554c16bd32STejun Heo 	cpumask_and(cpumask, cpumask_of_node(node), attrs->cpumask);
39564c16bd32STejun Heo 	if (cpu_going_down >= 0)
39574c16bd32STejun Heo 		cpumask_clear_cpu(cpu_going_down, cpumask);
39584c16bd32STejun Heo 
39594c16bd32STejun Heo 	if (cpumask_empty(cpumask))
39604c16bd32STejun Heo 		goto use_dfl;
39614c16bd32STejun Heo 
39624c16bd32STejun Heo 	/* yeap, return possible CPUs in @node that @attrs wants */
39634c16bd32STejun Heo 	cpumask_and(cpumask, attrs->cpumask, wq_numa_possible_cpumask[node]);
39641ad0f0a7SMichael Bringmann 
39651ad0f0a7SMichael Bringmann 	if (cpumask_empty(cpumask)) {
39661ad0f0a7SMichael Bringmann 		pr_warn_once("WARNING: workqueue cpumask: online intersect > "
39671ad0f0a7SMichael Bringmann 				"possible intersect\n");
39681ad0f0a7SMichael Bringmann 		return false;
39691ad0f0a7SMichael Bringmann 	}
39701ad0f0a7SMichael Bringmann 
39714c16bd32STejun Heo 	return !cpumask_equal(cpumask, attrs->cpumask);
39724c16bd32STejun Heo 
39734c16bd32STejun Heo use_dfl:
39744c16bd32STejun Heo 	cpumask_copy(cpumask, attrs->cpumask);
39754c16bd32STejun Heo 	return false;
39764c16bd32STejun Heo }
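/*
 * Worked example for the above (assuming NUMA affinity is enabled and
 * !no_numa): let node 1 hold possible CPUs 4-7, all online, and let
 * @attrs->cpumask be 2-5.
 *
 *	wq_calc_node_cpumask(attrs, 1, -1, m)	// m = 4-5, returns true
 *	wq_calc_node_cpumask(attrs, 1,  4, m)	// CPU 5 keeps the node
 *						// eligible, so m is still
 *						// 4-5 (possible CPUs);
 *						// returns true
 *
 * If CPUs 4-5 were both excluded, the online check would fail and @m
 * would fall back to a copy of @attrs->cpumask (2-5), returning false.
 */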
39774c16bd32STejun Heo 
39781befcf30STejun Heo /* install @pwq into @wq's numa_pwq_tbl[] for @node and return the old pwq */
39791befcf30STejun Heo static struct pool_workqueue *numa_pwq_tbl_install(struct workqueue_struct *wq,
39801befcf30STejun Heo 						   int node,
39811befcf30STejun Heo 						   struct pool_workqueue *pwq)
39821befcf30STejun Heo {
39831befcf30STejun Heo 	struct pool_workqueue *old_pwq;
39841befcf30STejun Heo 
39855b95e1afSLai Jiangshan 	lockdep_assert_held(&wq_pool_mutex);
39861befcf30STejun Heo 	lockdep_assert_held(&wq->mutex);
39871befcf30STejun Heo 
39881befcf30STejun Heo 	/* link_pwq() can handle duplicate calls */
39891befcf30STejun Heo 	link_pwq(pwq);
39901befcf30STejun Heo 
39911befcf30STejun Heo 	old_pwq = rcu_access_pointer(wq->numa_pwq_tbl[node]);
39921befcf30STejun Heo 	rcu_assign_pointer(wq->numa_pwq_tbl[node], pwq);
39931befcf30STejun Heo 	return old_pwq;
39941befcf30STejun Heo }
39951befcf30STejun Heo 
39962d5f0764SLai Jiangshan /* context to store the prepared attrs & pwqs before applying */
39972d5f0764SLai Jiangshan struct apply_wqattrs_ctx {
39982d5f0764SLai Jiangshan 	struct workqueue_struct	*wq;		/* target workqueue */
39992d5f0764SLai Jiangshan 	struct workqueue_attrs	*attrs;		/* attrs to apply */
4000042f7df1SLai Jiangshan 	struct list_head	list;		/* queued for batching commit */
40012d5f0764SLai Jiangshan 	struct pool_workqueue	*dfl_pwq;
40022d5f0764SLai Jiangshan 	struct pool_workqueue	*pwq_tbl[];
40032d5f0764SLai Jiangshan };
40042d5f0764SLai Jiangshan 
40052d5f0764SLai Jiangshan /* free the resources after success or abort */
40062d5f0764SLai Jiangshan static void apply_wqattrs_cleanup(struct apply_wqattrs_ctx *ctx)
40072d5f0764SLai Jiangshan {
40082d5f0764SLai Jiangshan 	if (ctx) {
40092d5f0764SLai Jiangshan 		int node;
40102d5f0764SLai Jiangshan 
40112d5f0764SLai Jiangshan 		for_each_node(node)
40122d5f0764SLai Jiangshan 			put_pwq_unlocked(ctx->pwq_tbl[node]);
40132d5f0764SLai Jiangshan 		put_pwq_unlocked(ctx->dfl_pwq);
40142d5f0764SLai Jiangshan 
40152d5f0764SLai Jiangshan 		free_workqueue_attrs(ctx->attrs);
40162d5f0764SLai Jiangshan 
40172d5f0764SLai Jiangshan 		kfree(ctx);
40182d5f0764SLai Jiangshan 	}
40192d5f0764SLai Jiangshan }
40202d5f0764SLai Jiangshan 
40212d5f0764SLai Jiangshan /* allocate the attrs and pwqs for later installation */
40222d5f0764SLai Jiangshan static struct apply_wqattrs_ctx *
40232d5f0764SLai Jiangshan apply_wqattrs_prepare(struct workqueue_struct *wq,
402499c621efSLai Jiangshan 		      const struct workqueue_attrs *attrs,
402599c621efSLai Jiangshan 		      const cpumask_var_t unbound_cpumask)
40262d5f0764SLai Jiangshan {
40272d5f0764SLai Jiangshan 	struct apply_wqattrs_ctx *ctx;
40282d5f0764SLai Jiangshan 	struct workqueue_attrs *new_attrs, *tmp_attrs;
40292d5f0764SLai Jiangshan 	int node;
40302d5f0764SLai Jiangshan 
40312d5f0764SLai Jiangshan 	lockdep_assert_held(&wq_pool_mutex);
40322d5f0764SLai Jiangshan 
4033acafe7e3SKees Cook 	ctx = kzalloc(struct_size(ctx, pwq_tbl, nr_node_ids), GFP_KERNEL);
40342d5f0764SLai Jiangshan 
4035be69d00dSThomas Gleixner 	new_attrs = alloc_workqueue_attrs();
4036be69d00dSThomas Gleixner 	tmp_attrs = alloc_workqueue_attrs();
40372d5f0764SLai Jiangshan 	if (!ctx || !new_attrs || !tmp_attrs)
40382d5f0764SLai Jiangshan 		goto out_free;
40392d5f0764SLai Jiangshan 
4040042f7df1SLai Jiangshan 	/*
404199c621efSLai Jiangshan 	 * Calculate the attrs of the default pwq with @unbound_cpumask, which
404299c621efSLai Jiangshan 	 * is either the current wq_unbound_cpumask or a mask about to replace it.
4043042f7df1SLai Jiangshan 	 * If the user configured cpumask doesn't overlap with the
4044042f7df1SLai Jiangshan 	 * wq_unbound_cpumask, we fallback to the wq_unbound_cpumask.
4045042f7df1SLai Jiangshan 	 */
40462d5f0764SLai Jiangshan 	copy_workqueue_attrs(new_attrs, attrs);
404799c621efSLai Jiangshan 	cpumask_and(new_attrs->cpumask, new_attrs->cpumask, unbound_cpumask);
4048042f7df1SLai Jiangshan 	if (unlikely(cpumask_empty(new_attrs->cpumask)))
404999c621efSLai Jiangshan 		cpumask_copy(new_attrs->cpumask, unbound_cpumask);
40502d5f0764SLai Jiangshan 
40512d5f0764SLai Jiangshan 	/*
40522d5f0764SLai Jiangshan 	 * We may create multiple pwqs with differing cpumasks.  Make a
40532d5f0764SLai Jiangshan 	 * copy of @new_attrs which will be modified and used to obtain
40542d5f0764SLai Jiangshan 	 * pools.
40552d5f0764SLai Jiangshan 	 */
40562d5f0764SLai Jiangshan 	copy_workqueue_attrs(tmp_attrs, new_attrs);
40572d5f0764SLai Jiangshan 
40582d5f0764SLai Jiangshan 	/*
40592d5f0764SLai Jiangshan 	 * If something goes wrong during CPU up/down, we'll fall back to
40602d5f0764SLai Jiangshan 	 * the default pwq covering whole @attrs->cpumask.  Always create
40612d5f0764SLai Jiangshan 	 * it even if we don't use it immediately.
40622d5f0764SLai Jiangshan 	 */
40632d5f0764SLai Jiangshan 	ctx->dfl_pwq = alloc_unbound_pwq(wq, new_attrs);
40642d5f0764SLai Jiangshan 	if (!ctx->dfl_pwq)
40652d5f0764SLai Jiangshan 		goto out_free;
40662d5f0764SLai Jiangshan 
40672d5f0764SLai Jiangshan 	for_each_node(node) {
4068042f7df1SLai Jiangshan 		if (wq_calc_node_cpumask(new_attrs, node, -1, tmp_attrs->cpumask)) {
40692d5f0764SLai Jiangshan 			ctx->pwq_tbl[node] = alloc_unbound_pwq(wq, tmp_attrs);
40702d5f0764SLai Jiangshan 			if (!ctx->pwq_tbl[node])
40712d5f0764SLai Jiangshan 				goto out_free;
40722d5f0764SLai Jiangshan 		} else {
40732d5f0764SLai Jiangshan 			ctx->dfl_pwq->refcnt++;
40742d5f0764SLai Jiangshan 			ctx->pwq_tbl[node] = ctx->dfl_pwq;
40752d5f0764SLai Jiangshan 		}
40762d5f0764SLai Jiangshan 	}
40772d5f0764SLai Jiangshan 
4078042f7df1SLai Jiangshan 	/* save the user configured attrs and sanitize it. */
4079042f7df1SLai Jiangshan 	copy_workqueue_attrs(new_attrs, attrs);
4080042f7df1SLai Jiangshan 	cpumask_and(new_attrs->cpumask, new_attrs->cpumask, cpu_possible_mask);
40812d5f0764SLai Jiangshan 	ctx->attrs = new_attrs;
4082042f7df1SLai Jiangshan 
40832d5f0764SLai Jiangshan 	ctx->wq = wq;
40842d5f0764SLai Jiangshan 	free_workqueue_attrs(tmp_attrs);
40852d5f0764SLai Jiangshan 	return ctx;
40862d5f0764SLai Jiangshan 
40872d5f0764SLai Jiangshan out_free:
40882d5f0764SLai Jiangshan 	free_workqueue_attrs(tmp_attrs);
40892d5f0764SLai Jiangshan 	free_workqueue_attrs(new_attrs);
40902d5f0764SLai Jiangshan 	apply_wqattrs_cleanup(ctx);
40912d5f0764SLai Jiangshan 	return NULL;
40922d5f0764SLai Jiangshan }
40932d5f0764SLai Jiangshan 
40942d5f0764SLai Jiangshan /* set attrs and install prepared pwqs, @ctx points to old pwqs on return */
40952d5f0764SLai Jiangshan static void apply_wqattrs_commit(struct apply_wqattrs_ctx *ctx)
40962d5f0764SLai Jiangshan {
40972d5f0764SLai Jiangshan 	int node;
40982d5f0764SLai Jiangshan 
40992d5f0764SLai Jiangshan 	/* all pwqs have been created successfully, let's install'em */
41002d5f0764SLai Jiangshan 	mutex_lock(&ctx->wq->mutex);
41012d5f0764SLai Jiangshan 
41022d5f0764SLai Jiangshan 	copy_workqueue_attrs(ctx->wq->unbound_attrs, ctx->attrs);
41032d5f0764SLai Jiangshan 
41042d5f0764SLai Jiangshan 	/* save the previous pwq and install the new one */
41052d5f0764SLai Jiangshan 	for_each_node(node)
41062d5f0764SLai Jiangshan 		ctx->pwq_tbl[node] = numa_pwq_tbl_install(ctx->wq, node,
41072d5f0764SLai Jiangshan 							  ctx->pwq_tbl[node]);
41082d5f0764SLai Jiangshan 
41092d5f0764SLai Jiangshan 	/* @dfl_pwq might not have been used, ensure it's linked */
41102d5f0764SLai Jiangshan 	link_pwq(ctx->dfl_pwq);
41112d5f0764SLai Jiangshan 	swap(ctx->wq->dfl_pwq, ctx->dfl_pwq);
41122d5f0764SLai Jiangshan 
41132d5f0764SLai Jiangshan 	mutex_unlock(&ctx->wq->mutex);
41142d5f0764SLai Jiangshan }
41152d5f0764SLai Jiangshan 
4116a0111cf6SLai Jiangshan static void apply_wqattrs_lock(void)
4117a0111cf6SLai Jiangshan {
4118a0111cf6SLai Jiangshan 	/* CPUs should stay stable across pwq creations and installations */
4119ffd8bea8SSebastian Andrzej Siewior 	cpus_read_lock();
4120a0111cf6SLai Jiangshan 	mutex_lock(&wq_pool_mutex);
4121a0111cf6SLai Jiangshan }
4122a0111cf6SLai Jiangshan 
4123a0111cf6SLai Jiangshan static void apply_wqattrs_unlock(void)
4124a0111cf6SLai Jiangshan {
4125a0111cf6SLai Jiangshan 	mutex_unlock(&wq_pool_mutex);
4126ffd8bea8SSebastian Andrzej Siewior 	cpus_read_unlock();
4127a0111cf6SLai Jiangshan }
4128a0111cf6SLai Jiangshan 
4129a0111cf6SLai Jiangshan static int apply_workqueue_attrs_locked(struct workqueue_struct *wq,
4130a0111cf6SLai Jiangshan 					const struct workqueue_attrs *attrs)
4131a0111cf6SLai Jiangshan {
4132a0111cf6SLai Jiangshan 	struct apply_wqattrs_ctx *ctx;
4133a0111cf6SLai Jiangshan 
4134a0111cf6SLai Jiangshan 	/* only unbound workqueues can change attributes */
4135a0111cf6SLai Jiangshan 	if (WARN_ON(!(wq->flags & WQ_UNBOUND)))
4136a0111cf6SLai Jiangshan 		return -EINVAL;
4137a0111cf6SLai Jiangshan 
4138a0111cf6SLai Jiangshan 	/* creating multiple pwqs breaks ordering guarantee */
41390a94efb5STejun Heo 	if (!list_empty(&wq->pwqs)) {
41400a94efb5STejun Heo 		if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
4141a0111cf6SLai Jiangshan 			return -EINVAL;
4142a0111cf6SLai Jiangshan 
41430a94efb5STejun Heo 		wq->flags &= ~__WQ_ORDERED;
41440a94efb5STejun Heo 	}
41450a94efb5STejun Heo 
414699c621efSLai Jiangshan 	ctx = apply_wqattrs_prepare(wq, attrs, wq_unbound_cpumask);
41476201171eSwanghaibin 	if (!ctx)
41486201171eSwanghaibin 		return -ENOMEM;
4149a0111cf6SLai Jiangshan 
4150a0111cf6SLai Jiangshan 	/* the ctx has been prepared successfully, let's commit it */
4151a0111cf6SLai Jiangshan 	apply_wqattrs_commit(ctx);
4152a0111cf6SLai Jiangshan 	apply_wqattrs_cleanup(ctx);
4153a0111cf6SLai Jiangshan 
41546201171eSwanghaibin 	return 0;
4155a0111cf6SLai Jiangshan }
4156a0111cf6SLai Jiangshan 
41579e8cd2f5STejun Heo /**
41589e8cd2f5STejun Heo  * apply_workqueue_attrs - apply new workqueue_attrs to an unbound workqueue
41599e8cd2f5STejun Heo  * @wq: the target workqueue
41609e8cd2f5STejun Heo  * @attrs: the workqueue_attrs to apply, allocated with alloc_workqueue_attrs()
41619e8cd2f5STejun Heo  *
41624c16bd32STejun Heo  * Apply @attrs to an unbound workqueue @wq.  Unless disabled, on NUMA
41634c16bd32STejun Heo  * machines, this function maps a separate pwq to each NUMA node with
41644c16bd32STejun Heo  * possible CPUs in @attrs->cpumask so that work items are affine to the
41654c16bd32STejun Heo  * NUMA node it was issued on.  Older pwqs are released as in-flight work
41664c16bd32STejun Heo  * items finish.  Note that a work item which repeatedly requeues itself
41674c16bd32STejun Heo  * back-to-back will stay on its current pwq.
41689e8cd2f5STejun Heo  *
4169d185af30SYacine Belkadi  * Performs GFP_KERNEL allocations.
4170d185af30SYacine Belkadi  *
4171ffd8bea8SSebastian Andrzej Siewior  * Assumes caller has CPU hotplug read exclusion, i.e. cpus_read_lock().
4172509b3204SDaniel Jordan  *
4173d185af30SYacine Belkadi  * Return: 0 on success and -errno on failure.
41749e8cd2f5STejun Heo  */
4175513c98d0SDaniel Jordan int apply_workqueue_attrs(struct workqueue_struct *wq,
41769e8cd2f5STejun Heo 			  const struct workqueue_attrs *attrs)
41779e8cd2f5STejun Heo {
4178a0111cf6SLai Jiangshan 	int ret;
41799e8cd2f5STejun Heo 
4180509b3204SDaniel Jordan 	lockdep_assert_cpus_held();
4181509b3204SDaniel Jordan 
4182509b3204SDaniel Jordan 	mutex_lock(&wq_pool_mutex);
4183a0111cf6SLai Jiangshan 	ret = apply_workqueue_attrs_locked(wq, attrs);
4184509b3204SDaniel Jordan 	mutex_unlock(&wq_pool_mutex);
41852d5f0764SLai Jiangshan 
41862d5f0764SLai Jiangshan 	return ret;
41879e8cd2f5STejun Heo }
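/*
 * Illustrative sketch ("my_wq" is hypothetical): pinning an unbound
 * workqueue to CPU 0 at nice -5.  Per the rules above, the caller must
 * hold cpus_read_lock() across the call.
 *
 *	struct workqueue_attrs *attrs = alloc_workqueue_attrs();
 *	int ret = -ENOMEM;
 *
 *	if (attrs) {
 *		attrs->nice = -5;
 *		cpumask_copy(attrs->cpumask, cpumask_of(0));
 *		ret = apply_workqueue_attrs(my_wq, attrs);
 *		free_workqueue_attrs(attrs);
 *	}
 */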
41889e8cd2f5STejun Heo 
41894c16bd32STejun Heo /**
41904c16bd32STejun Heo  * wq_update_unbound_numa - update NUMA affinity of a wq for CPU hot[un]plug
41914c16bd32STejun Heo  * @wq: the target workqueue
41924c16bd32STejun Heo  * @cpu: the CPU coming up or going down
41934c16bd32STejun Heo  * @online: whether @cpu is coming up or going down
41944c16bd32STejun Heo  *
41954c16bd32STejun Heo  * This function is to be called from %CPU_DOWN_PREPARE, %CPU_ONLINE and
41964c16bd32STejun Heo  * %CPU_DOWN_FAILED.  @cpu is being hot[un]plugged, update NUMA affinity of
41974c16bd32STejun Heo  * @wq accordingly.
41984c16bd32STejun Heo  *
41994c16bd32STejun Heo  * If NUMA affinity can't be adjusted due to memory allocation failure, it
42004c16bd32STejun Heo  * falls back to @wq->dfl_pwq which may not be optimal but is always
42014c16bd32STejun Heo  * correct.
42024c16bd32STejun Heo  *
42034c16bd32STejun Heo  * Note that when the last allowed CPU of a NUMA node goes offline for a
42044c16bd32STejun Heo  * workqueue with a cpumask spanning multiple nodes, the workers which were
42054c16bd32STejun Heo  * already executing the work items for the workqueue will lose their CPU
42064c16bd32STejun Heo  * affinity and may execute on any CPU.  This is similar to how per-cpu
42074c16bd32STejun Heo  * workqueues behave on CPU_DOWN.  If a workqueue user wants strict
42084c16bd32STejun Heo  * affinity, it's the user's responsibility to flush the work item from
42094c16bd32STejun Heo  * CPU_DOWN_PREPARE.
42104c16bd32STejun Heo  */
42114c16bd32STejun Heo static void wq_update_unbound_numa(struct workqueue_struct *wq, int cpu,
42124c16bd32STejun Heo 				   bool online)
42134c16bd32STejun Heo {
42144c16bd32STejun Heo 	int node = cpu_to_node(cpu);
42154c16bd32STejun Heo 	int cpu_off = online ? -1 : cpu;
42164c16bd32STejun Heo 	struct pool_workqueue *old_pwq = NULL, *pwq;
42174c16bd32STejun Heo 	struct workqueue_attrs *target_attrs;
42184c16bd32STejun Heo 	cpumask_t *cpumask;
42194c16bd32STejun Heo 
42204c16bd32STejun Heo 	lockdep_assert_held(&wq_pool_mutex);
42214c16bd32STejun Heo 
4222f7142ed4SLai Jiangshan 	if (!wq_numa_enabled || !(wq->flags & WQ_UNBOUND) ||
4223f7142ed4SLai Jiangshan 	    wq->unbound_attrs->no_numa)
42244c16bd32STejun Heo 		return;
42254c16bd32STejun Heo 
42264c16bd32STejun Heo 	/*
42274c16bd32STejun Heo 	 * We don't wanna alloc/free wq_attrs for each wq for each CPU.
42284c16bd32STejun Heo 	 * Let's use a preallocated one.  The following buf is protected by
42294c16bd32STejun Heo 	 * CPU hotplug exclusion.
42304c16bd32STejun Heo 	 */
42314c16bd32STejun Heo 	target_attrs = wq_update_unbound_numa_attrs_buf;
42324c16bd32STejun Heo 	cpumask = target_attrs->cpumask;
42334c16bd32STejun Heo 
42344c16bd32STejun Heo 	copy_workqueue_attrs(target_attrs, wq->unbound_attrs);
42354c16bd32STejun Heo 	pwq = unbound_pwq_by_node(wq, node);
42364c16bd32STejun Heo 
42374c16bd32STejun Heo 	/*
42384c16bd32STejun Heo 	 * Let's determine what needs to be done.  If the target cpumask is
4239042f7df1SLai Jiangshan 	 * different from the default pwq's, we need to compare it to @pwq's
4240042f7df1SLai Jiangshan 	 * and create a new one if they don't match.  If the target cpumask
4241042f7df1SLai Jiangshan 	 * equals the default pwq's, the default pwq should be used.
42424c16bd32STejun Heo 	 */
4243042f7df1SLai Jiangshan 	if (wq_calc_node_cpumask(wq->dfl_pwq->pool->attrs, node, cpu_off, cpumask)) {
42444c16bd32STejun Heo 		if (cpumask_equal(cpumask, pwq->pool->attrs->cpumask))
4245f7142ed4SLai Jiangshan 			return;
42464c16bd32STejun Heo 	} else {
42474c16bd32STejun Heo 		goto use_dfl_pwq;
42484c16bd32STejun Heo 	}
42494c16bd32STejun Heo 
42504c16bd32STejun Heo 	/* create a new pwq */
42514c16bd32STejun Heo 	pwq = alloc_unbound_pwq(wq, target_attrs);
42524c16bd32STejun Heo 	if (!pwq) {
42532d916033SFabian Frederick 		pr_warn("workqueue: allocation failed while updating NUMA affinity of \"%s\"\n",
42544c16bd32STejun Heo 			wq->name);
425577f300b1SDaeseok Youn 		goto use_dfl_pwq;
42564c16bd32STejun Heo 	}
42574c16bd32STejun Heo 
4258f7142ed4SLai Jiangshan 	/* Install the new pwq. */
42594c16bd32STejun Heo 	mutex_lock(&wq->mutex);
42604c16bd32STejun Heo 	old_pwq = numa_pwq_tbl_install(wq, node, pwq);
42614c16bd32STejun Heo 	goto out_unlock;
42624c16bd32STejun Heo 
42634c16bd32STejun Heo use_dfl_pwq:
4264f7142ed4SLai Jiangshan 	mutex_lock(&wq->mutex);
4265a9b8a985SSebastian Andrzej Siewior 	raw_spin_lock_irq(&wq->dfl_pwq->pool->lock);
42664c16bd32STejun Heo 	get_pwq(wq->dfl_pwq);
4267a9b8a985SSebastian Andrzej Siewior 	raw_spin_unlock_irq(&wq->dfl_pwq->pool->lock);
42684c16bd32STejun Heo 	old_pwq = numa_pwq_tbl_install(wq, node, wq->dfl_pwq);
42694c16bd32STejun Heo out_unlock:
42704c16bd32STejun Heo 	mutex_unlock(&wq->mutex);
42714c16bd32STejun Heo 	put_pwq_unlocked(old_pwq);
42724c16bd32STejun Heo }
42734c16bd32STejun Heo 
427430cdf249STejun Heo static int alloc_and_link_pwqs(struct workqueue_struct *wq)
42751da177e4SLinus Torvalds {
427649e3cf44STejun Heo 	bool highpri = wq->flags & WQ_HIGHPRI;
42778a2b7538STejun Heo 	int cpu, ret;
4278e1d8aa9fSFrederic Weisbecker 
427930cdf249STejun Heo 	if (!(wq->flags & WQ_UNBOUND)) {
4280420c0ddbSTejun Heo 		wq->cpu_pwqs = alloc_percpu(struct pool_workqueue);
4281420c0ddbSTejun Heo 		if (!wq->cpu_pwqs)
428230cdf249STejun Heo 			return -ENOMEM;
428330cdf249STejun Heo 
428430cdf249STejun Heo 		for_each_possible_cpu(cpu) {
42857fb98ea7STejun Heo 			struct pool_workqueue *pwq =
42867fb98ea7STejun Heo 				per_cpu_ptr(wq->cpu_pwqs, cpu);
42877a62c2c8STejun Heo 			struct worker_pool *cpu_pools =
4288f02ae73aSTejun Heo 				per_cpu(cpu_worker_pools, cpu);
428930cdf249STejun Heo 
4290f147f29eSTejun Heo 			init_pwq(pwq, wq, &cpu_pools[highpri]);
4291f147f29eSTejun Heo 
4292f147f29eSTejun Heo 			mutex_lock(&wq->mutex);
42931befcf30STejun Heo 			link_pwq(pwq);
4294f147f29eSTejun Heo 			mutex_unlock(&wq->mutex);
429530cdf249STejun Heo 		}
429630cdf249STejun Heo 		return 0;
4297509b3204SDaniel Jordan 	}
4298509b3204SDaniel Jordan 
4299ffd8bea8SSebastian Andrzej Siewior 	cpus_read_lock();
4300509b3204SDaniel Jordan 	if (wq->flags & __WQ_ORDERED) {
43018a2b7538STejun Heo 		ret = apply_workqueue_attrs(wq, ordered_wq_attrs[highpri]);
43028a2b7538STejun Heo 		/* there should only be a single pwq for the ordering guarantee */
43038a2b7538STejun Heo 		WARN(!ret && (wq->pwqs.next != &wq->dfl_pwq->pwqs_node ||
43048a2b7538STejun Heo 			      wq->pwqs.prev != &wq->dfl_pwq->pwqs_node),
43058a2b7538STejun Heo 		     "ordering guarantee broken for workqueue %s\n", wq->name);
43069e8cd2f5STejun Heo 	} else {
4307509b3204SDaniel Jordan 		ret = apply_workqueue_attrs(wq, unbound_std_wq_attrs[highpri]);
43089e8cd2f5STejun Heo 	}
4309ffd8bea8SSebastian Andrzej Siewior 	cpus_read_unlock();
4310509b3204SDaniel Jordan 
4311509b3204SDaniel Jordan 	return ret;
43120f900049STejun Heo }
43130f900049STejun Heo 
4314f3421797STejun Heo static int wq_clamp_max_active(int max_active, unsigned int flags,
4315f3421797STejun Heo 			       const char *name)
4316b71ab8c2STejun Heo {
4317f3421797STejun Heo 	int lim = flags & WQ_UNBOUND ? WQ_UNBOUND_MAX_ACTIVE : WQ_MAX_ACTIVE;
4318f3421797STejun Heo 
4319f3421797STejun Heo 	if (max_active < 1 || max_active > lim)
4320044c782cSValentin Ilie 		pr_warn("workqueue: max_active %d requested for %s is out of range, clamping between %d and %d\n",
4321f3421797STejun Heo 			max_active, name, 1, lim);
4322b71ab8c2STejun Heo 
4323f3421797STejun Heo 	return clamp_val(max_active, 1, lim);
4324b71ab8c2STejun Heo }
4325b71ab8c2STejun Heo 
4326983c7515STejun Heo /*
4327983c7515STejun Heo  * Workqueues which may be used during memory reclaim should have a rescuer
4328983c7515STejun Heo  * to guarantee forward progress.
4329983c7515STejun Heo  */
4330983c7515STejun Heo static int init_rescuer(struct workqueue_struct *wq)
4331983c7515STejun Heo {
4332983c7515STejun Heo 	struct worker *rescuer;
4333b92b36eaSDan Carpenter 	int ret;
4334983c7515STejun Heo 
4335983c7515STejun Heo 	if (!(wq->flags & WQ_MEM_RECLAIM))
4336983c7515STejun Heo 		return 0;
4337983c7515STejun Heo 
4338983c7515STejun Heo 	rescuer = alloc_worker(NUMA_NO_NODE);
4339983c7515STejun Heo 	if (!rescuer)
4340983c7515STejun Heo 		return -ENOMEM;
4341983c7515STejun Heo 
4342983c7515STejun Heo 	rescuer->rescue_wq = wq;
4343983c7515STejun Heo 	rescuer->task = kthread_create(rescuer_thread, rescuer, "%s", wq->name);
4344f187b697SSean Fu 	if (IS_ERR(rescuer->task)) {
4345b92b36eaSDan Carpenter 		ret = PTR_ERR(rescuer->task);
4346983c7515STejun Heo 		kfree(rescuer);
4347b92b36eaSDan Carpenter 		return ret;
4348983c7515STejun Heo 	}
4349983c7515STejun Heo 
4350983c7515STejun Heo 	wq->rescuer = rescuer;
4351983c7515STejun Heo 	kthread_bind_mask(rescuer->task, cpu_possible_mask);
4352983c7515STejun Heo 	wake_up_process(rescuer->task);
4353983c7515STejun Heo 
4354983c7515STejun Heo 	return 0;
4355983c7515STejun Heo }
4356983c7515STejun Heo 
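/*
 * Editor's note: hedged illustration, not part of the original file.  A
 * rescuer is created automatically for workqueues allocated with
 * WQ_MEM_RECLAIM, e.g. (the name is hypothetical):
 *
 *	wq = alloc_workqueue("example_reclaim_wq", WQ_MEM_RECLAIM, 0);
 *
 * Passing 0 for max_active selects WQ_DFL_ACTIVE.
 */
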
4357a2775bbcSMathieu Malaterre __printf(1, 4)
4358669de8bdSBart Van Assche struct workqueue_struct *alloc_workqueue(const char *fmt,
435997e37d7bSTejun Heo 					 unsigned int flags,
4360669de8bdSBart Van Assche 					 int max_active, ...)
43613af24433SOleg Nesterov {
4362df2d5ae4STejun Heo 	size_t tbl_size = 0;
4363ecf6881fSTejun Heo 	va_list args;
43643af24433SOleg Nesterov 	struct workqueue_struct *wq;
436549e3cf44STejun Heo 	struct pool_workqueue *pwq;
4366b196be89STejun Heo 
43675c0338c6STejun Heo 	/*
43685c0338c6STejun Heo 	 * Unbound && max_active == 1 used to imply ordered, which is no
43695c0338c6STejun Heo 	 * longer the case on NUMA machines due to per-node pools.  While
43705c0338c6STejun Heo 	 * alloc_ordered_workqueue() is the right way to create an ordered
43715c0338c6STejun Heo 	 * workqueue, keep the previous behavior to avoid subtle breakages
43725c0338c6STejun Heo 	 * on NUMA.
43735c0338c6STejun Heo 	 */
43745c0338c6STejun Heo 	if ((flags & WQ_UNBOUND) && max_active == 1)
43755c0338c6STejun Heo 		flags |= __WQ_ORDERED;
43765c0338c6STejun Heo 
4377cee22a15SViresh Kumar 	/* see the comment above the definition of WQ_POWER_EFFICIENT */
4378cee22a15SViresh Kumar 	if ((flags & WQ_POWER_EFFICIENT) && wq_power_efficient)
4379cee22a15SViresh Kumar 		flags |= WQ_UNBOUND;
4380cee22a15SViresh Kumar 
4381ecf6881fSTejun Heo 	/* allocate wq and format name */
4382df2d5ae4STejun Heo 	if (flags & WQ_UNBOUND)
4383ddcb57e2SLai Jiangshan 		tbl_size = nr_node_ids * sizeof(wq->numa_pwq_tbl[0]);
4384df2d5ae4STejun Heo 
4385df2d5ae4STejun Heo 	wq = kzalloc(sizeof(*wq) + tbl_size, GFP_KERNEL);
4386b196be89STejun Heo 	if (!wq)
4387d2c1d404STejun Heo 		return NULL;
4388b196be89STejun Heo 
43896029a918STejun Heo 	if (flags & WQ_UNBOUND) {
4390be69d00dSThomas Gleixner 		wq->unbound_attrs = alloc_workqueue_attrs();
43916029a918STejun Heo 		if (!wq->unbound_attrs)
43926029a918STejun Heo 			goto err_free_wq;
43936029a918STejun Heo 	}
43946029a918STejun Heo 
4395669de8bdSBart Van Assche 	va_start(args, max_active);
4396ecf6881fSTejun Heo 	vsnprintf(wq->name, sizeof(wq->name), fmt, args);
4397b196be89STejun Heo 	va_end(args);
43983af24433SOleg Nesterov 
4399d320c038STejun Heo 	max_active = max_active ?: WQ_DFL_ACTIVE;
4400b196be89STejun Heo 	max_active = wq_clamp_max_active(max_active, flags, wq->name);
44013af24433SOleg Nesterov 
4402b196be89STejun Heo 	/* init wq */
440397e37d7bSTejun Heo 	wq->flags = flags;
4404a0a1a5fdSTejun Heo 	wq->saved_max_active = max_active;
44053c25a55dSLai Jiangshan 	mutex_init(&wq->mutex);
4406112202d9STejun Heo 	atomic_set(&wq->nr_pwqs_to_flush, 0);
440730cdf249STejun Heo 	INIT_LIST_HEAD(&wq->pwqs);
440873f53c4aSTejun Heo 	INIT_LIST_HEAD(&wq->flusher_queue);
440973f53c4aSTejun Heo 	INIT_LIST_HEAD(&wq->flusher_overflow);
4410493a1724STejun Heo 	INIT_LIST_HEAD(&wq->maydays);
44113af24433SOleg Nesterov 
4412669de8bdSBart Van Assche 	wq_init_lockdep(wq);
4413cce1a165SOleg Nesterov 	INIT_LIST_HEAD(&wq->list);
44143af24433SOleg Nesterov 
441530cdf249STejun Heo 	if (alloc_and_link_pwqs(wq) < 0)
441682efcab3SBart Van Assche 		goto err_unreg_lockdep;
44171537663fSTejun Heo 
441840c17f75STejun Heo 	if (wq_online && init_rescuer(wq) < 0)
4419d2c1d404STejun Heo 		goto err_destroy;
4420e22bee78STejun Heo 
4421226223abSTejun Heo 	if ((wq->flags & WQ_SYSFS) && workqueue_sysfs_register(wq))
4422226223abSTejun Heo 		goto err_destroy;
4423226223abSTejun Heo 
44246af8bf3dSOleg Nesterov 	/*
442568e13a67SLai Jiangshan 	 * wq_pool_mutex protects global freeze state and workqueues list.
442668e13a67SLai Jiangshan 	 * Grab it, adjust max_active and add the new @wq to workqueues
442768e13a67SLai Jiangshan 	 * list.
44286af8bf3dSOleg Nesterov 	 */
442968e13a67SLai Jiangshan 	mutex_lock(&wq_pool_mutex);
4430a0a1a5fdSTejun Heo 
4431a357fc03SLai Jiangshan 	mutex_lock(&wq->mutex);
443249e3cf44STejun Heo 	for_each_pwq(pwq, wq)
4433699ce097STejun Heo 		pwq_adjust_max_active(pwq);
4434a357fc03SLai Jiangshan 	mutex_unlock(&wq->mutex);
4435a0a1a5fdSTejun Heo 
4436e2dca7adSTejun Heo 	list_add_tail_rcu(&wq->list, &workqueues);
4437a0a1a5fdSTejun Heo 
443868e13a67SLai Jiangshan 	mutex_unlock(&wq_pool_mutex);
44393af24433SOleg Nesterov 
44403af24433SOleg Nesterov 	return wq;
4441d2c1d404STejun Heo 
444282efcab3SBart Van Assche err_unreg_lockdep:
4443009bb421SBart Van Assche 	wq_unregister_lockdep(wq);
4444009bb421SBart Van Assche 	wq_free_lockdep(wq);
444582efcab3SBart Van Assche err_free_wq:
44466029a918STejun Heo 	free_workqueue_attrs(wq->unbound_attrs);
44474690c4abSTejun Heo 	kfree(wq);
4448d2c1d404STejun Heo 	return NULL;
4449d2c1d404STejun Heo err_destroy:
4450d2c1d404STejun Heo 	destroy_workqueue(wq);
44514690c4abSTejun Heo 	return NULL;
44521da177e4SLinus Torvalds }
4453669de8bdSBart Van Assche EXPORT_SYMBOL_GPL(alloc_workqueue);
44541da177e4SLinus Torvalds 
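/*
 * Editor's note: a hedged usage sketch, not part of the original file.
 * The workqueue name, flags and max_active below are illustrative only;
 * later editor examples reuse example_wq and example_work.
 */
static struct workqueue_struct *example_wq;
static struct work_struct example_work;	/* would be set up with INIT_WORK() */

static int __init example_wq_init(void)
{
	/* unbound, freezable, up to 4 concurrent work items, name "ex/1" */
	example_wq = alloc_workqueue("ex/%d", WQ_UNBOUND | WQ_FREEZABLE, 4, 1);
	return example_wq ? 0 : -ENOMEM;
}
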
4455c29eb853STejun Heo static bool pwq_busy(struct pool_workqueue *pwq)
4456c29eb853STejun Heo {
4457c29eb853STejun Heo 	int i;
4458c29eb853STejun Heo 
4459c29eb853STejun Heo 	for (i = 0; i < WORK_NR_COLORS; i++)
4460c29eb853STejun Heo 		if (pwq->nr_in_flight[i])
4461c29eb853STejun Heo 			return true;
4462c29eb853STejun Heo 
4463c29eb853STejun Heo 	if ((pwq != pwq->wq->dfl_pwq) && (pwq->refcnt > 1))
4464c29eb853STejun Heo 		return true;
4465f97a4a1aSLai Jiangshan 	if (pwq->nr_active || !list_empty(&pwq->inactive_works))
4466c29eb853STejun Heo 		return true;
4467c29eb853STejun Heo 
4468c29eb853STejun Heo 	return false;
4469c29eb853STejun Heo }
4470c29eb853STejun Heo 
44713af24433SOleg Nesterov /**
44723af24433SOleg Nesterov  * destroy_workqueue - safely terminate a workqueue
44733af24433SOleg Nesterov  * @wq: target workqueue
44743af24433SOleg Nesterov  *
44753af24433SOleg Nesterov  * Safely destroy a workqueue. All work currently pending will be done first.
44763af24433SOleg Nesterov  */
44773af24433SOleg Nesterov void destroy_workqueue(struct workqueue_struct *wq)
44783af24433SOleg Nesterov {
447949e3cf44STejun Heo 	struct pool_workqueue *pwq;
44804c16bd32STejun Heo 	int node;
44813af24433SOleg Nesterov 
4482def98c84STejun Heo 	/*
4483def98c84STejun Heo 	 * Remove it from sysfs first so that sanity check failure doesn't
4484def98c84STejun Heo 	 * lead to sysfs name conflicts.
4485def98c84STejun Heo 	 */
4486def98c84STejun Heo 	workqueue_sysfs_unregister(wq);
4487def98c84STejun Heo 
448933e3f0a3SRichard Clark 	/* mark that workqueue destruction is in progress */
448933e3f0a3SRichard Clark 	mutex_lock(&wq->mutex);
449033e3f0a3SRichard Clark 	wq->flags |= __WQ_DESTROYING;
449133e3f0a3SRichard Clark 	mutex_unlock(&wq->mutex);
449233e3f0a3SRichard Clark 
44939c5a2ba7STejun Heo 	/* drain it before proceeding with destruction */
44949c5a2ba7STejun Heo 	drain_workqueue(wq);
4495c8efcc25STejun Heo 
4496def98c84STejun Heo 	/* kill rescuer, if sanity checks fail, leave it w/o rescuer */
4497def98c84STejun Heo 	if (wq->rescuer) {
4498def98c84STejun Heo 		struct worker *rescuer = wq->rescuer;
4499def98c84STejun Heo 
4500def98c84STejun Heo 		/* this prevents new queueing */
4501a9b8a985SSebastian Andrzej Siewior 		raw_spin_lock_irq(&wq_mayday_lock);
4502def98c84STejun Heo 		wq->rescuer = NULL;
4503a9b8a985SSebastian Andrzej Siewior 		raw_spin_unlock_irq(&wq_mayday_lock);
4504def98c84STejun Heo 
4505def98c84STejun Heo 		/* rescuer will empty maydays list before exiting */
4506def98c84STejun Heo 		kthread_stop(rescuer->task);
45078efe1223STejun Heo 		kfree(rescuer);
4508def98c84STejun Heo 	}
4509def98c84STejun Heo 
4510c29eb853STejun Heo 	/*
4511c29eb853STejun Heo 	 * Sanity checks - grab all the locks so that we wait for all
4512c29eb853STejun Heo 	 * in-flight operations which may do put_pwq().
4513c29eb853STejun Heo 	 */
4514c29eb853STejun Heo 	mutex_lock(&wq_pool_mutex);
4515b09f4fd3SLai Jiangshan 	mutex_lock(&wq->mutex);
451649e3cf44STejun Heo 	for_each_pwq(pwq, wq) {
4517a9b8a985SSebastian Andrzej Siewior 		raw_spin_lock_irq(&pwq->pool->lock);
4518c29eb853STejun Heo 		if (WARN_ON(pwq_busy(pwq))) {
45191d9a6159SKefeng Wang 			pr_warn("%s: %s has the following busy pwq\n",
4520e66b39afSTejun Heo 				__func__, wq->name);
4521c29eb853STejun Heo 			show_pwq(pwq);
4522a9b8a985SSebastian Andrzej Siewior 			raw_spin_unlock_irq(&pwq->pool->lock);
4523b09f4fd3SLai Jiangshan 			mutex_unlock(&wq->mutex);
4524c29eb853STejun Heo 			mutex_unlock(&wq_pool_mutex);
452555df0933SImran Khan 			show_one_workqueue(wq);
45266183c009STejun Heo 			return;
452776af4d93STejun Heo 		}
4528a9b8a985SSebastian Andrzej Siewior 		raw_spin_unlock_irq(&pwq->pool->lock);
452976af4d93STejun Heo 	}
4530b09f4fd3SLai Jiangshan 	mutex_unlock(&wq->mutex);
45316183c009STejun Heo 
4532a0a1a5fdSTejun Heo 	/*
4533a0a1a5fdSTejun Heo 	 * wq list is used to freeze wq, remove from list after
4534a0a1a5fdSTejun Heo 	 * flushing is complete in case freeze races us.
4535a0a1a5fdSTejun Heo 	 */
4536e2dca7adSTejun Heo 	list_del_rcu(&wq->list);
453768e13a67SLai Jiangshan 	mutex_unlock(&wq_pool_mutex);
45383af24433SOleg Nesterov 
45398864b4e5STejun Heo 	if (!(wq->flags & WQ_UNBOUND)) {
4540669de8bdSBart Van Assche 		wq_unregister_lockdep(wq);
454129c91e99STejun Heo 		/*
45428864b4e5STejun Heo 		 * The base ref is never dropped on per-cpu pwqs.  Directly
4543e2dca7adSTejun Heo 		 * schedule RCU free.
454429c91e99STejun Heo 		 */
454525b00775SPaul E. McKenney 		call_rcu(&wq->rcu, rcu_free_wq);
45468864b4e5STejun Heo 	} else {
45478864b4e5STejun Heo 		/*
45488864b4e5STejun Heo 		 * We're the sole accessor of @wq at this point.  Directly
45494c16bd32STejun Heo 		 * access numa_pwq_tbl[] and dfl_pwq to put the base refs.
45504c16bd32STejun Heo 		 * @wq will be freed when the last pwq is released.
45518864b4e5STejun Heo 		 */
45524c16bd32STejun Heo 		for_each_node(node) {
45534c16bd32STejun Heo 			pwq = rcu_access_pointer(wq->numa_pwq_tbl[node]);
45544c16bd32STejun Heo 			RCU_INIT_POINTER(wq->numa_pwq_tbl[node], NULL);
45554c16bd32STejun Heo 			put_pwq_unlocked(pwq);
45564c16bd32STejun Heo 		}
45574c16bd32STejun Heo 
45584c16bd32STejun Heo 		/*
45594c16bd32STejun Heo 		 * Put dfl_pwq.  @wq may be freed any time after dfl_pwq is
45604c16bd32STejun Heo 		 * put.  Don't access it afterwards.
45614c16bd32STejun Heo 		 */
45624c16bd32STejun Heo 		pwq = wq->dfl_pwq;
45634c16bd32STejun Heo 		wq->dfl_pwq = NULL;
4564dce90d47STejun Heo 		put_pwq_unlocked(pwq);
456529c91e99STejun Heo 	}
45663af24433SOleg Nesterov }
45673af24433SOleg Nesterov EXPORT_SYMBOL_GPL(destroy_workqueue);
45683af24433SOleg Nesterov 
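/*
 * Editor's note: hedged teardown sketch pairing with the allocation
 * example above; not part of the original file.  destroy_workqueue()
 * drains pending work itself, so no explicit flush is needed first.
 */
static void __exit example_wq_exit(void)
{
	if (example_wq)
		destroy_workqueue(example_wq);
}
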
4569dcd989cbSTejun Heo /**
4570dcd989cbSTejun Heo  * workqueue_set_max_active - adjust max_active of a workqueue
4571dcd989cbSTejun Heo  * @wq: target workqueue
4572dcd989cbSTejun Heo  * @max_active: new max_active value.
4573dcd989cbSTejun Heo  *
4574dcd989cbSTejun Heo  * Set max_active of @wq to @max_active.
4575dcd989cbSTejun Heo  *
4576dcd989cbSTejun Heo  * CONTEXT:
4577dcd989cbSTejun Heo  * Don't call from IRQ context.
4578dcd989cbSTejun Heo  */
4579dcd989cbSTejun Heo void workqueue_set_max_active(struct workqueue_struct *wq, int max_active)
4580dcd989cbSTejun Heo {
458149e3cf44STejun Heo 	struct pool_workqueue *pwq;
4582dcd989cbSTejun Heo 
45838719dceaSTejun Heo 	/* disallow meddling with max_active for ordered workqueues */
45840a94efb5STejun Heo 	if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
45858719dceaSTejun Heo 		return;
45868719dceaSTejun Heo 
4587f3421797STejun Heo 	max_active = wq_clamp_max_active(max_active, wq->flags, wq->name);
4588dcd989cbSTejun Heo 
4589a357fc03SLai Jiangshan 	mutex_lock(&wq->mutex);
4590dcd989cbSTejun Heo 
45910a94efb5STejun Heo 	wq->flags &= ~__WQ_ORDERED;
4592dcd989cbSTejun Heo 	wq->saved_max_active = max_active;
4593dcd989cbSTejun Heo 
4594699ce097STejun Heo 	for_each_pwq(pwq, wq)
4595699ce097STejun Heo 		pwq_adjust_max_active(pwq);
4596dcd989cbSTejun Heo 
4597a357fc03SLai Jiangshan 	mutex_unlock(&wq->mutex);
4598dcd989cbSTejun Heo }
4599dcd989cbSTejun Heo EXPORT_SYMBOL_GPL(workqueue_set_max_active);
4600dcd989cbSTejun Heo 
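/*
 * Editor's note: hedged illustration, not part of the original file.  A
 * caller that finds its (non-ordered) workqueue saturated could widen it
 * at runtime:
 *
 *	workqueue_set_max_active(example_wq, 16);
 */
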
4601dcd989cbSTejun Heo /**
460227d4ee03SLukas Wunner  * current_work - retrieve %current task's work struct
460327d4ee03SLukas Wunner  *
460427d4ee03SLukas Wunner  * Determine if %current task is a workqueue worker and what it's working on.
460527d4ee03SLukas Wunner  * Useful to find out the context that the %current task is running in.
460627d4ee03SLukas Wunner  *
460727d4ee03SLukas Wunner  * Return: work struct if %current task is a workqueue worker, %NULL otherwise.
460827d4ee03SLukas Wunner  */
460927d4ee03SLukas Wunner struct work_struct *current_work(void)
461027d4ee03SLukas Wunner {
461127d4ee03SLukas Wunner 	struct worker *worker = current_wq_worker();
461227d4ee03SLukas Wunner 
461327d4ee03SLukas Wunner 	return worker ? worker->current_work : NULL;
461427d4ee03SLukas Wunner }
461527d4ee03SLukas Wunner EXPORT_SYMBOL(current_work);
461627d4ee03SLukas Wunner 
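/*
 * Editor's note: hedged sketch, not part of the original file, of the
 * re-entrancy check current_work() enables: a helper that may be called
 * both directly and from its own work item can avoid flushing itself.
 */
static void example_sync(struct work_struct *work)
{
	if (current_work() == work)
		return;		/* running inside the work item; flushing would deadlock */
	flush_work(work);
}
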
461727d4ee03SLukas Wunner /**
4618e6267616STejun Heo  * current_is_workqueue_rescuer - is %current workqueue rescuer?
4619e6267616STejun Heo  *
4620e6267616STejun Heo  * Determine whether %current is a workqueue rescuer.  Can be used from
4621e6267616STejun Heo  * work functions to determine whether it's being run off the rescuer task.
4622d185af30SYacine Belkadi  *
4623d185af30SYacine Belkadi  * Return: %true if %current is a workqueue rescuer. %false otherwise.
4624e6267616STejun Heo  */
4625e6267616STejun Heo bool current_is_workqueue_rescuer(void)
4626e6267616STejun Heo {
4627e6267616STejun Heo 	struct worker *worker = current_wq_worker();
4628e6267616STejun Heo 
46296a092dfdSLai Jiangshan 	return worker && worker->rescue_wq;
4630e6267616STejun Heo }
4631e6267616STejun Heo 
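/*
 * Editor's note: hedged illustration, not part of the original file.  A
 * work function running off the rescuer is the last line of defense for
 * forward progress and should avoid blocking on further queued work:
 *
 *	if (current_is_workqueue_rescuer())
 *		return -EBUSY;
 *
 * The -EBUSY fallback is hypothetical.
 */
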
4632e6267616STejun Heo /**
4633dcd989cbSTejun Heo  * workqueue_congested - test whether a workqueue is congested
4634dcd989cbSTejun Heo  * @cpu: CPU in question
4635dcd989cbSTejun Heo  * @wq: target workqueue
4636dcd989cbSTejun Heo  *
4637dcd989cbSTejun Heo  * Test whether @wq's cpu workqueue for @cpu is congested.  There is
4638dcd989cbSTejun Heo  * no synchronization around this function and the test result is
4639dcd989cbSTejun Heo  * unreliable and only useful as advisory hints or for debugging.
4640dcd989cbSTejun Heo  *
4641d3251859STejun Heo  * If @cpu is WORK_CPU_UNBOUND, the test is performed on the local CPU.
4642d3251859STejun Heo  * Note that both per-cpu and unbound workqueues may be associated with
4643d3251859STejun Heo  * multiple pool_workqueues which have separate congested states.  A
4644d3251859STejun Heo  * workqueue being congested on one CPU doesn't mean the workqueue is also
4645d3251859STejun Heo  * congested on other CPUs / NUMA nodes.
4646d3251859STejun Heo  *
4647d185af30SYacine Belkadi  * Return:
4648dcd989cbSTejun Heo  * %true if congested, %false otherwise.
4649dcd989cbSTejun Heo  */
4650d84ff051STejun Heo bool workqueue_congested(int cpu, struct workqueue_struct *wq)
4651dcd989cbSTejun Heo {
46527fb98ea7STejun Heo 	struct pool_workqueue *pwq;
465376af4d93STejun Heo 	bool ret;
465476af4d93STejun Heo 
465524acfb71SThomas Gleixner 	rcu_read_lock();
465624acfb71SThomas Gleixner 	preempt_disable();
46577fb98ea7STejun Heo 
4658d3251859STejun Heo 	if (cpu == WORK_CPU_UNBOUND)
4659d3251859STejun Heo 		cpu = smp_processor_id();
4660d3251859STejun Heo 
46617fb98ea7STejun Heo 	if (!(wq->flags & WQ_UNBOUND))
46627fb98ea7STejun Heo 		pwq = per_cpu_ptr(wq->cpu_pwqs, cpu);
46637fb98ea7STejun Heo 	else
4664df2d5ae4STejun Heo 		pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu));
4665dcd989cbSTejun Heo 
4666f97a4a1aSLai Jiangshan 	ret = !list_empty(&pwq->inactive_works);
466724acfb71SThomas Gleixner 	preempt_enable();
466824acfb71SThomas Gleixner 	rcu_read_unlock();
466976af4d93STejun Heo 
467076af4d93STejun Heo 	return ret;
4671dcd989cbSTejun Heo }
4672dcd989cbSTejun Heo EXPORT_SYMBOL_GPL(workqueue_congested);
4673dcd989cbSTejun Heo 
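/*
 * Editor's note: hedged usage sketch, not part of the original file.
 * Since the result is advisory, it is only suitable for best-effort
 * decisions such as load shedding (example_wq/example_work are the
 * hypothetical objects from the allocation example above):
 *
 *	if (workqueue_congested(WORK_CPU_UNBOUND, example_wq))
 *		pr_debug("example_wq congested on this CPU, deferring\n");
 *	else
 *		queue_work(example_wq, &example_work);
 */
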
4674dcd989cbSTejun Heo /**
4675dcd989cbSTejun Heo  * work_busy - test whether a work is currently pending or running
4676dcd989cbSTejun Heo  * @work: the work to be tested
4677dcd989cbSTejun Heo  *
4678dcd989cbSTejun Heo  * Test whether @work is currently pending or running.  There is no
4679dcd989cbSTejun Heo  * synchronization around this function and the test result is
4680dcd989cbSTejun Heo  * unreliable and only useful as advisory hints or for debugging.
4681dcd989cbSTejun Heo  *
4682d185af30SYacine Belkadi  * Return:
4683dcd989cbSTejun Heo  * OR'd bitmask of WORK_BUSY_* bits.
4684dcd989cbSTejun Heo  */
4685dcd989cbSTejun Heo unsigned int work_busy(struct work_struct *work)
4686dcd989cbSTejun Heo {
4687fa1b54e6STejun Heo 	struct worker_pool *pool;
4688dcd989cbSTejun Heo 	unsigned long flags;
4689dcd989cbSTejun Heo 	unsigned int ret = 0;
4690dcd989cbSTejun Heo 
4691dcd989cbSTejun Heo 	if (work_pending(work))
4692dcd989cbSTejun Heo 		ret |= WORK_BUSY_PENDING;
4693038366c5SLai Jiangshan 
469424acfb71SThomas Gleixner 	rcu_read_lock();
4695fa1b54e6STejun Heo 	pool = get_work_pool(work);
4696038366c5SLai Jiangshan 	if (pool) {
4697a9b8a985SSebastian Andrzej Siewior 		raw_spin_lock_irqsave(&pool->lock, flags);
4698c9e7cf27STejun Heo 		if (find_worker_executing_work(pool, work))
4699dcd989cbSTejun Heo 			ret |= WORK_BUSY_RUNNING;
4700a9b8a985SSebastian Andrzej Siewior 		raw_spin_unlock_irqrestore(&pool->lock, flags);
4701038366c5SLai Jiangshan 	}
470224acfb71SThomas Gleixner 	rcu_read_unlock();
4703dcd989cbSTejun Heo 
4704dcd989cbSTejun Heo 	return ret;
4705dcd989cbSTejun Heo }
4706dcd989cbSTejun Heo EXPORT_SYMBOL_GPL(work_busy);
4707dcd989cbSTejun Heo 
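/*
 * Editor's note: hedged debugging sketch, not part of the original file.
 * The result is racy, so treat it as a hint only:
 *
 *	unsigned int busy = work_busy(&example_work);
 *
 *	pr_debug("example_work: pending=%d running=%d\n",
 *		 !!(busy & WORK_BUSY_PENDING), !!(busy & WORK_BUSY_RUNNING));
 */
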
47083d1cb205STejun Heo /**
47093d1cb205STejun Heo  * set_worker_desc - set description for the current work item
47103d1cb205STejun Heo  * @fmt: printf-style format string
47113d1cb205STejun Heo  * @...: arguments for the format string
47123d1cb205STejun Heo  *
47133d1cb205STejun Heo  * This function can be called by a running work function to describe what
47143d1cb205STejun Heo  * the work item is about.  If the worker task gets dumped, this
47153d1cb205STejun Heo  * information will be printed out together with the dump to help debugging.  The
47163d1cb205STejun Heo  * description can be at most WORKER_DESC_LEN including the trailing '\0'.
47173d1cb205STejun Heo  */
47183d1cb205STejun Heo void set_worker_desc(const char *fmt, ...)
47193d1cb205STejun Heo {
47203d1cb205STejun Heo 	struct worker *worker = current_wq_worker();
47213d1cb205STejun Heo 	va_list args;
47223d1cb205STejun Heo 
47233d1cb205STejun Heo 	if (worker) {
47243d1cb205STejun Heo 		va_start(args, fmt);
47253d1cb205STejun Heo 		vsnprintf(worker->desc, sizeof(worker->desc), fmt, args);
47263d1cb205STejun Heo 		va_end(args);
47273d1cb205STejun Heo 	}
47283d1cb205STejun Heo }
47295c750d58SSteffen Maier EXPORT_SYMBOL_GPL(set_worker_desc);
47303d1cb205STejun Heo 
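/*
 * Editor's note: hedged illustration, not part of the original file.  A
 * work function can tag itself so a later task dump identifies what it
 * was handling; the device is hypothetical:
 *
 *	set_worker_desc("flushing %s", dev_name(example_dev));
 */
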
47313d1cb205STejun Heo /**
47323d1cb205STejun Heo  * print_worker_info - print out worker information and description
47333d1cb205STejun Heo  * @log_lvl: the log level to use when printing
47343d1cb205STejun Heo  * @task: target task
47353d1cb205STejun Heo  *
47363d1cb205STejun Heo  * If @task is a worker and currently executing a work item, print out the
47373d1cb205STejun Heo  * name of the workqueue being serviced and worker description set with
47383d1cb205STejun Heo  * set_worker_desc() by the currently executing work item.
47393d1cb205STejun Heo  *
47403d1cb205STejun Heo  * This function can be safely called on any task as long as the
47413d1cb205STejun Heo  * task_struct itself is accessible.  While safe, this function isn't
47423d1cb205STejun Heo  * synchronized and may print out mixed-up or garbage output of limited length.
47433d1cb205STejun Heo  */
47443d1cb205STejun Heo void print_worker_info(const char *log_lvl, struct task_struct *task)
47453d1cb205STejun Heo {
47463d1cb205STejun Heo 	work_func_t *fn = NULL;
47473d1cb205STejun Heo 	char name[WQ_NAME_LEN] = { };
47483d1cb205STejun Heo 	char desc[WORKER_DESC_LEN] = { };
47493d1cb205STejun Heo 	struct pool_workqueue *pwq = NULL;
47503d1cb205STejun Heo 	struct workqueue_struct *wq = NULL;
47513d1cb205STejun Heo 	struct worker *worker;
47523d1cb205STejun Heo 
47533d1cb205STejun Heo 	if (!(task->flags & PF_WQ_WORKER))
47543d1cb205STejun Heo 		return;
47553d1cb205STejun Heo 
47563d1cb205STejun Heo 	/*
47573d1cb205STejun Heo 	 * This function is called without any synchronization and @task
47583d1cb205STejun Heo 	 * could be in any state.  Be careful with dereferences.
47593d1cb205STejun Heo 	 */
4760e700591aSPetr Mladek 	worker = kthread_probe_data(task);
47613d1cb205STejun Heo 
47623d1cb205STejun Heo 	/*
47638bf89593STejun Heo 	 * Carefully copy the associated workqueue's workfn, name and desc.
47648bf89593STejun Heo 	 * Keep the original last '\0' in case the original is garbage.
47653d1cb205STejun Heo 	 */
4766fe557319SChristoph Hellwig 	copy_from_kernel_nofault(&fn, &worker->current_func, sizeof(fn));
4767fe557319SChristoph Hellwig 	copy_from_kernel_nofault(&pwq, &worker->current_pwq, sizeof(pwq));
4768fe557319SChristoph Hellwig 	copy_from_kernel_nofault(&wq, &pwq->wq, sizeof(wq));
4769fe557319SChristoph Hellwig 	copy_from_kernel_nofault(name, wq->name, sizeof(name) - 1);
4770fe557319SChristoph Hellwig 	copy_from_kernel_nofault(desc, worker->desc, sizeof(desc) - 1);
47713d1cb205STejun Heo 
47723d1cb205STejun Heo 	if (fn || name[0] || desc[0]) {
4773d75f773cSSakari Ailus 		printk("%sWorkqueue: %s %ps", log_lvl, name, fn);
47748bf89593STejun Heo 		if (strcmp(name, desc))
47753d1cb205STejun Heo 			pr_cont(" (%s)", desc);
47763d1cb205STejun Heo 		pr_cont("\n");
47773d1cb205STejun Heo 	}
47783d1cb205STejun Heo }
47793d1cb205STejun Heo 
47803494fc30STejun Heo static void pr_cont_pool_info(struct worker_pool *pool)
47813494fc30STejun Heo {
47823494fc30STejun Heo 	pr_cont(" cpus=%*pbl", nr_cpumask_bits, pool->attrs->cpumask);
47833494fc30STejun Heo 	if (pool->node != NUMA_NO_NODE)
47843494fc30STejun Heo 		pr_cont(" node=%d", pool->node);
47853494fc30STejun Heo 	pr_cont(" flags=0x%x nice=%d", pool->flags, pool->attrs->nice);
47863494fc30STejun Heo }
47873494fc30STejun Heo 
4788c76feb0dSPaul E. McKenney struct pr_cont_work_struct {
4789c76feb0dSPaul E. McKenney 	bool comma;
4790c76feb0dSPaul E. McKenney 	work_func_t func;
4791c76feb0dSPaul E. McKenney 	long ctr;
4792c76feb0dSPaul E. McKenney };
4793c76feb0dSPaul E. McKenney 
4794c76feb0dSPaul E. McKenney static void pr_cont_work_flush(bool comma, work_func_t func, struct pr_cont_work_struct *pcwsp)
4795c76feb0dSPaul E. McKenney {
4796c76feb0dSPaul E. McKenney 	if (!pcwsp->ctr)
4797c76feb0dSPaul E. McKenney 		goto out_record;
4798c76feb0dSPaul E. McKenney 	if (func == pcwsp->func) {
4799c76feb0dSPaul E. McKenney 		pcwsp->ctr++;
4800c76feb0dSPaul E. McKenney 		return;
4801c76feb0dSPaul E. McKenney 	}
4802c76feb0dSPaul E. McKenney 	if (pcwsp->ctr == 1)
4803c76feb0dSPaul E. McKenney 		pr_cont("%s %ps", pcwsp->comma ? "," : "", pcwsp->func);
4804c76feb0dSPaul E. McKenney 	else
4805c76feb0dSPaul E. McKenney 		pr_cont("%s %ld*%ps", pcwsp->comma ? "," : "", pcwsp->ctr, pcwsp->func);
4806c76feb0dSPaul E. McKenney 	pcwsp->ctr = 0;
4807c76feb0dSPaul E. McKenney out_record:
4808c76feb0dSPaul E. McKenney 	if ((long)func == -1L)
4809c76feb0dSPaul E. McKenney 		return;
4810c76feb0dSPaul E. McKenney 	pcwsp->comma = comma;
4811c76feb0dSPaul E. McKenney 	pcwsp->func = func;
4812c76feb0dSPaul E. McKenney 	pcwsp->ctr = 1;
4813c76feb0dSPaul E. McKenney }
4814c76feb0dSPaul E. McKenney 
4815c76feb0dSPaul E. McKenney static void pr_cont_work(bool comma, struct work_struct *work, struct pr_cont_work_struct *pcwsp)
48163494fc30STejun Heo {
48173494fc30STejun Heo 	if (work->func == wq_barrier_func) {
48183494fc30STejun Heo 		struct wq_barrier *barr;
48193494fc30STejun Heo 
48203494fc30STejun Heo 		barr = container_of(work, struct wq_barrier, work);
48213494fc30STejun Heo 
4822c76feb0dSPaul E. McKenney 		pr_cont_work_flush(comma, (work_func_t)-1, pcwsp);
48233494fc30STejun Heo 		pr_cont("%s BAR(%d)", comma ? "," : "",
48243494fc30STejun Heo 			task_pid_nr(barr->task));
48253494fc30STejun Heo 	} else {
4826c76feb0dSPaul E. McKenney 		if (!comma)
4827c76feb0dSPaul E. McKenney 			pr_cont_work_flush(comma, (work_func_t)-1, pcwsp);
4828c76feb0dSPaul E. McKenney 		pr_cont_work_flush(comma, work->func, pcwsp);
48293494fc30STejun Heo 	}
48303494fc30STejun Heo }
48313494fc30STejun Heo 
48323494fc30STejun Heo static void show_pwq(struct pool_workqueue *pwq)
48333494fc30STejun Heo {
4834c76feb0dSPaul E. McKenney 	struct pr_cont_work_struct pcws = { .ctr = 0, };
48353494fc30STejun Heo 	struct worker_pool *pool = pwq->pool;
48363494fc30STejun Heo 	struct work_struct *work;
48373494fc30STejun Heo 	struct worker *worker;
48383494fc30STejun Heo 	bool has_in_flight = false, has_pending = false;
48393494fc30STejun Heo 	int bkt;
48403494fc30STejun Heo 
48413494fc30STejun Heo 	pr_info("  pwq %d:", pool->id);
48423494fc30STejun Heo 	pr_cont_pool_info(pool);
48433494fc30STejun Heo 
4844e66b39afSTejun Heo 	pr_cont(" active=%d/%d refcnt=%d%s\n",
4845e66b39afSTejun Heo 		pwq->nr_active, pwq->max_active, pwq->refcnt,
48463494fc30STejun Heo 		!list_empty(&pwq->mayday_node) ? " MAYDAY" : "");
48473494fc30STejun Heo 
48483494fc30STejun Heo 	hash_for_each(pool->busy_hash, bkt, worker, hentry) {
48493494fc30STejun Heo 		if (worker->current_pwq == pwq) {
48503494fc30STejun Heo 			has_in_flight = true;
48513494fc30STejun Heo 			break;
48523494fc30STejun Heo 		}
48533494fc30STejun Heo 	}
48543494fc30STejun Heo 	if (has_in_flight) {
48553494fc30STejun Heo 		bool comma = false;
48563494fc30STejun Heo 
48573494fc30STejun Heo 		pr_info("    in-flight:");
48583494fc30STejun Heo 		hash_for_each(pool->busy_hash, bkt, worker, hentry) {
48593494fc30STejun Heo 			if (worker->current_pwq != pwq)
48603494fc30STejun Heo 				continue;
48613494fc30STejun Heo 
4862d75f773cSSakari Ailus 			pr_cont("%s %d%s:%ps", comma ? "," : "",
48633494fc30STejun Heo 				task_pid_nr(worker->task),
486430ae2fc0STejun Heo 				worker->rescue_wq ? "(RESCUER)" : "",
48653494fc30STejun Heo 				worker->current_func);
48663494fc30STejun Heo 			list_for_each_entry(work, &worker->scheduled, entry)
4867c76feb0dSPaul E. McKenney 				pr_cont_work(false, work, &pcws);
4868c76feb0dSPaul E. McKenney 			pr_cont_work_flush(comma, (work_func_t)-1L, &pcws);
48693494fc30STejun Heo 			comma = true;
48703494fc30STejun Heo 		}
48713494fc30STejun Heo 		pr_cont("\n");
48723494fc30STejun Heo 	}
48733494fc30STejun Heo 
48743494fc30STejun Heo 	list_for_each_entry(work, &pool->worklist, entry) {
48753494fc30STejun Heo 		if (get_work_pwq(work) == pwq) {
48763494fc30STejun Heo 			has_pending = true;
48773494fc30STejun Heo 			break;
48783494fc30STejun Heo 		}
48793494fc30STejun Heo 	}
48803494fc30STejun Heo 	if (has_pending) {
48813494fc30STejun Heo 		bool comma = false;
48823494fc30STejun Heo 
48833494fc30STejun Heo 		pr_info("    pending:");
48843494fc30STejun Heo 		list_for_each_entry(work, &pool->worklist, entry) {
48853494fc30STejun Heo 			if (get_work_pwq(work) != pwq)
48863494fc30STejun Heo 				continue;
48873494fc30STejun Heo 
4888c76feb0dSPaul E. McKenney 			pr_cont_work(comma, work, &pcws);
48893494fc30STejun Heo 			comma = !(*work_data_bits(work) & WORK_STRUCT_LINKED);
48903494fc30STejun Heo 		}
4891c76feb0dSPaul E. McKenney 		pr_cont_work_flush(comma, (work_func_t)-1L, &pcws);
48923494fc30STejun Heo 		pr_cont("\n");
48933494fc30STejun Heo 	}
48943494fc30STejun Heo 
4895f97a4a1aSLai Jiangshan 	if (!list_empty(&pwq->inactive_works)) {
48963494fc30STejun Heo 		bool comma = false;
48973494fc30STejun Heo 
4898f97a4a1aSLai Jiangshan 		pr_info("    inactive:");
4899f97a4a1aSLai Jiangshan 		list_for_each_entry(work, &pwq->inactive_works, entry) {
4900c76feb0dSPaul E. McKenney 			pr_cont_work(comma, work, &pcws);
49013494fc30STejun Heo 			comma = !(*work_data_bits(work) & WORK_STRUCT_LINKED);
49023494fc30STejun Heo 		}
4903c76feb0dSPaul E. McKenney 		pr_cont_work_flush(comma, (work_func_t)-1L, &pcws);
49043494fc30STejun Heo 		pr_cont("\n");
49053494fc30STejun Heo 	}
49063494fc30STejun Heo }
49073494fc30STejun Heo 
49083494fc30STejun Heo /**
490955df0933SImran Khan  * show_one_workqueue - dump state of specified workqueue
491055df0933SImran Khan  * @wq: workqueue whose state will be printed
49113494fc30STejun Heo  */
491255df0933SImran Khan void show_one_workqueue(struct workqueue_struct *wq)
49133494fc30STejun Heo {
49143494fc30STejun Heo 	struct pool_workqueue *pwq;
49153494fc30STejun Heo 	bool idle = true;
491655df0933SImran Khan 	unsigned long flags;
49173494fc30STejun Heo 
49183494fc30STejun Heo 	for_each_pwq(pwq, wq) {
4919f97a4a1aSLai Jiangshan 		if (pwq->nr_active || !list_empty(&pwq->inactive_works)) {
49203494fc30STejun Heo 			idle = false;
49213494fc30STejun Heo 			break;
49223494fc30STejun Heo 		}
49233494fc30STejun Heo 	}
492455df0933SImran Khan 	if (idle) /* Nothing to print for idle workqueue */
492555df0933SImran Khan 		return;
49263494fc30STejun Heo 
49273494fc30STejun Heo 	pr_info("workqueue %s: flags=0x%x\n", wq->name, wq->flags);
49283494fc30STejun Heo 
49293494fc30STejun Heo 	for_each_pwq(pwq, wq) {
4930a9b8a985SSebastian Andrzej Siewior 		raw_spin_lock_irqsave(&pwq->pool->lock, flags);
493157116ce1SJohan Hovold 		if (pwq->nr_active || !list_empty(&pwq->inactive_works)) {
493257116ce1SJohan Hovold 			/*
493357116ce1SJohan Hovold 			 * Defer printing to avoid deadlocks in console
493457116ce1SJohan Hovold 			 * drivers that queue work while holding locks
493557116ce1SJohan Hovold 			 * also taken in their write paths.
493657116ce1SJohan Hovold 			 */
493757116ce1SJohan Hovold 			printk_deferred_enter();
49383494fc30STejun Heo 			show_pwq(pwq);
493957116ce1SJohan Hovold 			printk_deferred_exit();
494057116ce1SJohan Hovold 		}
4941a9b8a985SSebastian Andrzej Siewior 		raw_spin_unlock_irqrestore(&pwq->pool->lock, flags);
494262635ea8SSergey Senozhatsky 		/*
494362635ea8SSergey Senozhatsky 		 * We could be printing a lot from atomic context, e.g.
494455df0933SImran Khan 		 * sysrq-t -> show_all_workqueues(). Avoid triggering
494562635ea8SSergey Senozhatsky 		 * hard lockup.
494662635ea8SSergey Senozhatsky 		 */
494762635ea8SSergey Senozhatsky 		touch_nmi_watchdog();
49483494fc30STejun Heo 	}
494955df0933SImran Khan 
49503494fc30STejun Heo }
49513494fc30STejun Heo 
495255df0933SImran Khan /**
495355df0933SImran Khan  * show_one_worker_pool - dump state of specified worker pool
495455df0933SImran Khan  * @pool: worker pool whose state will be printed
495555df0933SImran Khan  */
495655df0933SImran Khan static void show_one_worker_pool(struct worker_pool *pool)
495755df0933SImran Khan {
49583494fc30STejun Heo 	struct worker *worker;
49593494fc30STejun Heo 	bool first = true;
496055df0933SImran Khan 	unsigned long flags;
49613494fc30STejun Heo 
4962a9b8a985SSebastian Andrzej Siewior 	raw_spin_lock_irqsave(&pool->lock, flags);
49633494fc30STejun Heo 	if (pool->nr_workers == pool->nr_idle)
49643494fc30STejun Heo 		goto next_pool;
496557116ce1SJohan Hovold 	/*
496657116ce1SJohan Hovold 	 * Defer printing to avoid deadlocks in console drivers that
496757116ce1SJohan Hovold 	 * queue work while holding locks also taken in their write
496857116ce1SJohan Hovold 	 * paths.
496957116ce1SJohan Hovold 	 */
497057116ce1SJohan Hovold 	printk_deferred_enter();
49713494fc30STejun Heo 	pr_info("pool %d:", pool->id);
49723494fc30STejun Heo 	pr_cont_pool_info(pool);
497382607adcSTejun Heo 	pr_cont(" hung=%us workers=%d",
497482607adcSTejun Heo 		jiffies_to_msecs(jiffies - pool->watchdog_ts) / 1000,
497582607adcSTejun Heo 		pool->nr_workers);
49763494fc30STejun Heo 	if (pool->manager)
49773494fc30STejun Heo 		pr_cont(" manager: %d",
49783494fc30STejun Heo 			task_pid_nr(pool->manager->task));
49793494fc30STejun Heo 	list_for_each_entry(worker, &pool->idle_list, entry) {
49803494fc30STejun Heo 		pr_cont(" %s%d", first ? "idle: " : "",
49813494fc30STejun Heo 			task_pid_nr(worker->task));
49823494fc30STejun Heo 		first = false;
49833494fc30STejun Heo 	}
49843494fc30STejun Heo 	pr_cont("\n");
498557116ce1SJohan Hovold 	printk_deferred_exit();
49863494fc30STejun Heo next_pool:
4987a9b8a985SSebastian Andrzej Siewior 	raw_spin_unlock_irqrestore(&pool->lock, flags);
498862635ea8SSergey Senozhatsky 	/*
498962635ea8SSergey Senozhatsky 	 * We could be printing a lot from atomic context, e.g.
499055df0933SImran Khan 	 * sysrq-t -> show_all_workqueues(). Avoid triggering
499162635ea8SSergey Senozhatsky 	 * hard lockup.
499262635ea8SSergey Senozhatsky 	 */
499362635ea8SSergey Senozhatsky 	touch_nmi_watchdog();
499455df0933SImran Khan 
49953494fc30STejun Heo }
49963494fc30STejun Heo 
499755df0933SImran Khan /**
499855df0933SImran Khan  * show_all_workqueues - dump workqueue state
499955df0933SImran Khan  *
500055df0933SImran Khan  * Called from a sysrq handler or try_to_freeze_tasks() and prints out
500155df0933SImran Khan  * all busy workqueues and pools.
500255df0933SImran Khan  */
500355df0933SImran Khan void show_all_workqueues(void)
500455df0933SImran Khan {
500555df0933SImran Khan 	struct workqueue_struct *wq;
500655df0933SImran Khan 	struct worker_pool *pool;
500755df0933SImran Khan 	int pi;
500855df0933SImran Khan 
500955df0933SImran Khan 	rcu_read_lock();
501055df0933SImran Khan 
501155df0933SImran Khan 	pr_info("Showing busy workqueues and worker pools:\n");
501255df0933SImran Khan 
501355df0933SImran Khan 	list_for_each_entry_rcu(wq, &workqueues, list)
501455df0933SImran Khan 		show_one_workqueue(wq);
501555df0933SImran Khan 
501655df0933SImran Khan 	for_each_pool(pool, pi)
501755df0933SImran Khan 		show_one_worker_pool(pool);
501855df0933SImran Khan 
501924acfb71SThomas Gleixner 	rcu_read_unlock();
50203494fc30STejun Heo }
50213494fc30STejun Heo 
50226b59808bSTejun Heo /* used to show worker information through /proc/PID/{comm,stat,status} */
50236b59808bSTejun Heo void wq_worker_comm(char *buf, size_t size, struct task_struct *task)
50246b59808bSTejun Heo {
50256b59808bSTejun Heo 	int off;
50266b59808bSTejun Heo 
50276b59808bSTejun Heo 	/* always show the actual comm */
50286b59808bSTejun Heo 	off = strscpy(buf, task->comm, size);
50296b59808bSTejun Heo 	if (off < 0)
50306b59808bSTejun Heo 		return;
50316b59808bSTejun Heo 
5032197f6accSTejun Heo 	/* stabilize PF_WQ_WORKER and worker pool association */
50336b59808bSTejun Heo 	mutex_lock(&wq_pool_attach_mutex);
50346b59808bSTejun Heo 
5035197f6accSTejun Heo 	if (task->flags & PF_WQ_WORKER) {
5036197f6accSTejun Heo 		struct worker *worker = kthread_data(task);
5037197f6accSTejun Heo 		struct worker_pool *pool = worker->pool;
50386b59808bSTejun Heo 
50396b59808bSTejun Heo 		if (pool) {
5040a9b8a985SSebastian Andrzej Siewior 			raw_spin_lock_irq(&pool->lock);
50416b59808bSTejun Heo 			/*
5042197f6accSTejun Heo 			 * ->desc tracks information (wq name or
5043197f6accSTejun Heo 			 * set_worker_desc()) for the latest execution.  If
5044197f6accSTejun Heo 			 * current, prepend '+', otherwise '-'.
50456b59808bSTejun Heo 			 */
50466b59808bSTejun Heo 			if (worker->desc[0] != '\0') {
50476b59808bSTejun Heo 				if (worker->current_work)
50486b59808bSTejun Heo 					scnprintf(buf + off, size - off, "+%s",
50496b59808bSTejun Heo 						  worker->desc);
50506b59808bSTejun Heo 				else
50516b59808bSTejun Heo 					scnprintf(buf + off, size - off, "-%s",
50526b59808bSTejun Heo 						  worker->desc);
50536b59808bSTejun Heo 			}
5054a9b8a985SSebastian Andrzej Siewior 			raw_spin_unlock_irq(&pool->lock);
50556b59808bSTejun Heo 		}
5056197f6accSTejun Heo 	}
50576b59808bSTejun Heo 
50586b59808bSTejun Heo 	mutex_unlock(&wq_pool_attach_mutex);
50596b59808bSTejun Heo }
50606b59808bSTejun Heo 
506166448bc2SMathieu Malaterre #ifdef CONFIG_SMP
506266448bc2SMathieu Malaterre 
5063db7bccf4STejun Heo /*
5064db7bccf4STejun Heo  * CPU hotplug.
5065db7bccf4STejun Heo  *
5066e22bee78STejun Heo  * There are two challenges in supporting CPU hotplug.  Firstly, there
5067112202d9STejun Heo  * are a lot of assumptions on strong associations among work, pwq and
5068706026c2STejun Heo  * pool which make migrating pending and scheduled works very
5069e22bee78STejun Heo  * difficult to implement without impacting hot paths.  Secondly,
507094cf58bbSTejun Heo  * worker pools serve a mix of short, long and very long running works, making
5071e22bee78STejun Heo  * blocked draining impractical.
5072e22bee78STejun Heo  *
507324647570STejun Heo  * This is solved by allowing the pools to be disassociated from the CPU
5074628c78e7STejun Heo  * running as an unbound one and allowing it to be reattached later if the
5075628c78e7STejun Heo  * cpu comes back online.
5076db7bccf4STejun Heo  */
5077db7bccf4STejun Heo 
5078e8b3f8dbSLai Jiangshan static void unbind_workers(int cpu)
5079db7bccf4STejun Heo {
50804ce62e9eSTejun Heo 	struct worker_pool *pool;
5081db7bccf4STejun Heo 	struct worker *worker;
5082db7bccf4STejun Heo 
5083f02ae73aSTejun Heo 	for_each_cpu_worker_pool(pool, cpu) {
50841258fae7STejun Heo 		mutex_lock(&wq_pool_attach_mutex);
5085a9b8a985SSebastian Andrzej Siewior 		raw_spin_lock_irq(&pool->lock);
5086e22bee78STejun Heo 
5087f2d5a0eeSTejun Heo 		/*
508892f9c5c4SLai Jiangshan 		 * We've blocked all attach/detach operations. Make all workers
508994cf58bbSTejun Heo 		 * unbound and set DISASSOCIATED.  Before this, all workers
509011b45b0bSLai Jiangshan 		 * must be on the cpu.  After this, they may become diasporas.
5091b4ac9384SLai Jiangshan 		 * And the preemption disabled section in their sched callbacks
5092b4ac9384SLai Jiangshan 		 * are guaranteed to see WORKER_UNBOUND since the code here
5093b4ac9384SLai Jiangshan 		 * is on the same cpu.
5094f2d5a0eeSTejun Heo 		 */
5095da028469SLai Jiangshan 		for_each_pool_worker(worker, pool)
5096403c821dSTejun Heo 			worker->flags |= WORKER_UNBOUND;
5097db7bccf4STejun Heo 
509824647570STejun Heo 		pool->flags |= POOL_DISASSOCIATED;
5099f2d5a0eeSTejun Heo 
5100e22bee78STejun Heo 		/*
5101989442d7SLai Jiangshan 		 * The handling of nr_running in sched callbacks is disabled
5102989442d7SLai Jiangshan 		 * now.  Zap nr_running.  After this, nr_running stays zero and
5103989442d7SLai Jiangshan 		 * need_more_worker() and keep_working() are always true as
5104989442d7SLai Jiangshan 		 * long as the worklist is not empty.  This pool now behaves as
5105989442d7SLai Jiangshan 		 * an unbound (in terms of concurrency management) pool which
5106eb283428SLai Jiangshan 		 * are served by workers tied to the pool.
5107e22bee78STejun Heo 		 */
5108bc35f7efSLai Jiangshan 		pool->nr_running = 0;
5109eb283428SLai Jiangshan 
5110eb283428SLai Jiangshan 		/*
5111eb283428SLai Jiangshan 		 * With concurrency management just turned off, a busy
5112eb283428SLai Jiangshan 		 * worker blocking could lead to lengthy stalls.  Kick off
5113eb283428SLai Jiangshan 		 * unbound chain execution of currently pending work items.
5114eb283428SLai Jiangshan 		 */
5115eb283428SLai Jiangshan 		wake_up_worker(pool);
5116989442d7SLai Jiangshan 
5117a9b8a985SSebastian Andrzej Siewior 		raw_spin_unlock_irq(&pool->lock);
5118989442d7SLai Jiangshan 
5119793777bcSValentin Schneider 		for_each_pool_worker(worker, pool)
5120793777bcSValentin Schneider 			unbind_worker(worker);
5121989442d7SLai Jiangshan 
5122989442d7SLai Jiangshan 		mutex_unlock(&wq_pool_attach_mutex);
5123eb283428SLai Jiangshan 	}
5124db7bccf4STejun Heo }
5125db7bccf4STejun Heo 
5126bd7c089eSTejun Heo /**
5127bd7c089eSTejun Heo  * rebind_workers - rebind all workers of a pool to the associated CPU
5128bd7c089eSTejun Heo  * @pool: pool of interest
5129bd7c089eSTejun Heo  *
5130a9ab775bSTejun Heo  * @pool->cpu is coming online.  Rebind all workers to the CPU.
5131bd7c089eSTejun Heo  */
5132bd7c089eSTejun Heo static void rebind_workers(struct worker_pool *pool)
5133bd7c089eSTejun Heo {
5134a9ab775bSTejun Heo 	struct worker *worker;
5135bd7c089eSTejun Heo 
51361258fae7STejun Heo 	lockdep_assert_held(&wq_pool_attach_mutex);
5137bd7c089eSTejun Heo 
5138bd7c089eSTejun Heo 	/*
5139a9ab775bSTejun Heo 	 * Restore CPU affinity of all workers.  As all idle workers should
5140a9ab775bSTejun Heo 	 * be on the run-queue of the associated CPU before any local
5141402dd89dSShailendra Verma 	 * wake-ups for concurrency management happen, restore CPU affinity
5142a9ab775bSTejun Heo 	 * of all workers first and then clear UNBOUND.  As we're called
5143a9ab775bSTejun Heo 	 * from CPU_ONLINE, the following shouldn't fail.
5144bd7c089eSTejun Heo 	 */
5145793777bcSValentin Schneider 	for_each_pool_worker(worker, pool)
5146793777bcSValentin Schneider 		rebind_worker(worker, pool);
5147a9ab775bSTejun Heo 
5148a9b8a985SSebastian Andrzej Siewior 	raw_spin_lock_irq(&pool->lock);
5149f7c17d26SWanpeng Li 
51503de5e884SLai Jiangshan 	pool->flags &= ~POOL_DISASSOCIATED;
5151a9ab775bSTejun Heo 
5152da028469SLai Jiangshan 	for_each_pool_worker(worker, pool) {
5153a9ab775bSTejun Heo 		unsigned int worker_flags = worker->flags;
5154a9ab775bSTejun Heo 
5155a9ab775bSTejun Heo 		/*
5156a9ab775bSTejun Heo 		 * We want to clear UNBOUND but can't directly call
5157a9ab775bSTejun Heo 		 * worker_clr_flags() or adjust nr_running.  Atomically
5158a9ab775bSTejun Heo 		 * replace UNBOUND with another NOT_RUNNING flag REBOUND.
5159a9ab775bSTejun Heo 		 * @worker will clear REBOUND using worker_clr_flags() when
5160a9ab775bSTejun Heo 		 * it initiates the next execution cycle thus restoring
5161a9ab775bSTejun Heo 		 * concurrency management.  Note that when or whether
5162a9ab775bSTejun Heo 		 * @worker clears REBOUND doesn't affect correctness.
5163a9ab775bSTejun Heo 		 *
5164c95491edSMark Rutland 		 * WRITE_ONCE() is necessary because @worker->flags may be
5165a9ab775bSTejun Heo 		 * tested without holding any lock in
51666d25be57SThomas Gleixner 		 * wq_worker_running().  Without it, NOT_RUNNING test may
5167a9ab775bSTejun Heo 		 * fail incorrectly leading to premature concurrency
5168a9ab775bSTejun Heo 		 * management operations.
5169bd7c089eSTejun Heo 		 */
5170a9ab775bSTejun Heo 		WARN_ON_ONCE(!(worker_flags & WORKER_UNBOUND));
5171a9ab775bSTejun Heo 		worker_flags |= WORKER_REBOUND;
5172a9ab775bSTejun Heo 		worker_flags &= ~WORKER_UNBOUND;
5173c95491edSMark Rutland 		WRITE_ONCE(worker->flags, worker_flags);
5174bd7c089eSTejun Heo 	}
5175a9ab775bSTejun Heo 
5176a9b8a985SSebastian Andrzej Siewior 	raw_spin_unlock_irq(&pool->lock);
5177bd7c089eSTejun Heo }
5178bd7c089eSTejun Heo 
51797dbc725eSTejun Heo /**
51807dbc725eSTejun Heo  * restore_unbound_workers_cpumask - restore cpumask of unbound workers
51817dbc725eSTejun Heo  * @pool: unbound pool of interest
51827dbc725eSTejun Heo  * @cpu: the CPU which is coming up
51837dbc725eSTejun Heo  *
51847dbc725eSTejun Heo  * An unbound pool may end up with a cpumask which doesn't have any online
51857dbc725eSTejun Heo  * CPUs.  When a worker of such a pool gets scheduled, the scheduler resets
51867dbc725eSTejun Heo  * its cpus_allowed.  If @cpu is in @pool's cpumask which didn't have any
51877dbc725eSTejun Heo  * online CPU before, cpus_allowed of all its workers should be restored.
51887dbc725eSTejun Heo  */
51897dbc725eSTejun Heo static void restore_unbound_workers_cpumask(struct worker_pool *pool, int cpu)
51907dbc725eSTejun Heo {
51917dbc725eSTejun Heo 	static cpumask_t cpumask;
51927dbc725eSTejun Heo 	struct worker *worker;
51937dbc725eSTejun Heo 
51941258fae7STejun Heo 	lockdep_assert_held(&wq_pool_attach_mutex);
51957dbc725eSTejun Heo 
51967dbc725eSTejun Heo 	/* is @cpu allowed for @pool? */
51977dbc725eSTejun Heo 	if (!cpumask_test_cpu(cpu, pool->attrs->cpumask))
51987dbc725eSTejun Heo 		return;
51997dbc725eSTejun Heo 
52007dbc725eSTejun Heo 	cpumask_and(&cpumask, pool->attrs->cpumask, cpu_online_mask);
52017dbc725eSTejun Heo 
52027dbc725eSTejun Heo 	/* as we're called from CPU_ONLINE, the following shouldn't fail */
5203da028469SLai Jiangshan 	for_each_pool_worker(worker, pool)
5204d945b5e9SPeter Zijlstra 		WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, &cpumask) < 0);
52057dbc725eSTejun Heo }
52067dbc725eSTejun Heo 
52077ee681b2SThomas Gleixner int workqueue_prepare_cpu(unsigned int cpu)
52081da177e4SLinus Torvalds {
52094ce62e9eSTejun Heo 	struct worker_pool *pool;
52101da177e4SLinus Torvalds 
5211f02ae73aSTejun Heo 	for_each_cpu_worker_pool(pool, cpu) {
52123ce63377STejun Heo 		if (pool->nr_workers)
52133ce63377STejun Heo 			continue;
5214051e1850SLai Jiangshan 		if (!create_worker(pool))
52157ee681b2SThomas Gleixner 			return -ENOMEM;
52163af24433SOleg Nesterov 	}
52177ee681b2SThomas Gleixner 	return 0;
52187ee681b2SThomas Gleixner }
52191da177e4SLinus Torvalds 
52207ee681b2SThomas Gleixner int workqueue_online_cpu(unsigned int cpu)
52217ee681b2SThomas Gleixner {
52227ee681b2SThomas Gleixner 	struct worker_pool *pool;
52237ee681b2SThomas Gleixner 	struct workqueue_struct *wq;
52247ee681b2SThomas Gleixner 	int pi;
52257ee681b2SThomas Gleixner 
522668e13a67SLai Jiangshan 	mutex_lock(&wq_pool_mutex);
52277dbc725eSTejun Heo 
52287dbc725eSTejun Heo 	for_each_pool(pool, pi) {
52291258fae7STejun Heo 		mutex_lock(&wq_pool_attach_mutex);
523094cf58bbSTejun Heo 
5231f05b558dSLai Jiangshan 		if (pool->cpu == cpu)
523294cf58bbSTejun Heo 			rebind_workers(pool);
5233f05b558dSLai Jiangshan 		else if (pool->cpu < 0)
52347dbc725eSTejun Heo 			restore_unbound_workers_cpumask(pool, cpu);
523594cf58bbSTejun Heo 
52361258fae7STejun Heo 		mutex_unlock(&wq_pool_attach_mutex);
523794cf58bbSTejun Heo 	}
52387dbc725eSTejun Heo 
52394c16bd32STejun Heo 	/* update NUMA affinity of unbound workqueues */
52404c16bd32STejun Heo 	list_for_each_entry(wq, &workqueues, list)
52414c16bd32STejun Heo 		wq_update_unbound_numa(wq, cpu, true);
52424c16bd32STejun Heo 
524368e13a67SLai Jiangshan 	mutex_unlock(&wq_pool_mutex);
52447ee681b2SThomas Gleixner 	return 0;
524565758202STejun Heo }
524665758202STejun Heo 
52477ee681b2SThomas Gleixner int workqueue_offline_cpu(unsigned int cpu)
524865758202STejun Heo {
52494c16bd32STejun Heo 	struct workqueue_struct *wq;
52508db25e78STejun Heo 
52514c16bd32STejun Heo 	/* unbinding per-cpu workers should happen on the local CPU */
5252e8b3f8dbSLai Jiangshan 	if (WARN_ON(cpu != smp_processor_id()))
5253e8b3f8dbSLai Jiangshan 		return -1;
5254e8b3f8dbSLai Jiangshan 
5255e8b3f8dbSLai Jiangshan 	unbind_workers(cpu);
52564c16bd32STejun Heo 
52574c16bd32STejun Heo 	/* update NUMA affinity of unbound workqueues */
52584c16bd32STejun Heo 	mutex_lock(&wq_pool_mutex);
52594c16bd32STejun Heo 	list_for_each_entry(wq, &workqueues, list)
52604c16bd32STejun Heo 		wq_update_unbound_numa(wq, cpu, false);
52614c16bd32STejun Heo 	mutex_unlock(&wq_pool_mutex);
52624c16bd32STejun Heo 
52637ee681b2SThomas Gleixner 	return 0;
526465758202STejun Heo }
526565758202STejun Heo 
52662d3854a3SRusty Russell struct work_for_cpu {
5267ed48ece2STejun Heo 	struct work_struct work;
52682d3854a3SRusty Russell 	long (*fn)(void *);
52692d3854a3SRusty Russell 	void *arg;
52702d3854a3SRusty Russell 	long ret;
52712d3854a3SRusty Russell };
52722d3854a3SRusty Russell 
5273ed48ece2STejun Heo static void work_for_cpu_fn(struct work_struct *work)
52742d3854a3SRusty Russell {
5275ed48ece2STejun Heo 	struct work_for_cpu *wfc = container_of(work, struct work_for_cpu, work);
5276ed48ece2STejun Heo 
52772d3854a3SRusty Russell 	wfc->ret = wfc->fn(wfc->arg);
52782d3854a3SRusty Russell }
52792d3854a3SRusty Russell 
52802d3854a3SRusty Russell /**
528122aceb31SAnna-Maria Gleixner  * work_on_cpu - run a function in thread context on a particular cpu
52822d3854a3SRusty Russell  * @cpu: the cpu to run on
52832d3854a3SRusty Russell  * @fn: the function to run
52842d3854a3SRusty Russell  * @arg: the function arg
52852d3854a3SRusty Russell  *
528631ad9081SRusty Russell  * It is up to the caller to ensure that the cpu doesn't go offline.
52876b44003eSAndrew Morton  * The caller must not hold any locks which would prevent @fn from completing.
5288d185af30SYacine Belkadi  *
5289d185af30SYacine Belkadi  * Return: The value @fn returns.
52902d3854a3SRusty Russell  */
5291d84ff051STejun Heo long work_on_cpu(int cpu, long (*fn)(void *), void *arg)
52922d3854a3SRusty Russell {
5293ed48ece2STejun Heo 	struct work_for_cpu wfc = { .fn = fn, .arg = arg };
52942d3854a3SRusty Russell 
5295ed48ece2STejun Heo 	INIT_WORK_ONSTACK(&wfc.work, work_for_cpu_fn);
5296ed48ece2STejun Heo 	schedule_work_on(cpu, &wfc.work);
529712997d1aSBjorn Helgaas 	flush_work(&wfc.work);
5298440a1136SChuansheng Liu 	destroy_work_on_stack(&wfc.work);
52992d3854a3SRusty Russell 	return wfc.ret;
53002d3854a3SRusty Russell }
53012d3854a3SRusty Russell EXPORT_SYMBOL_GPL(work_on_cpu);
53020e8d6a93SThomas Gleixner 
53030e8d6a93SThomas Gleixner /**
53040e8d6a93SThomas Gleixner  * work_on_cpu_safe - run a function in thread context on a particular cpu
53050e8d6a93SThomas Gleixner  * @cpu: the cpu to run on
53060e8d6a93SThomas Gleixner  * @fn:  the function to run
53070e8d6a93SThomas Gleixner  * @arg: the function argument
53080e8d6a93SThomas Gleixner  *
53090e8d6a93SThomas Gleixner  * Disables CPU hotplug and calls work_on_cpu(). The caller must not hold
53100e8d6a93SThomas Gleixner  * any locks which would prevent @fn from completing.
53110e8d6a93SThomas Gleixner  *
53120e8d6a93SThomas Gleixner  * Return: The value @fn returns.
53130e8d6a93SThomas Gleixner  */
53140e8d6a93SThomas Gleixner long work_on_cpu_safe(int cpu, long (*fn)(void *), void *arg)
53150e8d6a93SThomas Gleixner {
53160e8d6a93SThomas Gleixner 	long ret = -ENODEV;
53170e8d6a93SThomas Gleixner 
5318ffd8bea8SSebastian Andrzej Siewior 	cpus_read_lock();
53190e8d6a93SThomas Gleixner 	if (cpu_online(cpu))
53200e8d6a93SThomas Gleixner 		ret = work_on_cpu(cpu, fn, arg);
5321ffd8bea8SSebastian Andrzej Siewior 	cpus_read_unlock();
53220e8d6a93SThomas Gleixner 	return ret;
53230e8d6a93SThomas Gleixner }
53240e8d6a93SThomas Gleixner EXPORT_SYMBOL_GPL(work_on_cpu_safe);
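
/*
 * Example (editor's sketch): work_on_cpu_safe() folds the hotplug
 * locking from the sketch above into the call itself, so the same
 * pattern reduces to:
 *
 *	ret = work_on_cpu_safe(3, my_fn, NULL);
 *	if (ret == -ENODEV)
 *		(CPU 3 was offline)
 */
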
53252d3854a3SRusty Russell #endif /* CONFIG_SMP */
53262d3854a3SRusty Russell 
5327a0a1a5fdSTejun Heo #ifdef CONFIG_FREEZER
5328e7577c50SRusty Russell 
5329a0a1a5fdSTejun Heo /**
5330a0a1a5fdSTejun Heo  * freeze_workqueues_begin - begin freezing workqueues
5331a0a1a5fdSTejun Heo  *
533258a69cb4STejun Heo  * Start freezing workqueues.  After this function returns, all freezable
5333f97a4a1aSLai Jiangshan  * workqueues will queue new works to their inactive_works list instead of
5334706026c2STejun Heo  * pool->worklist.
5335a0a1a5fdSTejun Heo  *
5336a0a1a5fdSTejun Heo  * CONTEXT:
5337a357fc03SLai Jiangshan  * Grabs and releases wq_pool_mutex, wq->mutex and pool->lock's.
5338a0a1a5fdSTejun Heo  */
5339a0a1a5fdSTejun Heo void freeze_workqueues_begin(void)
5340a0a1a5fdSTejun Heo {
534124b8a847STejun Heo 	struct workqueue_struct *wq;
534224b8a847STejun Heo 	struct pool_workqueue *pwq;
5343a0a1a5fdSTejun Heo 
534468e13a67SLai Jiangshan 	mutex_lock(&wq_pool_mutex);
5345a0a1a5fdSTejun Heo 
53466183c009STejun Heo 	WARN_ON_ONCE(workqueue_freezing);
5347a0a1a5fdSTejun Heo 	workqueue_freezing = true;
5348a0a1a5fdSTejun Heo 
534924b8a847STejun Heo 	list_for_each_entry(wq, &workqueues, list) {
5350a357fc03SLai Jiangshan 		mutex_lock(&wq->mutex);
5351699ce097STejun Heo 		for_each_pwq(pwq, wq)
5352699ce097STejun Heo 			pwq_adjust_max_active(pwq);
5353a357fc03SLai Jiangshan 		mutex_unlock(&wq->mutex);
5354a1056305STejun Heo 	}
53555bcab335STejun Heo 
535668e13a67SLai Jiangshan 	mutex_unlock(&wq_pool_mutex);
5357a0a1a5fdSTejun Heo }
5358a0a1a5fdSTejun Heo 
5359a0a1a5fdSTejun Heo /**
536058a69cb4STejun Heo  * freeze_workqueues_busy - are freezable workqueues still busy?
5361a0a1a5fdSTejun Heo  *
5362a0a1a5fdSTejun Heo  * Check whether freezing is complete.  This function must be called
5363a0a1a5fdSTejun Heo  * between freeze_workqueues_begin() and thaw_workqueues().
5364a0a1a5fdSTejun Heo  *
5365a0a1a5fdSTejun Heo  * CONTEXT:
536668e13a67SLai Jiangshan  * Grabs and releases wq_pool_mutex.
5367a0a1a5fdSTejun Heo  *
5368d185af30SYacine Belkadi  * Return:
536958a69cb4STejun Heo  * %true if some freezable workqueues are still busy.  %false if freezing
537058a69cb4STejun Heo  * is complete.
5371a0a1a5fdSTejun Heo  */
5372a0a1a5fdSTejun Heo bool freeze_workqueues_busy(void)
5373a0a1a5fdSTejun Heo {
5374a0a1a5fdSTejun Heo 	bool busy = false;
537524b8a847STejun Heo 	struct workqueue_struct *wq;
537624b8a847STejun Heo 	struct pool_workqueue *pwq;
5377a0a1a5fdSTejun Heo 
537868e13a67SLai Jiangshan 	mutex_lock(&wq_pool_mutex);
5379a0a1a5fdSTejun Heo 
53806183c009STejun Heo 	WARN_ON_ONCE(!workqueue_freezing);
5381a0a1a5fdSTejun Heo 
538224b8a847STejun Heo 	list_for_each_entry(wq, &workqueues, list) {
538324b8a847STejun Heo 		if (!(wq->flags & WQ_FREEZABLE))
538424b8a847STejun Heo 			continue;
5385a0a1a5fdSTejun Heo 		/*
5386a0a1a5fdSTejun Heo 		 * nr_active is monotonically decreasing.  It's safe
5387a0a1a5fdSTejun Heo 		 * to peek without lock.
5388a0a1a5fdSTejun Heo 		 */
538924acfb71SThomas Gleixner 		rcu_read_lock();
539024b8a847STejun Heo 		for_each_pwq(pwq, wq) {
53916183c009STejun Heo 			WARN_ON_ONCE(pwq->nr_active < 0);
5392112202d9STejun Heo 			if (pwq->nr_active) {
5393a0a1a5fdSTejun Heo 				busy = true;
539424acfb71SThomas Gleixner 				rcu_read_unlock();
5395a0a1a5fdSTejun Heo 				goto out_unlock;
5396a0a1a5fdSTejun Heo 			}
5397a0a1a5fdSTejun Heo 		}
539824acfb71SThomas Gleixner 		rcu_read_unlock();
5399a0a1a5fdSTejun Heo 	}
5400a0a1a5fdSTejun Heo out_unlock:
540168e13a67SLai Jiangshan 	mutex_unlock(&wq_pool_mutex);
5402a0a1a5fdSTejun Heo 	return busy;
5403a0a1a5fdSTejun Heo }
5404a0a1a5fdSTejun Heo 
5405a0a1a5fdSTejun Heo /**
5406a0a1a5fdSTejun Heo  * thaw_workqueues - thaw workqueues
5407a0a1a5fdSTejun Heo  *
5408a0a1a5fdSTejun Heo  * Thaw workqueues.  Normal queueing is restored and all collected
5409706026c2STejun Heo  * frozen works are transferred to their respective pool worklists.
5410a0a1a5fdSTejun Heo  *
5411a0a1a5fdSTejun Heo  * CONTEXT:
5412a357fc03SLai Jiangshan  * Grabs and releases wq_pool_mutex, wq->mutex and pool->lock's.
5413a0a1a5fdSTejun Heo  */
5414a0a1a5fdSTejun Heo void thaw_workqueues(void)
5415a0a1a5fdSTejun Heo {
541624b8a847STejun Heo 	struct workqueue_struct *wq;
541724b8a847STejun Heo 	struct pool_workqueue *pwq;
5418a0a1a5fdSTejun Heo 
541968e13a67SLai Jiangshan 	mutex_lock(&wq_pool_mutex);
5420a0a1a5fdSTejun Heo 
5421a0a1a5fdSTejun Heo 	if (!workqueue_freezing)
5422a0a1a5fdSTejun Heo 		goto out_unlock;
5423a0a1a5fdSTejun Heo 
542474b414eaSLai Jiangshan 	workqueue_freezing = false;
542524b8a847STejun Heo 
542624b8a847STejun Heo 	/* restore max_active and repopulate worklist */
542724b8a847STejun Heo 	list_for_each_entry(wq, &workqueues, list) {
5428a357fc03SLai Jiangshan 		mutex_lock(&wq->mutex);
5429699ce097STejun Heo 		for_each_pwq(pwq, wq)
5430699ce097STejun Heo 			pwq_adjust_max_active(pwq);
5431a357fc03SLai Jiangshan 		mutex_unlock(&wq->mutex);
543224b8a847STejun Heo 	}
543324b8a847STejun Heo 
5434a0a1a5fdSTejun Heo out_unlock:
543568e13a67SLai Jiangshan 	mutex_unlock(&wq_pool_mutex);
5436a0a1a5fdSTejun Heo }
5437a0a1a5fdSTejun Heo #endif /* CONFIG_FREEZER */
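
/*
 * Editor's sketch of how the three freezer entry points above fit
 * together in a suspend path (loosely modeled on the system freezer;
 * the retry policy is illustrative, not the actual kernel/power code):
 *
 *	freeze_workqueues_begin();
 *	while (freeze_workqueues_busy())
 *		msleep(10);		(wait for in-flight items to drain)
 *	(system is frozen; perform the suspend transition)
 *	thaw_workqueues();
 */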
5438a0a1a5fdSTejun Heo 
543999c621efSLai Jiangshan static int workqueue_apply_unbound_cpumask(const cpumask_var_t unbound_cpumask)
5440042f7df1SLai Jiangshan {
5441042f7df1SLai Jiangshan 	LIST_HEAD(ctxs);
5442042f7df1SLai Jiangshan 	int ret = 0;
5443042f7df1SLai Jiangshan 	struct workqueue_struct *wq;
5444042f7df1SLai Jiangshan 	struct apply_wqattrs_ctx *ctx, *n;
5445042f7df1SLai Jiangshan 
5446042f7df1SLai Jiangshan 	lockdep_assert_held(&wq_pool_mutex);
5447042f7df1SLai Jiangshan 
5448042f7df1SLai Jiangshan 	list_for_each_entry(wq, &workqueues, list) {
5449042f7df1SLai Jiangshan 		if (!(wq->flags & WQ_UNBOUND))
5450042f7df1SLai Jiangshan 			continue;
5451042f7df1SLai Jiangshan 		/* creating multiple pwqs breaks the ordering guarantee */
5452042f7df1SLai Jiangshan 		if (wq->flags & __WQ_ORDERED)
5453042f7df1SLai Jiangshan 			continue;
5454042f7df1SLai Jiangshan 
545599c621efSLai Jiangshan 		ctx = apply_wqattrs_prepare(wq, wq->unbound_attrs, unbound_cpumask);
5456042f7df1SLai Jiangshan 		if (!ctx) {
5457042f7df1SLai Jiangshan 			ret = -ENOMEM;
5458042f7df1SLai Jiangshan 			break;
5459042f7df1SLai Jiangshan 		}
5460042f7df1SLai Jiangshan 
5461042f7df1SLai Jiangshan 		list_add_tail(&ctx->list, &ctxs);
5462042f7df1SLai Jiangshan 	}
5463042f7df1SLai Jiangshan 
5464042f7df1SLai Jiangshan 	list_for_each_entry_safe(ctx, n, &ctxs, list) {
5465042f7df1SLai Jiangshan 		if (!ret)
5466042f7df1SLai Jiangshan 			apply_wqattrs_commit(ctx);
5467042f7df1SLai Jiangshan 		apply_wqattrs_cleanup(ctx);
5468042f7df1SLai Jiangshan 	}
5469042f7df1SLai Jiangshan 
547099c621efSLai Jiangshan 	if (!ret) {
547199c621efSLai Jiangshan 		mutex_lock(&wq_pool_attach_mutex);
547299c621efSLai Jiangshan 		cpumask_copy(wq_unbound_cpumask, unbound_cpumask);
547399c621efSLai Jiangshan 		mutex_unlock(&wq_pool_attach_mutex);
547499c621efSLai Jiangshan 	}
5475042f7df1SLai Jiangshan 	return ret;
5476042f7df1SLai Jiangshan }
5477042f7df1SLai Jiangshan 
5478042f7df1SLai Jiangshan /**
5479042f7df1SLai Jiangshan  *  workqueue_set_unbound_cpumask - Set the low-level unbound cpumask
5480042f7df1SLai Jiangshan  *  @cpumask: the cpumask to set
5481042f7df1SLai Jiangshan  *
5482042f7df1SLai Jiangshan  *  The low-level workqueues cpumask is a global cpumask that limits
5483042f7df1SLai Jiangshan  *  the affinity of all unbound workqueues.  This function checks the @cpumask,
5484042f7df1SLai Jiangshan  *  applies it to all unbound workqueues and updates all of their pwqs.
5485042f7df1SLai Jiangshan  *
548667dc8325SCai Huoqing  *  Return:	0	- Success
5487042f7df1SLai Jiangshan  *  		-EINVAL	- Invalid @cpumask
5488042f7df1SLai Jiangshan  *  		-ENOMEM	- Failed to allocate memory for attrs or pwqs.
5489042f7df1SLai Jiangshan  */
5490042f7df1SLai Jiangshan int workqueue_set_unbound_cpumask(cpumask_var_t cpumask)
5491042f7df1SLai Jiangshan {
5492042f7df1SLai Jiangshan 	int ret = -EINVAL;
5493042f7df1SLai Jiangshan 
5494c98a9805STal Shorer 	/*
5495c98a9805STal Shorer 	 * Not excluding isolated cpus on purpose.
5496c98a9805STal Shorer 	 * If the user wishes to include them, we allow that.
5497c98a9805STal Shorer 	 */
5498042f7df1SLai Jiangshan 	cpumask_and(cpumask, cpumask, cpu_possible_mask);
5499042f7df1SLai Jiangshan 	if (!cpumask_empty(cpumask)) {
5500a0111cf6SLai Jiangshan 		apply_wqattrs_lock();
5501d25302e4SMenglong Dong 		if (cpumask_equal(cpumask, wq_unbound_cpumask)) {
5502d25302e4SMenglong Dong 			ret = 0;
5503d25302e4SMenglong Dong 			goto out_unlock;
5504d25302e4SMenglong Dong 		}
5505d25302e4SMenglong Dong 
550699c621efSLai Jiangshan 		ret = workqueue_apply_unbound_cpumask(cpumask);
5507042f7df1SLai Jiangshan 
5508d25302e4SMenglong Dong out_unlock:
5509a0111cf6SLai Jiangshan 		apply_wqattrs_unlock();
5510042f7df1SLai Jiangshan 	}
5511042f7df1SLai Jiangshan 
5512042f7df1SLai Jiangshan 	return ret;
5513042f7df1SLai Jiangshan }
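
/*
 * Example (editor's sketch): restrict all unbound workqueues to CPUs
 * 0-3.  This mirrors what the sysfs store handler below does; error
 * handling is abbreviated:
 *
 *	cpumask_var_t mask;
 *
 *	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
 *		return -ENOMEM;
 *	cpumask_parse("0-3", mask);
 *	ret = workqueue_set_unbound_cpumask(mask);
 *	free_cpumask_var(mask);
 */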
5514042f7df1SLai Jiangshan 
55156ba94429SFrederic Weisbecker #ifdef CONFIG_SYSFS
55166ba94429SFrederic Weisbecker /*
55176ba94429SFrederic Weisbecker  * Workqueues with the WQ_SYSFS flag set are visible to userland via
55186ba94429SFrederic Weisbecker  * /sys/bus/workqueue/devices/WQ_NAME.  All visible workqueues have the
55196ba94429SFrederic Weisbecker  * following attributes.
55206ba94429SFrederic Weisbecker  *
55216ba94429SFrederic Weisbecker  *  per_cpu	RO bool	: whether the workqueue is per-cpu or unbound
55226ba94429SFrederic Weisbecker  *  max_active	RW int	: maximum number of in-flight work items
55236ba94429SFrederic Weisbecker  *
55246ba94429SFrederic Weisbecker  * Unbound workqueues have the following extra attributes.
55256ba94429SFrederic Weisbecker  *
55269a19b463SWang Long  *  pool_ids	RO int	: the associated pool IDs for each node
55276ba94429SFrederic Weisbecker  *  nice	RW int	: nice value of the workers
55286ba94429SFrederic Weisbecker  *  cpumask	RW mask	: bitmask of allowed CPUs for the workers
55299a19b463SWang Long  *  numa	RW bool	: whether to enable NUMA affinity
55306ba94429SFrederic Weisbecker  */
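
/*
 * Example (editor's illustration): for a WQ_SYSFS workqueue named
 * "writeback", the attributes above can be read and tuned from
 * userspace:
 *
 *	cat /sys/bus/workqueue/devices/writeback/max_active
 *	echo 4 > /sys/bus/workqueue/devices/writeback/max_active
 *	echo 0-3 > /sys/bus/workqueue/devices/writeback/cpumask
 */
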
55316ba94429SFrederic Weisbecker struct wq_device {
55326ba94429SFrederic Weisbecker 	struct workqueue_struct		*wq;
55336ba94429SFrederic Weisbecker 	struct device			dev;
55346ba94429SFrederic Weisbecker };
55356ba94429SFrederic Weisbecker 
55366ba94429SFrederic Weisbecker static struct workqueue_struct *dev_to_wq(struct device *dev)
55376ba94429SFrederic Weisbecker {
55386ba94429SFrederic Weisbecker 	struct wq_device *wq_dev = container_of(dev, struct wq_device, dev);
55396ba94429SFrederic Weisbecker 
55406ba94429SFrederic Weisbecker 	return wq_dev->wq;
55416ba94429SFrederic Weisbecker }
55426ba94429SFrederic Weisbecker 
55436ba94429SFrederic Weisbecker static ssize_t per_cpu_show(struct device *dev, struct device_attribute *attr,
55446ba94429SFrederic Weisbecker 			    char *buf)
55456ba94429SFrederic Weisbecker {
55466ba94429SFrederic Weisbecker 	struct workqueue_struct *wq = dev_to_wq(dev);
55476ba94429SFrederic Weisbecker 
55486ba94429SFrederic Weisbecker 	return scnprintf(buf, PAGE_SIZE, "%d\n", (bool)!(wq->flags & WQ_UNBOUND));
55496ba94429SFrederic Weisbecker }
55506ba94429SFrederic Weisbecker static DEVICE_ATTR_RO(per_cpu);
55516ba94429SFrederic Weisbecker 
55526ba94429SFrederic Weisbecker static ssize_t max_active_show(struct device *dev,
55536ba94429SFrederic Weisbecker 			       struct device_attribute *attr, char *buf)
55546ba94429SFrederic Weisbecker {
55556ba94429SFrederic Weisbecker 	struct workqueue_struct *wq = dev_to_wq(dev);
55566ba94429SFrederic Weisbecker 
55576ba94429SFrederic Weisbecker 	return scnprintf(buf, PAGE_SIZE, "%d\n", wq->saved_max_active);
55586ba94429SFrederic Weisbecker }
55596ba94429SFrederic Weisbecker 
55606ba94429SFrederic Weisbecker static ssize_t max_active_store(struct device *dev,
55616ba94429SFrederic Weisbecker 				struct device_attribute *attr, const char *buf,
55626ba94429SFrederic Weisbecker 				size_t count)
55636ba94429SFrederic Weisbecker {
55646ba94429SFrederic Weisbecker 	struct workqueue_struct *wq = dev_to_wq(dev);
55656ba94429SFrederic Weisbecker 	int val;
55666ba94429SFrederic Weisbecker 
55676ba94429SFrederic Weisbecker 	if (sscanf(buf, "%d", &val) != 1 || val <= 0)
55686ba94429SFrederic Weisbecker 		return -EINVAL;
55696ba94429SFrederic Weisbecker 
55706ba94429SFrederic Weisbecker 	workqueue_set_max_active(wq, val);
55716ba94429SFrederic Weisbecker 	return count;
55726ba94429SFrederic Weisbecker }
55736ba94429SFrederic Weisbecker static DEVICE_ATTR_RW(max_active);
55746ba94429SFrederic Weisbecker 
55756ba94429SFrederic Weisbecker static struct attribute *wq_sysfs_attrs[] = {
55766ba94429SFrederic Weisbecker 	&dev_attr_per_cpu.attr,
55776ba94429SFrederic Weisbecker 	&dev_attr_max_active.attr,
55786ba94429SFrederic Weisbecker 	NULL,
55796ba94429SFrederic Weisbecker };
55806ba94429SFrederic Weisbecker ATTRIBUTE_GROUPS(wq_sysfs);
55816ba94429SFrederic Weisbecker 
55826ba94429SFrederic Weisbecker static ssize_t wq_pool_ids_show(struct device *dev,
55836ba94429SFrederic Weisbecker 				struct device_attribute *attr, char *buf)
55846ba94429SFrederic Weisbecker {
55856ba94429SFrederic Weisbecker 	struct workqueue_struct *wq = dev_to_wq(dev);
55866ba94429SFrederic Weisbecker 	const char *delim = "";
55876ba94429SFrederic Weisbecker 	int node, written = 0;
55886ba94429SFrederic Weisbecker 
5589ffd8bea8SSebastian Andrzej Siewior 	cpus_read_lock();
559024acfb71SThomas Gleixner 	rcu_read_lock();
55916ba94429SFrederic Weisbecker 	for_each_node(node) {
55926ba94429SFrederic Weisbecker 		written += scnprintf(buf + written, PAGE_SIZE - written,
55936ba94429SFrederic Weisbecker 				     "%s%d:%d", delim, node,
55946ba94429SFrederic Weisbecker 				     unbound_pwq_by_node(wq, node)->pool->id);
55956ba94429SFrederic Weisbecker 		delim = " ";
55966ba94429SFrederic Weisbecker 	}
55976ba94429SFrederic Weisbecker 	written += scnprintf(buf + written, PAGE_SIZE - written, "\n");
559824acfb71SThomas Gleixner 	rcu_read_unlock();
5599ffd8bea8SSebastian Andrzej Siewior 	cpus_read_unlock();
56006ba94429SFrederic Weisbecker 
56016ba94429SFrederic Weisbecker 	return written;
56026ba94429SFrederic Weisbecker }
56036ba94429SFrederic Weisbecker 
56046ba94429SFrederic Weisbecker static ssize_t wq_nice_show(struct device *dev, struct device_attribute *attr,
56056ba94429SFrederic Weisbecker 			    char *buf)
56066ba94429SFrederic Weisbecker {
56076ba94429SFrederic Weisbecker 	struct workqueue_struct *wq = dev_to_wq(dev);
56086ba94429SFrederic Weisbecker 	int written;
56096ba94429SFrederic Weisbecker 
56106ba94429SFrederic Weisbecker 	mutex_lock(&wq->mutex);
56116ba94429SFrederic Weisbecker 	written = scnprintf(buf, PAGE_SIZE, "%d\n", wq->unbound_attrs->nice);
56126ba94429SFrederic Weisbecker 	mutex_unlock(&wq->mutex);
56136ba94429SFrederic Weisbecker 
56146ba94429SFrederic Weisbecker 	return written;
56156ba94429SFrederic Weisbecker }
56166ba94429SFrederic Weisbecker 
56176ba94429SFrederic Weisbecker /* prepare workqueue_attrs for sysfs store operations */
56186ba94429SFrederic Weisbecker static struct workqueue_attrs *wq_sysfs_prep_attrs(struct workqueue_struct *wq)
56196ba94429SFrederic Weisbecker {
56206ba94429SFrederic Weisbecker 	struct workqueue_attrs *attrs;
56216ba94429SFrederic Weisbecker 
5622899a94feSLai Jiangshan 	lockdep_assert_held(&wq_pool_mutex);
5623899a94feSLai Jiangshan 
5624be69d00dSThomas Gleixner 	attrs = alloc_workqueue_attrs();
56256ba94429SFrederic Weisbecker 	if (!attrs)
56266ba94429SFrederic Weisbecker 		return NULL;
56276ba94429SFrederic Weisbecker 
56286ba94429SFrederic Weisbecker 	copy_workqueue_attrs(attrs, wq->unbound_attrs);
56296ba94429SFrederic Weisbecker 	return attrs;
56306ba94429SFrederic Weisbecker }
56316ba94429SFrederic Weisbecker 
56326ba94429SFrederic Weisbecker static ssize_t wq_nice_store(struct device *dev, struct device_attribute *attr,
56336ba94429SFrederic Weisbecker 			     const char *buf, size_t count)
56346ba94429SFrederic Weisbecker {
56356ba94429SFrederic Weisbecker 	struct workqueue_struct *wq = dev_to_wq(dev);
56366ba94429SFrederic Weisbecker 	struct workqueue_attrs *attrs;
5637d4d3e257SLai Jiangshan 	int ret = -ENOMEM;
5638d4d3e257SLai Jiangshan 
5639d4d3e257SLai Jiangshan 	apply_wqattrs_lock();
56406ba94429SFrederic Weisbecker 
56416ba94429SFrederic Weisbecker 	attrs = wq_sysfs_prep_attrs(wq);
56426ba94429SFrederic Weisbecker 	if (!attrs)
5643d4d3e257SLai Jiangshan 		goto out_unlock;
56446ba94429SFrederic Weisbecker 
56456ba94429SFrederic Weisbecker 	if (sscanf(buf, "%d", &attrs->nice) == 1 &&
56466ba94429SFrederic Weisbecker 	    attrs->nice >= MIN_NICE && attrs->nice <= MAX_NICE)
5647d4d3e257SLai Jiangshan 		ret = apply_workqueue_attrs_locked(wq, attrs);
56486ba94429SFrederic Weisbecker 	else
56496ba94429SFrederic Weisbecker 		ret = -EINVAL;
56506ba94429SFrederic Weisbecker 
5651d4d3e257SLai Jiangshan out_unlock:
5652d4d3e257SLai Jiangshan 	apply_wqattrs_unlock();
56536ba94429SFrederic Weisbecker 	free_workqueue_attrs(attrs);
56546ba94429SFrederic Weisbecker 	return ret ?: count;
56556ba94429SFrederic Weisbecker }
56566ba94429SFrederic Weisbecker 
56576ba94429SFrederic Weisbecker static ssize_t wq_cpumask_show(struct device *dev,
56586ba94429SFrederic Weisbecker 			       struct device_attribute *attr, char *buf)
56596ba94429SFrederic Weisbecker {
56606ba94429SFrederic Weisbecker 	struct workqueue_struct *wq = dev_to_wq(dev);
56616ba94429SFrederic Weisbecker 	int written;
56626ba94429SFrederic Weisbecker 
56636ba94429SFrederic Weisbecker 	mutex_lock(&wq->mutex);
56646ba94429SFrederic Weisbecker 	written = scnprintf(buf, PAGE_SIZE, "%*pb\n",
56656ba94429SFrederic Weisbecker 			    cpumask_pr_args(wq->unbound_attrs->cpumask));
56666ba94429SFrederic Weisbecker 	mutex_unlock(&wq->mutex);
56676ba94429SFrederic Weisbecker 	return written;
56686ba94429SFrederic Weisbecker }
56696ba94429SFrederic Weisbecker 
56706ba94429SFrederic Weisbecker static ssize_t wq_cpumask_store(struct device *dev,
56716ba94429SFrederic Weisbecker 				struct device_attribute *attr,
56726ba94429SFrederic Weisbecker 				const char *buf, size_t count)
56736ba94429SFrederic Weisbecker {
56746ba94429SFrederic Weisbecker 	struct workqueue_struct *wq = dev_to_wq(dev);
56756ba94429SFrederic Weisbecker 	struct workqueue_attrs *attrs;
5676d4d3e257SLai Jiangshan 	int ret = -ENOMEM;
5677d4d3e257SLai Jiangshan 
5678d4d3e257SLai Jiangshan 	apply_wqattrs_lock();
56796ba94429SFrederic Weisbecker 
56806ba94429SFrederic Weisbecker 	attrs = wq_sysfs_prep_attrs(wq);
56816ba94429SFrederic Weisbecker 	if (!attrs)
5682d4d3e257SLai Jiangshan 		goto out_unlock;
56836ba94429SFrederic Weisbecker 
56846ba94429SFrederic Weisbecker 	ret = cpumask_parse(buf, attrs->cpumask);
56856ba94429SFrederic Weisbecker 	if (!ret)
5686d4d3e257SLai Jiangshan 		ret = apply_workqueue_attrs_locked(wq, attrs);
56876ba94429SFrederic Weisbecker 
5688d4d3e257SLai Jiangshan out_unlock:
5689d4d3e257SLai Jiangshan 	apply_wqattrs_unlock();
56906ba94429SFrederic Weisbecker 	free_workqueue_attrs(attrs);
56916ba94429SFrederic Weisbecker 	return ret ?: count;
56926ba94429SFrederic Weisbecker }
56936ba94429SFrederic Weisbecker 
56946ba94429SFrederic Weisbecker static ssize_t wq_numa_show(struct device *dev, struct device_attribute *attr,
56956ba94429SFrederic Weisbecker 			    char *buf)
56966ba94429SFrederic Weisbecker {
56976ba94429SFrederic Weisbecker 	struct workqueue_struct *wq = dev_to_wq(dev);
56986ba94429SFrederic Weisbecker 	int written;
56996ba94429SFrederic Weisbecker 
57006ba94429SFrederic Weisbecker 	mutex_lock(&wq->mutex);
57016ba94429SFrederic Weisbecker 	written = scnprintf(buf, PAGE_SIZE, "%d\n",
57026ba94429SFrederic Weisbecker 			    !wq->unbound_attrs->no_numa);
57036ba94429SFrederic Weisbecker 	mutex_unlock(&wq->mutex);
57046ba94429SFrederic Weisbecker 
57056ba94429SFrederic Weisbecker 	return written;
57066ba94429SFrederic Weisbecker }
57076ba94429SFrederic Weisbecker 
57086ba94429SFrederic Weisbecker static ssize_t wq_numa_store(struct device *dev, struct device_attribute *attr,
57096ba94429SFrederic Weisbecker 			     const char *buf, size_t count)
57106ba94429SFrederic Weisbecker {
57116ba94429SFrederic Weisbecker 	struct workqueue_struct *wq = dev_to_wq(dev);
57126ba94429SFrederic Weisbecker 	struct workqueue_attrs *attrs;
5713d4d3e257SLai Jiangshan 	int v, ret = -ENOMEM;
5714d4d3e257SLai Jiangshan 
5715d4d3e257SLai Jiangshan 	apply_wqattrs_lock();
57166ba94429SFrederic Weisbecker 
57176ba94429SFrederic Weisbecker 	attrs = wq_sysfs_prep_attrs(wq);
57186ba94429SFrederic Weisbecker 	if (!attrs)
5719d4d3e257SLai Jiangshan 		goto out_unlock;
57206ba94429SFrederic Weisbecker 
57216ba94429SFrederic Weisbecker 	ret = -EINVAL;
57226ba94429SFrederic Weisbecker 	if (sscanf(buf, "%d", &v) == 1) {
57236ba94429SFrederic Weisbecker 		attrs->no_numa = !v;
5724d4d3e257SLai Jiangshan 		ret = apply_workqueue_attrs_locked(wq, attrs);
57256ba94429SFrederic Weisbecker 	}
57266ba94429SFrederic Weisbecker 
5727d4d3e257SLai Jiangshan out_unlock:
5728d4d3e257SLai Jiangshan 	apply_wqattrs_unlock();
57296ba94429SFrederic Weisbecker 	free_workqueue_attrs(attrs);
57306ba94429SFrederic Weisbecker 	return ret ?: count;
57316ba94429SFrederic Weisbecker }
57326ba94429SFrederic Weisbecker 
57336ba94429SFrederic Weisbecker static struct device_attribute wq_sysfs_unbound_attrs[] = {
57346ba94429SFrederic Weisbecker 	__ATTR(pool_ids, 0444, wq_pool_ids_show, NULL),
57356ba94429SFrederic Weisbecker 	__ATTR(nice, 0644, wq_nice_show, wq_nice_store),
57366ba94429SFrederic Weisbecker 	__ATTR(cpumask, 0644, wq_cpumask_show, wq_cpumask_store),
57376ba94429SFrederic Weisbecker 	__ATTR(numa, 0644, wq_numa_show, wq_numa_store),
57386ba94429SFrederic Weisbecker 	__ATTR_NULL,
57396ba94429SFrederic Weisbecker };
57406ba94429SFrederic Weisbecker 
57416ba94429SFrederic Weisbecker static struct bus_type wq_subsys = {
57426ba94429SFrederic Weisbecker 	.name				= "workqueue",
57436ba94429SFrederic Weisbecker 	.dev_groups			= wq_sysfs_groups,
57446ba94429SFrederic Weisbecker };
57456ba94429SFrederic Weisbecker 
5746b05a7928SFrederic Weisbecker static ssize_t wq_unbound_cpumask_show(struct device *dev,
5747b05a7928SFrederic Weisbecker 		struct device_attribute *attr, char *buf)
5748b05a7928SFrederic Weisbecker {
5749b05a7928SFrederic Weisbecker 	int written;
5750b05a7928SFrederic Weisbecker 
5751042f7df1SLai Jiangshan 	mutex_lock(&wq_pool_mutex);
5752b05a7928SFrederic Weisbecker 	written = scnprintf(buf, PAGE_SIZE, "%*pb\n",
5753b05a7928SFrederic Weisbecker 			    cpumask_pr_args(wq_unbound_cpumask));
5754042f7df1SLai Jiangshan 	mutex_unlock(&wq_pool_mutex);
5755b05a7928SFrederic Weisbecker 
5756b05a7928SFrederic Weisbecker 	return written;
5757b05a7928SFrederic Weisbecker }
5758b05a7928SFrederic Weisbecker 
5759042f7df1SLai Jiangshan static ssize_t wq_unbound_cpumask_store(struct device *dev,
5760042f7df1SLai Jiangshan 		struct device_attribute *attr, const char *buf, size_t count)
5761042f7df1SLai Jiangshan {
5762042f7df1SLai Jiangshan 	cpumask_var_t cpumask;
5763042f7df1SLai Jiangshan 	int ret;
5764042f7df1SLai Jiangshan 
5765042f7df1SLai Jiangshan 	if (!zalloc_cpumask_var(&cpumask, GFP_KERNEL))
5766042f7df1SLai Jiangshan 		return -ENOMEM;
5767042f7df1SLai Jiangshan 
5768042f7df1SLai Jiangshan 	ret = cpumask_parse(buf, cpumask);
5769042f7df1SLai Jiangshan 	if (!ret)
5770042f7df1SLai Jiangshan 		ret = workqueue_set_unbound_cpumask(cpumask);
5771042f7df1SLai Jiangshan 
5772042f7df1SLai Jiangshan 	free_cpumask_var(cpumask);
5773042f7df1SLai Jiangshan 	return ret ? ret : count;
5774042f7df1SLai Jiangshan }
5775042f7df1SLai Jiangshan 
5776b05a7928SFrederic Weisbecker static struct device_attribute wq_sysfs_cpumask_attr =
5777042f7df1SLai Jiangshan 	__ATTR(cpumask, 0644, wq_unbound_cpumask_show,
5778042f7df1SLai Jiangshan 	       wq_unbound_cpumask_store);
5779b05a7928SFrederic Weisbecker 
57806ba94429SFrederic Weisbecker static int __init wq_sysfs_init(void)
57816ba94429SFrederic Weisbecker {
5782b05a7928SFrederic Weisbecker 	int err;
5783b05a7928SFrederic Weisbecker 
5784b05a7928SFrederic Weisbecker 	err = subsys_virtual_register(&wq_subsys, NULL);
5785b05a7928SFrederic Weisbecker 	if (err)
5786b05a7928SFrederic Weisbecker 		return err;
5787b05a7928SFrederic Weisbecker 
5788b05a7928SFrederic Weisbecker 	return device_create_file(wq_subsys.dev_root, &wq_sysfs_cpumask_attr);
57896ba94429SFrederic Weisbecker }
57906ba94429SFrederic Weisbecker core_initcall(wq_sysfs_init);
57916ba94429SFrederic Weisbecker 
57926ba94429SFrederic Weisbecker static void wq_device_release(struct device *dev)
57936ba94429SFrederic Weisbecker {
57946ba94429SFrederic Weisbecker 	struct wq_device *wq_dev = container_of(dev, struct wq_device, dev);
57956ba94429SFrederic Weisbecker 
57966ba94429SFrederic Weisbecker 	kfree(wq_dev);
57976ba94429SFrederic Weisbecker }
57986ba94429SFrederic Weisbecker 
57996ba94429SFrederic Weisbecker /**
58006ba94429SFrederic Weisbecker  * workqueue_sysfs_register - make a workqueue visible in sysfs
58016ba94429SFrederic Weisbecker  * @wq: the workqueue to register
58026ba94429SFrederic Weisbecker  *
58036ba94429SFrederic Weisbecker  * Expose @wq in sysfs under /sys/bus/workqueue/devices.
58046ba94429SFrederic Weisbecker  * alloc_workqueue*() automatically calls this function if WQ_SYSFS is set
58056ba94429SFrederic Weisbecker  * which is the preferred method.
58066ba94429SFrederic Weisbecker  *
58076ba94429SFrederic Weisbecker  * A workqueue user should use this function directly iff it wants to apply
58086ba94429SFrederic Weisbecker  * workqueue_attrs before making the workqueue visible in sysfs; otherwise,
58096ba94429SFrederic Weisbecker  * apply_workqueue_attrs() may race against userland updating the
58106ba94429SFrederic Weisbecker  * attributes.
58116ba94429SFrederic Weisbecker  *
58126ba94429SFrederic Weisbecker  * Return: 0 on success, -errno on failure.
58136ba94429SFrederic Weisbecker  */
58146ba94429SFrederic Weisbecker int workqueue_sysfs_register(struct workqueue_struct *wq)
58156ba94429SFrederic Weisbecker {
58166ba94429SFrederic Weisbecker 	struct wq_device *wq_dev;
58176ba94429SFrederic Weisbecker 	int ret;
58186ba94429SFrederic Weisbecker 
58196ba94429SFrederic Weisbecker 	/*
5820402dd89dSShailendra Verma 	 * Adjusting max_active or creating new pwqs by applying
58216ba94429SFrederic Weisbecker 	 * attributes breaks the ordering guarantee.  Disallow exposing ordered
58226ba94429SFrederic Weisbecker 	 * workqueues.
58236ba94429SFrederic Weisbecker 	 */
58240a94efb5STejun Heo 	if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
58256ba94429SFrederic Weisbecker 		return -EINVAL;
58266ba94429SFrederic Weisbecker 
58276ba94429SFrederic Weisbecker 	wq->wq_dev = wq_dev = kzalloc(sizeof(*wq_dev), GFP_KERNEL);
58286ba94429SFrederic Weisbecker 	if (!wq_dev)
58296ba94429SFrederic Weisbecker 		return -ENOMEM;
58306ba94429SFrederic Weisbecker 
58316ba94429SFrederic Weisbecker 	wq_dev->wq = wq;
58326ba94429SFrederic Weisbecker 	wq_dev->dev.bus = &wq_subsys;
58336ba94429SFrederic Weisbecker 	wq_dev->dev.release = wq_device_release;
583423217b44SLars-Peter Clausen 	dev_set_name(&wq_dev->dev, "%s", wq->name);
58356ba94429SFrederic Weisbecker 
58366ba94429SFrederic Weisbecker 	/*
58376ba94429SFrederic Weisbecker 	 * unbound_attrs are created separately.  Suppress uevent until
58386ba94429SFrederic Weisbecker 	 * everything is ready.
58396ba94429SFrederic Weisbecker 	 */
58406ba94429SFrederic Weisbecker 	dev_set_uevent_suppress(&wq_dev->dev, true);
58416ba94429SFrederic Weisbecker 
58426ba94429SFrederic Weisbecker 	ret = device_register(&wq_dev->dev);
58436ba94429SFrederic Weisbecker 	if (ret) {
5844537f4146SArvind Yadav 		put_device(&wq_dev->dev);
58456ba94429SFrederic Weisbecker 		wq->wq_dev = NULL;
58466ba94429SFrederic Weisbecker 		return ret;
58476ba94429SFrederic Weisbecker 	}
58486ba94429SFrederic Weisbecker 
58496ba94429SFrederic Weisbecker 	if (wq->flags & WQ_UNBOUND) {
58506ba94429SFrederic Weisbecker 		struct device_attribute *attr;
58516ba94429SFrederic Weisbecker 
58526ba94429SFrederic Weisbecker 		for (attr = wq_sysfs_unbound_attrs; attr->attr.name; attr++) {
58536ba94429SFrederic Weisbecker 			ret = device_create_file(&wq_dev->dev, attr);
58546ba94429SFrederic Weisbecker 			if (ret) {
58556ba94429SFrederic Weisbecker 				device_unregister(&wq_dev->dev);
58566ba94429SFrederic Weisbecker 				wq->wq_dev = NULL;
58576ba94429SFrederic Weisbecker 				return ret;
58586ba94429SFrederic Weisbecker 			}
58596ba94429SFrederic Weisbecker 		}
58606ba94429SFrederic Weisbecker 	}
58616ba94429SFrederic Weisbecker 
58626ba94429SFrederic Weisbecker 	dev_set_uevent_suppress(&wq_dev->dev, false);
58636ba94429SFrederic Weisbecker 	kobject_uevent(&wq_dev->dev.kobj, KOBJ_ADD);
58646ba94429SFrederic Weisbecker 	return 0;
58656ba94429SFrederic Weisbecker }
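
/*
 * Example (editor's sketch): a user that wants to tune attributes
 * before the workqueue becomes visible allocates it without WQ_SYSFS
 * and registers explicitly afterwards (attrs setup elided):
 *
 *	wq = alloc_workqueue("my_wq", WQ_UNBOUND, 0);
 *	(apply_workqueue_attrs(wq, attrs) ...)
 *	ret = workqueue_sysfs_register(wq);
 */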
58666ba94429SFrederic Weisbecker 
58676ba94429SFrederic Weisbecker /**
58686ba94429SFrederic Weisbecker  * workqueue_sysfs_unregister - undo workqueue_sysfs_register()
58696ba94429SFrederic Weisbecker  * @wq: the workqueue to unregister
58706ba94429SFrederic Weisbecker  *
58716ba94429SFrederic Weisbecker  * If @wq is registered to sysfs by workqueue_sysfs_register(), unregister.
58726ba94429SFrederic Weisbecker  */
58736ba94429SFrederic Weisbecker static void workqueue_sysfs_unregister(struct workqueue_struct *wq)
58746ba94429SFrederic Weisbecker {
58756ba94429SFrederic Weisbecker 	struct wq_device *wq_dev = wq->wq_dev;
58766ba94429SFrederic Weisbecker 
58776ba94429SFrederic Weisbecker 	if (!wq->wq_dev)
58786ba94429SFrederic Weisbecker 		return;
58796ba94429SFrederic Weisbecker 
58806ba94429SFrederic Weisbecker 	wq->wq_dev = NULL;
58816ba94429SFrederic Weisbecker 	device_unregister(&wq_dev->dev);
58826ba94429SFrederic Weisbecker }
58836ba94429SFrederic Weisbecker #else	/* CONFIG_SYSFS */
58846ba94429SFrederic Weisbecker static void workqueue_sysfs_unregister(struct workqueue_struct *wq)	{ }
58856ba94429SFrederic Weisbecker #endif	/* CONFIG_SYSFS */
58866ba94429SFrederic Weisbecker 
588782607adcSTejun Heo /*
588882607adcSTejun Heo  * Workqueue watchdog.
588982607adcSTejun Heo  *
589082607adcSTejun Heo  * A stall may be caused by various bugs - a missing WQ_MEM_RECLAIM, an
589182607adcSTejun Heo  * illegal flush dependency, or a concurrency-managed work item which stays
589282607adcSTejun Heo  * RUNNING indefinitely.  Workqueue stalls can be very difficult to debug as the
589382607adcSTejun Heo  * usual warning mechanisms don't trigger and internal workqueue state is
589482607adcSTejun Heo  * largely opaque.
589582607adcSTejun Heo  *
589682607adcSTejun Heo  * The workqueue watchdog monitors all worker pools periodically and dumps
589782607adcSTejun Heo  * their state if some pools fail to make forward progress for a while,
589882607adcSTejun Heo  * where forward progress is defined as the first item on ->worklist changing.
589982607adcSTejun Heo  *
590082607adcSTejun Heo  * This mechanism is controlled through the kernel parameter
590182607adcSTejun Heo  * "workqueue.watchdog_thresh" which can be updated at runtime through the
590282607adcSTejun Heo  * corresponding sysfs parameter file.
590382607adcSTejun Heo  */
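
/*
 * Example (editor's illustration): raise the stall threshold to 60s,
 * either on the kernel command line or at runtime via the parameter
 * file mentioned above:
 *
 *	workqueue.watchdog_thresh=60
 *	echo 60 > /sys/module/workqueue/parameters/watchdog_thresh
 */
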
590482607adcSTejun Heo #ifdef CONFIG_WQ_WATCHDOG
590582607adcSTejun Heo 
590682607adcSTejun Heo static unsigned long wq_watchdog_thresh = 30;
59075cd79d6aSKees Cook static struct timer_list wq_watchdog_timer;
590882607adcSTejun Heo 
590982607adcSTejun Heo static unsigned long wq_watchdog_touched = INITIAL_JIFFIES;
591082607adcSTejun Heo static DEFINE_PER_CPU(unsigned long, wq_watchdog_touched_cpu) = INITIAL_JIFFIES;
591182607adcSTejun Heo 
591282607adcSTejun Heo static void wq_watchdog_reset_touched(void)
591382607adcSTejun Heo {
591482607adcSTejun Heo 	int cpu;
591582607adcSTejun Heo 
591682607adcSTejun Heo 	wq_watchdog_touched = jiffies;
591782607adcSTejun Heo 	for_each_possible_cpu(cpu)
591882607adcSTejun Heo 		per_cpu(wq_watchdog_touched_cpu, cpu) = jiffies;
591982607adcSTejun Heo }
592082607adcSTejun Heo 
59215cd79d6aSKees Cook static void wq_watchdog_timer_fn(struct timer_list *unused)
592282607adcSTejun Heo {
592382607adcSTejun Heo 	unsigned long thresh = READ_ONCE(wq_watchdog_thresh) * HZ;
592482607adcSTejun Heo 	bool lockup_detected = false;
5925940d71c6SSergey Senozhatsky 	unsigned long now = jiffies;
592682607adcSTejun Heo 	struct worker_pool *pool;
592782607adcSTejun Heo 	int pi;
592882607adcSTejun Heo 
592982607adcSTejun Heo 	if (!thresh)
593082607adcSTejun Heo 		return;
593182607adcSTejun Heo 
593282607adcSTejun Heo 	rcu_read_lock();
593382607adcSTejun Heo 
593482607adcSTejun Heo 	for_each_pool(pool, pi) {
593582607adcSTejun Heo 		unsigned long pool_ts, touched, ts;
593682607adcSTejun Heo 
593782607adcSTejun Heo 		if (list_empty(&pool->worklist))
593882607adcSTejun Heo 			continue;
593982607adcSTejun Heo 
5940940d71c6SSergey Senozhatsky 		/*
5941940d71c6SSergey Senozhatsky 		 * If a virtual machine is stopped by the host, it can look
5942940d71c6SSergey Senozhatsky 		 * like a stall to the watchdog.
5943940d71c6SSergey Senozhatsky 		 */
5944940d71c6SSergey Senozhatsky 		kvm_check_and_clear_guest_paused();
5945940d71c6SSergey Senozhatsky 
594682607adcSTejun Heo 		/* get the latest of pool and touched timestamps */
594789e28ce6SWang Qing 		if (pool->cpu >= 0)
594889e28ce6SWang Qing 			touched = READ_ONCE(per_cpu(wq_watchdog_touched_cpu, pool->cpu));
594989e28ce6SWang Qing 		else
595082607adcSTejun Heo 			touched = READ_ONCE(wq_watchdog_touched);
595189e28ce6SWang Qing 		pool_ts = READ_ONCE(pool->watchdog_ts);
595282607adcSTejun Heo 
595382607adcSTejun Heo 		if (time_after(pool_ts, touched))
595482607adcSTejun Heo 			ts = pool_ts;
595582607adcSTejun Heo 		else
595682607adcSTejun Heo 			ts = touched;
595782607adcSTejun Heo 
595882607adcSTejun Heo 		/* did we stall? */
5959940d71c6SSergey Senozhatsky 		if (time_after(now, ts + thresh)) {
596082607adcSTejun Heo 			lockup_detected = true;
596182607adcSTejun Heo 			pr_emerg("BUG: workqueue lockup - pool");
596282607adcSTejun Heo 			pr_cont_pool_info(pool);
596382607adcSTejun Heo 			pr_cont(" stuck for %us!\n",
5964940d71c6SSergey Senozhatsky 				jiffies_to_msecs(now - pool_ts) / 1000);
596582607adcSTejun Heo 		}
596682607adcSTejun Heo 	}
596782607adcSTejun Heo 
596882607adcSTejun Heo 	rcu_read_unlock();
596982607adcSTejun Heo 
597082607adcSTejun Heo 	if (lockup_detected)
597155df0933SImran Khan 		show_all_workqueues();
597282607adcSTejun Heo 
597382607adcSTejun Heo 	wq_watchdog_reset_touched();
597482607adcSTejun Heo 	mod_timer(&wq_watchdog_timer, jiffies + thresh);
597582607adcSTejun Heo }
597682607adcSTejun Heo 
5977cb9d7fd5SVincent Whitchurch notrace void wq_watchdog_touch(int cpu)
597882607adcSTejun Heo {
597982607adcSTejun Heo 	if (cpu >= 0)
598082607adcSTejun Heo 		per_cpu(wq_watchdog_touched_cpu, cpu) = jiffies;
598189e28ce6SWang Qing 
598282607adcSTejun Heo 	wq_watchdog_touched = jiffies;
598382607adcSTejun Heo }
598482607adcSTejun Heo 
598582607adcSTejun Heo static void wq_watchdog_set_thresh(unsigned long thresh)
598682607adcSTejun Heo {
598782607adcSTejun Heo 	wq_watchdog_thresh = 0;
598882607adcSTejun Heo 	del_timer_sync(&wq_watchdog_timer);
598982607adcSTejun Heo 
599082607adcSTejun Heo 	if (thresh) {
599182607adcSTejun Heo 		wq_watchdog_thresh = thresh;
599282607adcSTejun Heo 		wq_watchdog_reset_touched();
599382607adcSTejun Heo 		mod_timer(&wq_watchdog_timer, jiffies + thresh * HZ);
599482607adcSTejun Heo 	}
599582607adcSTejun Heo }
599682607adcSTejun Heo 
599782607adcSTejun Heo static int wq_watchdog_param_set_thresh(const char *val,
599882607adcSTejun Heo 					const struct kernel_param *kp)
599982607adcSTejun Heo {
600082607adcSTejun Heo 	unsigned long thresh;
600182607adcSTejun Heo 	int ret;
600282607adcSTejun Heo 
600382607adcSTejun Heo 	ret = kstrtoul(val, 0, &thresh);
600482607adcSTejun Heo 	if (ret)
600582607adcSTejun Heo 		return ret;
600682607adcSTejun Heo 
600782607adcSTejun Heo 	if (system_wq)
600882607adcSTejun Heo 		wq_watchdog_set_thresh(thresh);
600982607adcSTejun Heo 	else
601082607adcSTejun Heo 		wq_watchdog_thresh = thresh;
601182607adcSTejun Heo 
601282607adcSTejun Heo 	return 0;
601382607adcSTejun Heo }
601482607adcSTejun Heo 
601582607adcSTejun Heo static const struct kernel_param_ops wq_watchdog_thresh_ops = {
601682607adcSTejun Heo 	.set	= wq_watchdog_param_set_thresh,
601782607adcSTejun Heo 	.get	= param_get_ulong,
601882607adcSTejun Heo };
601982607adcSTejun Heo 
602082607adcSTejun Heo module_param_cb(watchdog_thresh, &wq_watchdog_thresh_ops, &wq_watchdog_thresh,
602182607adcSTejun Heo 		0644);
602282607adcSTejun Heo 
602382607adcSTejun Heo static void wq_watchdog_init(void)
602482607adcSTejun Heo {
60255cd79d6aSKees Cook 	timer_setup(&wq_watchdog_timer, wq_watchdog_timer_fn, TIMER_DEFERRABLE);
602682607adcSTejun Heo 	wq_watchdog_set_thresh(wq_watchdog_thresh);
602782607adcSTejun Heo }
602882607adcSTejun Heo 
602982607adcSTejun Heo #else	/* CONFIG_WQ_WATCHDOG */
603082607adcSTejun Heo 
603182607adcSTejun Heo static inline void wq_watchdog_init(void) { }
603282607adcSTejun Heo 
603382607adcSTejun Heo #endif	/* CONFIG_WQ_WATCHDOG */
603482607adcSTejun Heo 
6035bce90380STejun Heo static void __init wq_numa_init(void)
6036bce90380STejun Heo {
6037bce90380STejun Heo 	cpumask_var_t *tbl;
6038bce90380STejun Heo 	int node, cpu;
6039bce90380STejun Heo 
6040bce90380STejun Heo 	if (num_possible_nodes() <= 1)
6041bce90380STejun Heo 		return;
6042bce90380STejun Heo 
6043d55262c4STejun Heo 	if (wq_disable_numa) {
6044d55262c4STejun Heo 		pr_info("workqueue: NUMA affinity support disabled\n");
6045d55262c4STejun Heo 		return;
6046d55262c4STejun Heo 	}
6047d55262c4STejun Heo 
6048f728c4a9SZhen Lei 	for_each_possible_cpu(cpu) {
6049f728c4a9SZhen Lei 		if (WARN_ON(cpu_to_node(cpu) == NUMA_NO_NODE)) {
6050f728c4a9SZhen Lei 			pr_warn("workqueue: NUMA node mapping not available for cpu%d, disabling NUMA support\n", cpu);
6051f728c4a9SZhen Lei 			return;
6052f728c4a9SZhen Lei 		}
6053f728c4a9SZhen Lei 	}
6054f728c4a9SZhen Lei 
6055be69d00dSThomas Gleixner 	wq_update_unbound_numa_attrs_buf = alloc_workqueue_attrs();
60564c16bd32STejun Heo 	BUG_ON(!wq_update_unbound_numa_attrs_buf);
60574c16bd32STejun Heo 
6058bce90380STejun Heo 	/*
6059bce90380STejun Heo 	 * We want masks of the possible CPUs of each node, which aren't
6060bce90380STejun Heo 	 * readily available.  Build them from cpu_to_node(), which should
6061bce90380STejun Heo 	 * have been fully initialized by now.
6062bce90380STejun Heo 	 */
60636396bb22SKees Cook 	tbl = kcalloc(nr_node_ids, sizeof(tbl[0]), GFP_KERNEL);
6064bce90380STejun Heo 	BUG_ON(!tbl);
6065bce90380STejun Heo 
6066bce90380STejun Heo 	for_each_node(node)
60675a6024f1SYasuaki Ishimatsu 		BUG_ON(!zalloc_cpumask_var_node(&tbl[node], GFP_KERNEL,
60681be0c25dSTejun Heo 				node_online(node) ? node : NUMA_NO_NODE));
6069bce90380STejun Heo 
6070bce90380STejun Heo 	for_each_possible_cpu(cpu) {
6071bce90380STejun Heo 		node = cpu_to_node(cpu);
6072bce90380STejun Heo 		cpumask_set_cpu(cpu, tbl[node]);
6073bce90380STejun Heo 	}
6074bce90380STejun Heo 
6075bce90380STejun Heo 	wq_numa_possible_cpumask = tbl;
6076bce90380STejun Heo 	wq_numa_enabled = true;
6077bce90380STejun Heo }
6078bce90380STejun Heo 
60793347fa09STejun Heo /**
60803347fa09STejun Heo  * workqueue_init_early - early init for workqueue subsystem
60813347fa09STejun Heo  *
60823347fa09STejun Heo  * This is the first half of two-staged workqueue subsystem initialization
60833347fa09STejun Heo  * and invoked as soon as the bare basics - memory allocation, cpumasks and
60843347fa09STejun Heo  * idr are up.  It sets up all the data structures and system workqueues
60853347fa09STejun Heo  * and allows early boot code to create workqueues and queue/cancel work
60863347fa09STejun Heo  * items.  Actual work item execution starts only after kthreads can be
60873347fa09STejun Heo  * created and scheduled right before early initcalls.
60883347fa09STejun Heo  */
60892333e829SYu Chen void __init workqueue_init_early(void)
60901da177e4SLinus Torvalds {
60917a4e344cSTejun Heo 	int std_nice[NR_STD_WORKER_POOLS] = { 0, HIGHPRI_NICE_LEVEL };
60927a4e344cSTejun Heo 	int i, cpu;
6093c34056a3STejun Heo 
609410cdb157SLai Jiangshan 	BUILD_BUG_ON(__alignof__(struct pool_workqueue) < __alignof__(long long));
6095e904e6c2STejun Heo 
6096b05a7928SFrederic Weisbecker 	BUG_ON(!alloc_cpumask_var(&wq_unbound_cpumask, GFP_KERNEL));
609704d4e665SFrederic Weisbecker 	cpumask_copy(wq_unbound_cpumask, housekeeping_cpumask(HK_TYPE_WQ));
609804d4e665SFrederic Weisbecker 	cpumask_and(wq_unbound_cpumask, wq_unbound_cpumask, housekeeping_cpumask(HK_TYPE_DOMAIN));
6099b05a7928SFrederic Weisbecker 
6100e904e6c2STejun Heo 	pwq_cache = KMEM_CACHE(pool_workqueue, SLAB_PANIC);
6101e904e6c2STejun Heo 
6102706026c2STejun Heo 	/* initialize CPU pools */
610329c91e99STejun Heo 	for_each_possible_cpu(cpu) {
61044ce62e9eSTejun Heo 		struct worker_pool *pool;
61058b03ae3cSTejun Heo 
61067a4e344cSTejun Heo 		i = 0;
6107f02ae73aSTejun Heo 		for_each_cpu_worker_pool(pool, cpu) {
61087a4e344cSTejun Heo 			BUG_ON(init_worker_pool(pool));
6109ec22ca5eSTejun Heo 			pool->cpu = cpu;
61107a4e344cSTejun Heo 			cpumask_copy(pool->attrs->cpumask, cpumask_of(cpu));
61117a4e344cSTejun Heo 			pool->attrs->nice = std_nice[i++];
6112f3f90ad4STejun Heo 			pool->node = cpu_to_node(cpu);
61137a4e344cSTejun Heo 
61149daf9e67STejun Heo 			/* alloc pool ID */
611568e13a67SLai Jiangshan 			mutex_lock(&wq_pool_mutex);
61169daf9e67STejun Heo 			BUG_ON(worker_pool_assign_id(pool));
611768e13a67SLai Jiangshan 			mutex_unlock(&wq_pool_mutex);
61184ce62e9eSTejun Heo 		}
61198b03ae3cSTejun Heo 	}
61208b03ae3cSTejun Heo 
61218a2b7538STejun Heo 	/* create default unbound and ordered wq attrs */
612229c91e99STejun Heo 	for (i = 0; i < NR_STD_WORKER_POOLS; i++) {
612329c91e99STejun Heo 		struct workqueue_attrs *attrs;
612429c91e99STejun Heo 
6125be69d00dSThomas Gleixner 		BUG_ON(!(attrs = alloc_workqueue_attrs()));
612629c91e99STejun Heo 		attrs->nice = std_nice[i];
612729c91e99STejun Heo 		unbound_std_wq_attrs[i] = attrs;
61288a2b7538STejun Heo 
61298a2b7538STejun Heo 		/*
61308a2b7538STejun Heo 		 * An ordered wq should have only one pwq as ordering is
61318a2b7538STejun Heo 		 * guaranteed by max_active which is enforced by pwqs.
61328a2b7538STejun Heo 		 * Turn off NUMA so that dfl_pwq is used for all nodes.
61338a2b7538STejun Heo 		 */
6134be69d00dSThomas Gleixner 		BUG_ON(!(attrs = alloc_workqueue_attrs()));
61358a2b7538STejun Heo 		attrs->nice = std_nice[i];
61368a2b7538STejun Heo 		attrs->no_numa = true;
61378a2b7538STejun Heo 		ordered_wq_attrs[i] = attrs;
613829c91e99STejun Heo 	}
613929c91e99STejun Heo 
6140d320c038STejun Heo 	system_wq = alloc_workqueue("events", 0, 0);
61411aabe902SJoonsoo Kim 	system_highpri_wq = alloc_workqueue("events_highpri", WQ_HIGHPRI, 0);
6142d320c038STejun Heo 	system_long_wq = alloc_workqueue("events_long", 0, 0);
6143f3421797STejun Heo 	system_unbound_wq = alloc_workqueue("events_unbound", WQ_UNBOUND,
6144f3421797STejun Heo 					    WQ_UNBOUND_MAX_ACTIVE);
614524d51addSTejun Heo 	system_freezable_wq = alloc_workqueue("events_freezable",
614624d51addSTejun Heo 					      WQ_FREEZABLE, 0);
61470668106cSViresh Kumar 	system_power_efficient_wq = alloc_workqueue("events_power_efficient",
61480668106cSViresh Kumar 					      WQ_POWER_EFFICIENT, 0);
61490668106cSViresh Kumar 	system_freezable_power_efficient_wq = alloc_workqueue("events_freezable_power_efficient",
61500668106cSViresh Kumar 					      WQ_FREEZABLE | WQ_POWER_EFFICIENT,
61510668106cSViresh Kumar 					      0);
61521aabe902SJoonsoo Kim 	BUG_ON(!system_wq || !system_highpri_wq || !system_long_wq ||
61530668106cSViresh Kumar 	       !system_unbound_wq || !system_freezable_wq ||
61540668106cSViresh Kumar 	       !system_power_efficient_wq ||
61550668106cSViresh Kumar 	       !system_freezable_power_efficient_wq);
61563347fa09STejun Heo }
61573347fa09STejun Heo 
61583347fa09STejun Heo /**
61593347fa09STejun Heo  * workqueue_init - bring workqueue subsystem fully online
61603347fa09STejun Heo  *
61613347fa09STejun Heo  * This is the latter half of two-staged workqueue subsystem initialization
61623347fa09STejun Heo  * and invoked as soon as kthreads can be created and scheduled.
61633347fa09STejun Heo  * Workqueues have been created and work items queued on them, but there
61643347fa09STejun Heo  * are no kworkers executing the work items yet.  Populate the worker pools
61653347fa09STejun Heo  * with the initial workers and enable future kworker creations.
61663347fa09STejun Heo  */
61672333e829SYu Chen void __init workqueue_init(void)
61683347fa09STejun Heo {
61692186d9f9STejun Heo 	struct workqueue_struct *wq;
61703347fa09STejun Heo 	struct worker_pool *pool;
61713347fa09STejun Heo 	int cpu, bkt;
61723347fa09STejun Heo 
61732186d9f9STejun Heo 	/*
61742186d9f9STejun Heo 	 * It'd be simpler to initialize NUMA in workqueue_init_early() but
61752186d9f9STejun Heo 	 * the CPU to node mapping may not be available that early on some
61762186d9f9STejun Heo 	 * archs such as power and arm64.  Since per-cpu pools created
61772186d9f9STejun Heo 	 * earlier could be missing their node hints, and unbound pools
61782186d9f9STejun Heo 	 * their NUMA affinity, fix them up here.
617940c17f75STejun Heo 	 *
618040c17f75STejun Heo 	 * Also, while iterating workqueues, create rescuers if requested.
61812186d9f9STejun Heo 	 */
61822186d9f9STejun Heo 	wq_numa_init();
61832186d9f9STejun Heo 
61842186d9f9STejun Heo 	mutex_lock(&wq_pool_mutex);
61852186d9f9STejun Heo 
61862186d9f9STejun Heo 	for_each_possible_cpu(cpu) {
61872186d9f9STejun Heo 		for_each_cpu_worker_pool(pool, cpu) {
61882186d9f9STejun Heo 			pool->node = cpu_to_node(cpu);
61892186d9f9STejun Heo 		}
61902186d9f9STejun Heo 	}
61912186d9f9STejun Heo 
619240c17f75STejun Heo 	list_for_each_entry(wq, &workqueues, list) {
61932186d9f9STejun Heo 		wq_update_unbound_numa(wq, smp_processor_id(), true);
619440c17f75STejun Heo 		WARN(init_rescuer(wq),
619540c17f75STejun Heo 		     "workqueue: failed to create early rescuer for %s",
619640c17f75STejun Heo 		     wq->name);
619740c17f75STejun Heo 	}
61982186d9f9STejun Heo 
61992186d9f9STejun Heo 	mutex_unlock(&wq_pool_mutex);
62002186d9f9STejun Heo 
62013347fa09STejun Heo 	/* create the initial workers */
62023347fa09STejun Heo 	for_each_online_cpu(cpu) {
62033347fa09STejun Heo 		for_each_cpu_worker_pool(pool, cpu) {
62043347fa09STejun Heo 			pool->flags &= ~POOL_DISASSOCIATED;
62053347fa09STejun Heo 			BUG_ON(!create_worker(pool));
62063347fa09STejun Heo 		}
62073347fa09STejun Heo 	}
62083347fa09STejun Heo 
62093347fa09STejun Heo 	hash_for_each(unbound_pool_hash, bkt, pool, hash_node)
62103347fa09STejun Heo 		BUG_ON(!create_worker(pool));
62113347fa09STejun Heo 
62123347fa09STejun Heo 	wq_online = true;
621382607adcSTejun Heo 	wq_watchdog_init();
62141da177e4SLinus Torvalds }
6215c4f135d6STetsuo Handa 
6216c4f135d6STetsuo Handa /*
6217c4f135d6STetsuo Handa  * Despite the naming, this is a no-op function which exists only to avoid
6218c4f135d6STetsuo Handa  * a link error. Since the compile-time warning may fail to catch all cases,
6219c4f135d6STetsuo Handa  * a run-time warning is also emitted from __flush_workqueue().
6220c4f135d6STetsuo Handa  */
6221c4f135d6STetsuo Handa void __warn_flushing_systemwide_wq(void) { }
6222c4f135d6STetsuo Handa EXPORT_SYMBOL(__warn_flushing_systemwide_wq);
6223