xref: /linux-6.15/kernel/workqueue.c (revision e22bee78)
11da177e4SLinus Torvalds /*
21da177e4SLinus Torvalds  * linux/kernel/workqueue.c
31da177e4SLinus Torvalds  *
41da177e4SLinus Torvalds  * Generic mechanism for defining kernel helper threads for running
51da177e4SLinus Torvalds  * arbitrary tasks in process context.
61da177e4SLinus Torvalds  *
71da177e4SLinus Torvalds  * Started by Ingo Molnar, Copyright (C) 2002
81da177e4SLinus Torvalds  *
91da177e4SLinus Torvalds  * Derived from the taskqueue/keventd code by:
101da177e4SLinus Torvalds  *
111da177e4SLinus Torvalds  *   David Woodhouse <[email protected]>
12e1f8e874SFrancois Cami  *   Andrew Morton
131da177e4SLinus Torvalds  *   Kai Petzke <[email protected]>
141da177e4SLinus Torvalds  *   Theodore Ts'o <[email protected]>
1589ada679SChristoph Lameter  *
16cde53535SChristoph Lameter  * Made to use alloc_percpu by Christoph Lameter.
171da177e4SLinus Torvalds  */
181da177e4SLinus Torvalds 
191da177e4SLinus Torvalds #include <linux/module.h>
201da177e4SLinus Torvalds #include <linux/kernel.h>
211da177e4SLinus Torvalds #include <linux/sched.h>
221da177e4SLinus Torvalds #include <linux/init.h>
231da177e4SLinus Torvalds #include <linux/signal.h>
241da177e4SLinus Torvalds #include <linux/completion.h>
251da177e4SLinus Torvalds #include <linux/workqueue.h>
261da177e4SLinus Torvalds #include <linux/slab.h>
271da177e4SLinus Torvalds #include <linux/cpu.h>
281da177e4SLinus Torvalds #include <linux/notifier.h>
291da177e4SLinus Torvalds #include <linux/kthread.h>
301fa44ecaSJames Bottomley #include <linux/hardirq.h>
3146934023SChristoph Lameter #include <linux/mempolicy.h>
32341a5958SRafael J. Wysocki #include <linux/freezer.h>
33d5abe669SPeter Zijlstra #include <linux/kallsyms.h>
34d5abe669SPeter Zijlstra #include <linux/debug_locks.h>
354e6045f1SJohannes Berg #include <linux/lockdep.h>
36c34056a3STejun Heo #include <linux/idr.h>
37*e22bee78STejun Heo 
38*e22bee78STejun Heo #include "workqueue_sched.h"
391da177e4SLinus Torvalds 
40c8e55f36STejun Heo enum {
41db7bccf4STejun Heo 	/* global_cwq flags */
42*e22bee78STejun Heo 	GCWQ_MANAGE_WORKERS	= 1 << 0,	/* need to manage workers */
43*e22bee78STejun Heo 	GCWQ_MANAGING_WORKERS	= 1 << 1,	/* managing workers */
44*e22bee78STejun Heo 	GCWQ_DISASSOCIATED	= 1 << 2,	/* cpu can't serve workers */
45db7bccf4STejun Heo 	GCWQ_FREEZING		= 1 << 3,	/* freeze in progress */
46db7bccf4STejun Heo 
47c8e55f36STejun Heo 	/* worker flags */
48c8e55f36STejun Heo 	WORKER_STARTED		= 1 << 0,	/* started */
49c8e55f36STejun Heo 	WORKER_DIE		= 1 << 1,	/* die die die */
50c8e55f36STejun Heo 	WORKER_IDLE		= 1 << 2,	/* is idle */
51*e22bee78STejun Heo 	WORKER_PREP		= 1 << 3,	/* preparing to run works */
52db7bccf4STejun Heo 	WORKER_ROGUE		= 1 << 4,	/* not bound to any cpu */
53*e22bee78STejun Heo 	WORKER_REBIND		= 1 << 5,	/* mom is home, come back */
54*e22bee78STejun Heo 
55*e22bee78STejun Heo 	WORKER_NOT_RUNNING	= WORKER_PREP | WORKER_ROGUE | WORKER_REBIND,
56db7bccf4STejun Heo 
57db7bccf4STejun Heo 	/* gcwq->trustee_state */
58db7bccf4STejun Heo 	TRUSTEE_START		= 0,		/* start */
59db7bccf4STejun Heo 	TRUSTEE_IN_CHARGE	= 1,		/* trustee in charge of gcwq */
60db7bccf4STejun Heo 	TRUSTEE_BUTCHER		= 2,		/* butcher workers */
61db7bccf4STejun Heo 	TRUSTEE_RELEASE		= 3,		/* release workers */
62db7bccf4STejun Heo 	TRUSTEE_DONE		= 4,		/* trustee is done */
63c8e55f36STejun Heo 
64c8e55f36STejun Heo 	BUSY_WORKER_HASH_ORDER	= 6,		/* 64 pointers */
65c8e55f36STejun Heo 	BUSY_WORKER_HASH_SIZE	= 1 << BUSY_WORKER_HASH_ORDER,
66c8e55f36STejun Heo 	BUSY_WORKER_HASH_MASK	= BUSY_WORKER_HASH_SIZE - 1,
67db7bccf4STejun Heo 
68*e22bee78STejun Heo 	MAX_IDLE_WORKERS_RATIO	= 4,		/* 1/4 of busy can be idle */
69*e22bee78STejun Heo 	IDLE_WORKER_TIMEOUT	= 300 * HZ,	/* keep idle ones for 5 mins */
70*e22bee78STejun Heo 
71*e22bee78STejun Heo 	MAYDAY_INITIAL_TIMEOUT	= HZ / 100,	/* call for help after 10ms */
72*e22bee78STejun Heo 	MAYDAY_INTERVAL		= HZ / 10,	/* and then every 100ms */
73*e22bee78STejun Heo 	CREATE_COOLDOWN		= HZ,		/* time to breath after fail */
74db7bccf4STejun Heo 	TRUSTEE_COOLDOWN	= HZ / 10,	/* for trustee draining */
75*e22bee78STejun Heo 
76*e22bee78STejun Heo 	/*
77*e22bee78STejun Heo 	 * Rescue workers are used only on emergencies and shared by
78*e22bee78STejun Heo 	 * all cpus.  Give -20.
79*e22bee78STejun Heo 	 */
80*e22bee78STejun Heo 	RESCUER_NICE_LEVEL	= -20,
81c8e55f36STejun Heo };
82c8e55f36STejun Heo 
831da177e4SLinus Torvalds /*
844690c4abSTejun Heo  * Structure fields follow one of the following exclusion rules.
854690c4abSTejun Heo  *
864690c4abSTejun Heo  * I: Set during initialization and read-only afterwards.
874690c4abSTejun Heo  *
88*e22bee78STejun Heo  * P: Preemption protected.  Disabling preemption is enough and should
89*e22bee78STejun Heo  *    only be modified and accessed from the local cpu.
90*e22bee78STejun Heo  *
918b03ae3cSTejun Heo  * L: gcwq->lock protected.  Access with gcwq->lock held.
924690c4abSTejun Heo  *
93*e22bee78STejun Heo  * X: During normal operation, modification requires gcwq->lock and
94*e22bee78STejun Heo  *    should be done only from local cpu.  Either disabling preemption
95*e22bee78STejun Heo  *    on local cpu or grabbing gcwq->lock is enough for read access.
96*e22bee78STejun Heo  *    While trustee is in charge, it's identical to L.
97*e22bee78STejun Heo  *
9873f53c4aSTejun Heo  * F: wq->flush_mutex protected.
9973f53c4aSTejun Heo  *
1004690c4abSTejun Heo  * W: workqueue_lock protected.
1014690c4abSTejun Heo  */
1024690c4abSTejun Heo 
1038b03ae3cSTejun Heo struct global_cwq;
104c34056a3STejun Heo 
105*e22bee78STejun Heo /*
106*e22bee78STejun Heo  * The poor guys doing the actual heavy lifting.  All on-duty workers
107*e22bee78STejun Heo  * are either serving the manager role, on idle list or on busy hash.
108*e22bee78STejun Heo  */
109c34056a3STejun Heo struct worker {
110c8e55f36STejun Heo 	/* on idle list while idle, on busy hash table while busy */
111c8e55f36STejun Heo 	union {
112c8e55f36STejun Heo 		struct list_head	entry;	/* L: while idle */
113c8e55f36STejun Heo 		struct hlist_node	hentry;	/* L: while busy */
114c8e55f36STejun Heo 	};
115c8e55f36STejun Heo 
116c34056a3STejun Heo 	struct work_struct	*current_work;	/* L: work being processed */
1178cca0eeaSTejun Heo 	struct cpu_workqueue_struct *current_cwq; /* L: current_work's cwq */
118affee4b2STejun Heo 	struct list_head	scheduled;	/* L: scheduled works */
119c34056a3STejun Heo 	struct task_struct	*task;		/* I: worker task */
1208b03ae3cSTejun Heo 	struct global_cwq	*gcwq;		/* I: the associated gcwq */
121*e22bee78STejun Heo 	/* 64 bytes boundary on 64bit, 32 on 32bit */
122*e22bee78STejun Heo 	unsigned long		last_active;	/* L: last active timestamp */
123*e22bee78STejun Heo 	unsigned int		flags;		/* X: flags */
124c34056a3STejun Heo 	int			id;		/* I: worker id */
125*e22bee78STejun Heo 	struct work_struct	rebind_work;	/* L: rebind worker to cpu */
126c34056a3STejun Heo };
127c34056a3STejun Heo 
1284690c4abSTejun Heo /*
129*e22bee78STejun Heo  * Global per-cpu workqueue.  There's one and only one for each cpu
130*e22bee78STejun Heo  * and all works are queued and processed here regardless of their
131*e22bee78STejun Heo  * target workqueues.
1328b03ae3cSTejun Heo  */
1338b03ae3cSTejun Heo struct global_cwq {
1348b03ae3cSTejun Heo 	spinlock_t		lock;		/* the gcwq lock */
1357e11629dSTejun Heo 	struct list_head	worklist;	/* L: list of pending works */
1368b03ae3cSTejun Heo 	unsigned int		cpu;		/* I: the associated cpu */
137db7bccf4STejun Heo 	unsigned int		flags;		/* L: GCWQ_* flags */
138c8e55f36STejun Heo 
139c8e55f36STejun Heo 	int			nr_workers;	/* L: total number of workers */
140c8e55f36STejun Heo 	int			nr_idle;	/* L: currently idle ones */
141c8e55f36STejun Heo 
142c8e55f36STejun Heo 	/* workers are chained either in the idle_list or busy_hash */
143*e22bee78STejun Heo 	struct list_head	idle_list;	/* X: list of idle workers */
144c8e55f36STejun Heo 	struct hlist_head	busy_hash[BUSY_WORKER_HASH_SIZE];
145c8e55f36STejun Heo 						/* L: hash of busy workers */
146c8e55f36STejun Heo 
147*e22bee78STejun Heo 	struct timer_list	idle_timer;	/* L: worker idle timeout */
148*e22bee78STejun Heo 	struct timer_list	mayday_timer;	/* L: SOS timer for dworkers */
149*e22bee78STejun Heo 
1508b03ae3cSTejun Heo 	struct ida		worker_ida;	/* L: for worker IDs */
151db7bccf4STejun Heo 
152db7bccf4STejun Heo 	struct task_struct	*trustee;	/* L: for gcwq shutdown */
153db7bccf4STejun Heo 	unsigned int		trustee_state;	/* L: trustee state */
154db7bccf4STejun Heo 	wait_queue_head_t	trustee_wait;	/* trustee wait */
155*e22bee78STejun Heo 	struct worker		*first_idle;	/* L: first idle worker */
1568b03ae3cSTejun Heo } ____cacheline_aligned_in_smp;
1578b03ae3cSTejun Heo 
1588b03ae3cSTejun Heo /*
159502ca9d8STejun Heo  * The per-CPU workqueue.  The lower WORK_STRUCT_FLAG_BITS of
1600f900049STejun Heo  * work_struct->data are used for flags and thus cwqs need to be
1610f900049STejun Heo  * aligned at two's power of the number of flag bits.
1621da177e4SLinus Torvalds  */
1631da177e4SLinus Torvalds struct cpu_workqueue_struct {
1648b03ae3cSTejun Heo 	struct global_cwq	*gcwq;		/* I: the associated gcwq */
1654690c4abSTejun Heo 	struct workqueue_struct *wq;		/* I: the owning workqueue */
16673f53c4aSTejun Heo 	int			work_color;	/* L: current color */
16773f53c4aSTejun Heo 	int			flush_color;	/* L: flushing color */
16873f53c4aSTejun Heo 	int			nr_in_flight[WORK_NR_COLORS];
16973f53c4aSTejun Heo 						/* L: nr of in_flight works */
1701e19ffc6STejun Heo 	int			nr_active;	/* L: nr of active works */
171a0a1a5fdSTejun Heo 	int			max_active;	/* L: max active works */
1721e19ffc6STejun Heo 	struct list_head	delayed_works;	/* L: delayed works */
1730f900049STejun Heo };
1741da177e4SLinus Torvalds 
1751da177e4SLinus Torvalds /*
17673f53c4aSTejun Heo  * Structure used to wait for workqueue flush.
17773f53c4aSTejun Heo  */
17873f53c4aSTejun Heo struct wq_flusher {
17973f53c4aSTejun Heo 	struct list_head	list;		/* F: list of flushers */
18073f53c4aSTejun Heo 	int			flush_color;	/* F: flush color waiting for */
18173f53c4aSTejun Heo 	struct completion	done;		/* flush completion */
18273f53c4aSTejun Heo };
18373f53c4aSTejun Heo 
18473f53c4aSTejun Heo /*
1851da177e4SLinus Torvalds  * The externally visible workqueue abstraction is an array of
1861da177e4SLinus Torvalds  * per-CPU workqueues:
1871da177e4SLinus Torvalds  */
1881da177e4SLinus Torvalds struct workqueue_struct {
18997e37d7bSTejun Heo 	unsigned int		flags;		/* I: WQ_* flags */
1904690c4abSTejun Heo 	struct cpu_workqueue_struct *cpu_wq;	/* I: cwq's */
1914690c4abSTejun Heo 	struct list_head	list;		/* W: list of all workqueues */
19273f53c4aSTejun Heo 
19373f53c4aSTejun Heo 	struct mutex		flush_mutex;	/* protects wq flushing */
19473f53c4aSTejun Heo 	int			work_color;	/* F: current work color */
19573f53c4aSTejun Heo 	int			flush_color;	/* F: current flush color */
19673f53c4aSTejun Heo 	atomic_t		nr_cwqs_to_flush; /* flush in progress */
19773f53c4aSTejun Heo 	struct wq_flusher	*first_flusher;	/* F: first flusher */
19873f53c4aSTejun Heo 	struct list_head	flusher_queue;	/* F: flush waiters */
19973f53c4aSTejun Heo 	struct list_head	flusher_overflow; /* F: flush overflow list */
20073f53c4aSTejun Heo 
201502ca9d8STejun Heo 	unsigned long		single_cpu;	/* cpu for single cpu wq */
202502ca9d8STejun Heo 
203*e22bee78STejun Heo 	cpumask_var_t		mayday_mask;	/* cpus requesting rescue */
204*e22bee78STejun Heo 	struct worker		*rescuer;	/* I: rescue worker */
205*e22bee78STejun Heo 
206a0a1a5fdSTejun Heo 	int			saved_max_active; /* I: saved cwq max_active */
2074690c4abSTejun Heo 	const char		*name;		/* I: workqueue name */
2084e6045f1SJohannes Berg #ifdef CONFIG_LOCKDEP
2094e6045f1SJohannes Berg 	struct lockdep_map	lockdep_map;
2104e6045f1SJohannes Berg #endif
2111da177e4SLinus Torvalds };
2121da177e4SLinus Torvalds 
213db7bccf4STejun Heo #define for_each_busy_worker(worker, i, pos, gcwq)			\
214db7bccf4STejun Heo 	for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++)			\
215db7bccf4STejun Heo 		hlist_for_each_entry(worker, pos, &gcwq->busy_hash[i], hentry)
216db7bccf4STejun Heo 
217dc186ad7SThomas Gleixner #ifdef CONFIG_DEBUG_OBJECTS_WORK
218dc186ad7SThomas Gleixner 
219dc186ad7SThomas Gleixner static struct debug_obj_descr work_debug_descr;
220dc186ad7SThomas Gleixner 
221dc186ad7SThomas Gleixner /*
222dc186ad7SThomas Gleixner  * fixup_init is called when:
223dc186ad7SThomas Gleixner  * - an active object is initialized
224dc186ad7SThomas Gleixner  */
225dc186ad7SThomas Gleixner static int work_fixup_init(void *addr, enum debug_obj_state state)
226dc186ad7SThomas Gleixner {
227dc186ad7SThomas Gleixner 	struct work_struct *work = addr;
228dc186ad7SThomas Gleixner 
229dc186ad7SThomas Gleixner 	switch (state) {
230dc186ad7SThomas Gleixner 	case ODEBUG_STATE_ACTIVE:
231dc186ad7SThomas Gleixner 		cancel_work_sync(work);
232dc186ad7SThomas Gleixner 		debug_object_init(work, &work_debug_descr);
233dc186ad7SThomas Gleixner 		return 1;
234dc186ad7SThomas Gleixner 	default:
235dc186ad7SThomas Gleixner 		return 0;
236dc186ad7SThomas Gleixner 	}
237dc186ad7SThomas Gleixner }
238dc186ad7SThomas Gleixner 
239dc186ad7SThomas Gleixner /*
240dc186ad7SThomas Gleixner  * fixup_activate is called when:
241dc186ad7SThomas Gleixner  * - an active object is activated
242dc186ad7SThomas Gleixner  * - an unknown object is activated (might be a statically initialized object)
243dc186ad7SThomas Gleixner  */
244dc186ad7SThomas Gleixner static int work_fixup_activate(void *addr, enum debug_obj_state state)
245dc186ad7SThomas Gleixner {
246dc186ad7SThomas Gleixner 	struct work_struct *work = addr;
247dc186ad7SThomas Gleixner 
248dc186ad7SThomas Gleixner 	switch (state) {
249dc186ad7SThomas Gleixner 
250dc186ad7SThomas Gleixner 	case ODEBUG_STATE_NOTAVAILABLE:
251dc186ad7SThomas Gleixner 		/*
252dc186ad7SThomas Gleixner 		 * This is not really a fixup. The work struct was
253dc186ad7SThomas Gleixner 		 * statically initialized. We just make sure that it
254dc186ad7SThomas Gleixner 		 * is tracked in the object tracker.
255dc186ad7SThomas Gleixner 		 */
25622df02bbSTejun Heo 		if (test_bit(WORK_STRUCT_STATIC_BIT, work_data_bits(work))) {
257dc186ad7SThomas Gleixner 			debug_object_init(work, &work_debug_descr);
258dc186ad7SThomas Gleixner 			debug_object_activate(work, &work_debug_descr);
259dc186ad7SThomas Gleixner 			return 0;
260dc186ad7SThomas Gleixner 		}
261dc186ad7SThomas Gleixner 		WARN_ON_ONCE(1);
262dc186ad7SThomas Gleixner 		return 0;
263dc186ad7SThomas Gleixner 
264dc186ad7SThomas Gleixner 	case ODEBUG_STATE_ACTIVE:
265dc186ad7SThomas Gleixner 		WARN_ON(1);
266dc186ad7SThomas Gleixner 
267dc186ad7SThomas Gleixner 	default:
268dc186ad7SThomas Gleixner 		return 0;
269dc186ad7SThomas Gleixner 	}
270dc186ad7SThomas Gleixner }
271dc186ad7SThomas Gleixner 
272dc186ad7SThomas Gleixner /*
273dc186ad7SThomas Gleixner  * fixup_free is called when:
274dc186ad7SThomas Gleixner  * - an active object is freed
275dc186ad7SThomas Gleixner  */
276dc186ad7SThomas Gleixner static int work_fixup_free(void *addr, enum debug_obj_state state)
277dc186ad7SThomas Gleixner {
278dc186ad7SThomas Gleixner 	struct work_struct *work = addr;
279dc186ad7SThomas Gleixner 
280dc186ad7SThomas Gleixner 	switch (state) {
281dc186ad7SThomas Gleixner 	case ODEBUG_STATE_ACTIVE:
282dc186ad7SThomas Gleixner 		cancel_work_sync(work);
283dc186ad7SThomas Gleixner 		debug_object_free(work, &work_debug_descr);
284dc186ad7SThomas Gleixner 		return 1;
285dc186ad7SThomas Gleixner 	default:
286dc186ad7SThomas Gleixner 		return 0;
287dc186ad7SThomas Gleixner 	}
288dc186ad7SThomas Gleixner }
289dc186ad7SThomas Gleixner 
290dc186ad7SThomas Gleixner static struct debug_obj_descr work_debug_descr = {
291dc186ad7SThomas Gleixner 	.name		= "work_struct",
292dc186ad7SThomas Gleixner 	.fixup_init	= work_fixup_init,
293dc186ad7SThomas Gleixner 	.fixup_activate	= work_fixup_activate,
294dc186ad7SThomas Gleixner 	.fixup_free	= work_fixup_free,
295dc186ad7SThomas Gleixner };
296dc186ad7SThomas Gleixner 
297dc186ad7SThomas Gleixner static inline void debug_work_activate(struct work_struct *work)
298dc186ad7SThomas Gleixner {
299dc186ad7SThomas Gleixner 	debug_object_activate(work, &work_debug_descr);
300dc186ad7SThomas Gleixner }
301dc186ad7SThomas Gleixner 
302dc186ad7SThomas Gleixner static inline void debug_work_deactivate(struct work_struct *work)
303dc186ad7SThomas Gleixner {
304dc186ad7SThomas Gleixner 	debug_object_deactivate(work, &work_debug_descr);
305dc186ad7SThomas Gleixner }
306dc186ad7SThomas Gleixner 
307dc186ad7SThomas Gleixner void __init_work(struct work_struct *work, int onstack)
308dc186ad7SThomas Gleixner {
309dc186ad7SThomas Gleixner 	if (onstack)
310dc186ad7SThomas Gleixner 		debug_object_init_on_stack(work, &work_debug_descr);
311dc186ad7SThomas Gleixner 	else
312dc186ad7SThomas Gleixner 		debug_object_init(work, &work_debug_descr);
313dc186ad7SThomas Gleixner }
314dc186ad7SThomas Gleixner EXPORT_SYMBOL_GPL(__init_work);
315dc186ad7SThomas Gleixner 
316dc186ad7SThomas Gleixner void destroy_work_on_stack(struct work_struct *work)
317dc186ad7SThomas Gleixner {
318dc186ad7SThomas Gleixner 	debug_object_free(work, &work_debug_descr);
319dc186ad7SThomas Gleixner }
320dc186ad7SThomas Gleixner EXPORT_SYMBOL_GPL(destroy_work_on_stack);
321dc186ad7SThomas Gleixner 
322dc186ad7SThomas Gleixner #else
323dc186ad7SThomas Gleixner static inline void debug_work_activate(struct work_struct *work) { }
324dc186ad7SThomas Gleixner static inline void debug_work_deactivate(struct work_struct *work) { }
325dc186ad7SThomas Gleixner #endif
326dc186ad7SThomas Gleixner 
32795402b38SGautham R Shenoy /* Serializes the accesses to the list of workqueues. */
32895402b38SGautham R Shenoy static DEFINE_SPINLOCK(workqueue_lock);
3291da177e4SLinus Torvalds static LIST_HEAD(workqueues);
330a0a1a5fdSTejun Heo static bool workqueue_freezing;		/* W: have wqs started freezing? */
331c34056a3STejun Heo 
332*e22bee78STejun Heo /*
333*e22bee78STejun Heo  * The almighty global cpu workqueues.  nr_running is the only field
334*e22bee78STejun Heo  * which is expected to be used frequently by other cpus via
335*e22bee78STejun Heo  * try_to_wake_up().  Put it in a separate cacheline.
336*e22bee78STejun Heo  */
3378b03ae3cSTejun Heo static DEFINE_PER_CPU(struct global_cwq, global_cwq);
338*e22bee78STejun Heo static DEFINE_PER_CPU_SHARED_ALIGNED(atomic_t, gcwq_nr_running);
3398b03ae3cSTejun Heo 
340c34056a3STejun Heo static int worker_thread(void *__worker);
3411da177e4SLinus Torvalds 
3428b03ae3cSTejun Heo static struct global_cwq *get_gcwq(unsigned int cpu)
3438b03ae3cSTejun Heo {
3448b03ae3cSTejun Heo 	return &per_cpu(global_cwq, cpu);
3458b03ae3cSTejun Heo }
3468b03ae3cSTejun Heo 
347*e22bee78STejun Heo static atomic_t *get_gcwq_nr_running(unsigned int cpu)
348*e22bee78STejun Heo {
349*e22bee78STejun Heo 	return &per_cpu(gcwq_nr_running, cpu);
350*e22bee78STejun Heo }
351*e22bee78STejun Heo 
3524690c4abSTejun Heo static struct cpu_workqueue_struct *get_cwq(unsigned int cpu,
3534690c4abSTejun Heo 					    struct workqueue_struct *wq)
354a848e3b6SOleg Nesterov {
355a848e3b6SOleg Nesterov 	return per_cpu_ptr(wq->cpu_wq, cpu);
356a848e3b6SOleg Nesterov }
357a848e3b6SOleg Nesterov 
35873f53c4aSTejun Heo static unsigned int work_color_to_flags(int color)
35973f53c4aSTejun Heo {
36073f53c4aSTejun Heo 	return color << WORK_STRUCT_COLOR_SHIFT;
36173f53c4aSTejun Heo }
36273f53c4aSTejun Heo 
36373f53c4aSTejun Heo static int get_work_color(struct work_struct *work)
36473f53c4aSTejun Heo {
36573f53c4aSTejun Heo 	return (*work_data_bits(work) >> WORK_STRUCT_COLOR_SHIFT) &
36673f53c4aSTejun Heo 		((1 << WORK_STRUCT_COLOR_BITS) - 1);
36773f53c4aSTejun Heo }
36873f53c4aSTejun Heo 
36973f53c4aSTejun Heo static int work_next_color(int color)
37073f53c4aSTejun Heo {
37173f53c4aSTejun Heo 	return (color + 1) % WORK_NR_COLORS;
37273f53c4aSTejun Heo }
37373f53c4aSTejun Heo 
3744594bf15SDavid Howells /*
3757a22ad75STejun Heo  * Work data points to the cwq while a work is on queue.  Once
3767a22ad75STejun Heo  * execution starts, it points to the cpu the work was last on.  This
3777a22ad75STejun Heo  * can be distinguished by comparing the data value against
3787a22ad75STejun Heo  * PAGE_OFFSET.
3797a22ad75STejun Heo  *
3807a22ad75STejun Heo  * set_work_{cwq|cpu}() and clear_work_data() can be used to set the
3817a22ad75STejun Heo  * cwq, cpu or clear work->data.  These functions should only be
3827a22ad75STejun Heo  * called while the work is owned - ie. while the PENDING bit is set.
3837a22ad75STejun Heo  *
3847a22ad75STejun Heo  * get_work_[g]cwq() can be used to obtain the gcwq or cwq
3857a22ad75STejun Heo  * corresponding to a work.  gcwq is available once the work has been
3867a22ad75STejun Heo  * queued anywhere after initialization.  cwq is available only from
3877a22ad75STejun Heo  * queueing until execution starts.
3884594bf15SDavid Howells  */
3897a22ad75STejun Heo static inline void set_work_data(struct work_struct *work, unsigned long data,
3907a22ad75STejun Heo 				 unsigned long flags)
3917a22ad75STejun Heo {
3927a22ad75STejun Heo 	BUG_ON(!work_pending(work));
3937a22ad75STejun Heo 	atomic_long_set(&work->data, data | flags | work_static(work));
3947a22ad75STejun Heo }
3957a22ad75STejun Heo 
3967a22ad75STejun Heo static void set_work_cwq(struct work_struct *work,
3974690c4abSTejun Heo 			 struct cpu_workqueue_struct *cwq,
3984690c4abSTejun Heo 			 unsigned long extra_flags)
399365970a1SDavid Howells {
4007a22ad75STejun Heo 	set_work_data(work, (unsigned long)cwq,
40122df02bbSTejun Heo 		      WORK_STRUCT_PENDING | extra_flags);
402365970a1SDavid Howells }
403365970a1SDavid Howells 
4047a22ad75STejun Heo static void set_work_cpu(struct work_struct *work, unsigned int cpu)
4054d707b9fSOleg Nesterov {
4067a22ad75STejun Heo 	set_work_data(work, cpu << WORK_STRUCT_FLAG_BITS, WORK_STRUCT_PENDING);
4074d707b9fSOleg Nesterov }
4084d707b9fSOleg Nesterov 
4097a22ad75STejun Heo static void clear_work_data(struct work_struct *work)
410365970a1SDavid Howells {
4117a22ad75STejun Heo 	set_work_data(work, WORK_STRUCT_NO_CPU, 0);
4127a22ad75STejun Heo }
4137a22ad75STejun Heo 
4147a22ad75STejun Heo static inline unsigned long get_work_data(struct work_struct *work)
4157a22ad75STejun Heo {
4167a22ad75STejun Heo 	return atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK;
4177a22ad75STejun Heo }
4187a22ad75STejun Heo 
4197a22ad75STejun Heo static struct cpu_workqueue_struct *get_work_cwq(struct work_struct *work)
4207a22ad75STejun Heo {
4217a22ad75STejun Heo 	unsigned long data = get_work_data(work);
4227a22ad75STejun Heo 
4237a22ad75STejun Heo 	return data >= PAGE_OFFSET ? (void *)data : NULL;
4247a22ad75STejun Heo }
4257a22ad75STejun Heo 
4267a22ad75STejun Heo static struct global_cwq *get_work_gcwq(struct work_struct *work)
4277a22ad75STejun Heo {
4287a22ad75STejun Heo 	unsigned long data = get_work_data(work);
4297a22ad75STejun Heo 	unsigned int cpu;
4307a22ad75STejun Heo 
4317a22ad75STejun Heo 	if (data >= PAGE_OFFSET)
4327a22ad75STejun Heo 		return ((struct cpu_workqueue_struct *)data)->gcwq;
4337a22ad75STejun Heo 
4347a22ad75STejun Heo 	cpu = data >> WORK_STRUCT_FLAG_BITS;
4357a22ad75STejun Heo 	if (cpu == NR_CPUS)
4367a22ad75STejun Heo 		return NULL;
4377a22ad75STejun Heo 
4387a22ad75STejun Heo 	BUG_ON(cpu >= num_possible_cpus());
4397a22ad75STejun Heo 	return get_gcwq(cpu);
440365970a1SDavid Howells }
441365970a1SDavid Howells 
442*e22bee78STejun Heo /*
443*e22bee78STejun Heo  * Policy functions.  These define the policies on how the global
444*e22bee78STejun Heo  * worker pool is managed.  Unless noted otherwise, these functions
445*e22bee78STejun Heo  * assume that they're being called with gcwq->lock held.
446*e22bee78STejun Heo  */
447*e22bee78STejun Heo 
448*e22bee78STejun Heo /*
449*e22bee78STejun Heo  * Need to wake up a worker?  Called from anything but currently
450*e22bee78STejun Heo  * running workers.
451*e22bee78STejun Heo  */
452*e22bee78STejun Heo static bool need_more_worker(struct global_cwq *gcwq)
453*e22bee78STejun Heo {
454*e22bee78STejun Heo 	atomic_t *nr_running = get_gcwq_nr_running(gcwq->cpu);
455*e22bee78STejun Heo 
456*e22bee78STejun Heo 	return !list_empty(&gcwq->worklist) && !atomic_read(nr_running);
457*e22bee78STejun Heo }
458*e22bee78STejun Heo 
459*e22bee78STejun Heo /* Can I start working?  Called from busy but !running workers. */
460*e22bee78STejun Heo static bool may_start_working(struct global_cwq *gcwq)
461*e22bee78STejun Heo {
462*e22bee78STejun Heo 	return gcwq->nr_idle;
463*e22bee78STejun Heo }
464*e22bee78STejun Heo 
465*e22bee78STejun Heo /* Do I need to keep working?  Called from currently running workers. */
466*e22bee78STejun Heo static bool keep_working(struct global_cwq *gcwq)
467*e22bee78STejun Heo {
468*e22bee78STejun Heo 	atomic_t *nr_running = get_gcwq_nr_running(gcwq->cpu);
469*e22bee78STejun Heo 
470*e22bee78STejun Heo 	return !list_empty(&gcwq->worklist) && atomic_read(nr_running) <= 1;
471*e22bee78STejun Heo }
472*e22bee78STejun Heo 
473*e22bee78STejun Heo /* Do we need a new worker?  Called from manager. */
474*e22bee78STejun Heo static bool need_to_create_worker(struct global_cwq *gcwq)
475*e22bee78STejun Heo {
476*e22bee78STejun Heo 	return need_more_worker(gcwq) && !may_start_working(gcwq);
477*e22bee78STejun Heo }
478*e22bee78STejun Heo 
479*e22bee78STejun Heo /* Do I need to be the manager? */
480*e22bee78STejun Heo static bool need_to_manage_workers(struct global_cwq *gcwq)
481*e22bee78STejun Heo {
482*e22bee78STejun Heo 	return need_to_create_worker(gcwq) || gcwq->flags & GCWQ_MANAGE_WORKERS;
483*e22bee78STejun Heo }
484*e22bee78STejun Heo 
485*e22bee78STejun Heo /* Do we have too many workers and should some go away? */
486*e22bee78STejun Heo static bool too_many_workers(struct global_cwq *gcwq)
487*e22bee78STejun Heo {
488*e22bee78STejun Heo 	bool managing = gcwq->flags & GCWQ_MANAGING_WORKERS;
489*e22bee78STejun Heo 	int nr_idle = gcwq->nr_idle + managing; /* manager is considered idle */
490*e22bee78STejun Heo 	int nr_busy = gcwq->nr_workers - nr_idle;
491*e22bee78STejun Heo 
492*e22bee78STejun Heo 	return nr_idle > 2 && (nr_idle - 2) * MAX_IDLE_WORKERS_RATIO >= nr_busy;
493*e22bee78STejun Heo }
494*e22bee78STejun Heo 
495*e22bee78STejun Heo /*
496*e22bee78STejun Heo  * Wake up functions.
497*e22bee78STejun Heo  */
498*e22bee78STejun Heo 
4997e11629dSTejun Heo /* Return the first worker.  Safe with preemption disabled */
5007e11629dSTejun Heo static struct worker *first_worker(struct global_cwq *gcwq)
5017e11629dSTejun Heo {
5027e11629dSTejun Heo 	if (unlikely(list_empty(&gcwq->idle_list)))
5037e11629dSTejun Heo 		return NULL;
5047e11629dSTejun Heo 
5057e11629dSTejun Heo 	return list_first_entry(&gcwq->idle_list, struct worker, entry);
5067e11629dSTejun Heo }
5077e11629dSTejun Heo 
5087e11629dSTejun Heo /**
5097e11629dSTejun Heo  * wake_up_worker - wake up an idle worker
5107e11629dSTejun Heo  * @gcwq: gcwq to wake worker for
5117e11629dSTejun Heo  *
5127e11629dSTejun Heo  * Wake up the first idle worker of @gcwq.
5137e11629dSTejun Heo  *
5147e11629dSTejun Heo  * CONTEXT:
5157e11629dSTejun Heo  * spin_lock_irq(gcwq->lock).
5167e11629dSTejun Heo  */
5177e11629dSTejun Heo static void wake_up_worker(struct global_cwq *gcwq)
5187e11629dSTejun Heo {
5197e11629dSTejun Heo 	struct worker *worker = first_worker(gcwq);
5207e11629dSTejun Heo 
5217e11629dSTejun Heo 	if (likely(worker))
5227e11629dSTejun Heo 		wake_up_process(worker->task);
5237e11629dSTejun Heo }
5247e11629dSTejun Heo 
5254690c4abSTejun Heo /**
526*e22bee78STejun Heo  * wq_worker_waking_up - a worker is waking up
527*e22bee78STejun Heo  * @task: task waking up
528*e22bee78STejun Heo  * @cpu: CPU @task is waking up to
529*e22bee78STejun Heo  *
530*e22bee78STejun Heo  * This function is called during try_to_wake_up() when a worker is
531*e22bee78STejun Heo  * being awoken.
532*e22bee78STejun Heo  *
533*e22bee78STejun Heo  * CONTEXT:
534*e22bee78STejun Heo  * spin_lock_irq(rq->lock)
535*e22bee78STejun Heo  */
536*e22bee78STejun Heo void wq_worker_waking_up(struct task_struct *task, unsigned int cpu)
537*e22bee78STejun Heo {
538*e22bee78STejun Heo 	struct worker *worker = kthread_data(task);
539*e22bee78STejun Heo 
540*e22bee78STejun Heo 	if (likely(!(worker->flags & WORKER_NOT_RUNNING)))
541*e22bee78STejun Heo 		atomic_inc(get_gcwq_nr_running(cpu));
542*e22bee78STejun Heo }
543*e22bee78STejun Heo 
544*e22bee78STejun Heo /**
545*e22bee78STejun Heo  * wq_worker_sleeping - a worker is going to sleep
546*e22bee78STejun Heo  * @task: task going to sleep
547*e22bee78STejun Heo  * @cpu: CPU in question, must be the current CPU number
548*e22bee78STejun Heo  *
549*e22bee78STejun Heo  * This function is called during schedule() when a busy worker is
550*e22bee78STejun Heo  * going to sleep.  Worker on the same cpu can be woken up by
551*e22bee78STejun Heo  * returning pointer to its task.
552*e22bee78STejun Heo  *
553*e22bee78STejun Heo  * CONTEXT:
554*e22bee78STejun Heo  * spin_lock_irq(rq->lock)
555*e22bee78STejun Heo  *
556*e22bee78STejun Heo  * RETURNS:
557*e22bee78STejun Heo  * Worker task on @cpu to wake up, %NULL if none.
558*e22bee78STejun Heo  */
559*e22bee78STejun Heo struct task_struct *wq_worker_sleeping(struct task_struct *task,
560*e22bee78STejun Heo 				       unsigned int cpu)
561*e22bee78STejun Heo {
562*e22bee78STejun Heo 	struct worker *worker = kthread_data(task), *to_wakeup = NULL;
563*e22bee78STejun Heo 	struct global_cwq *gcwq = get_gcwq(cpu);
564*e22bee78STejun Heo 	atomic_t *nr_running = get_gcwq_nr_running(cpu);
565*e22bee78STejun Heo 
566*e22bee78STejun Heo 	if (unlikely(worker->flags & WORKER_NOT_RUNNING))
567*e22bee78STejun Heo 		return NULL;
568*e22bee78STejun Heo 
569*e22bee78STejun Heo 	/* this can only happen on the local cpu */
570*e22bee78STejun Heo 	BUG_ON(cpu != raw_smp_processor_id());
571*e22bee78STejun Heo 
572*e22bee78STejun Heo 	/*
573*e22bee78STejun Heo 	 * The counterpart of the following dec_and_test, implied mb,
574*e22bee78STejun Heo 	 * worklist not empty test sequence is in insert_work().
575*e22bee78STejun Heo 	 * Please read comment there.
576*e22bee78STejun Heo 	 *
577*e22bee78STejun Heo 	 * NOT_RUNNING is clear.  This means that trustee is not in
578*e22bee78STejun Heo 	 * charge and we're running on the local cpu w/ rq lock held
579*e22bee78STejun Heo 	 * and preemption disabled, which in turn means that none else
580*e22bee78STejun Heo 	 * could be manipulating idle_list, so dereferencing idle_list
581*e22bee78STejun Heo 	 * without gcwq lock is safe.
582*e22bee78STejun Heo 	 */
583*e22bee78STejun Heo 	if (atomic_dec_and_test(nr_running) && !list_empty(&gcwq->worklist))
584*e22bee78STejun Heo 		to_wakeup = first_worker(gcwq);
585*e22bee78STejun Heo 	return to_wakeup ? to_wakeup->task : NULL;
586*e22bee78STejun Heo }
587*e22bee78STejun Heo 
588*e22bee78STejun Heo /**
589*e22bee78STejun Heo  * worker_set_flags - set worker flags and adjust nr_running accordingly
590d302f017STejun Heo  * @worker: worker to set flags for
591d302f017STejun Heo  * @flags: flags to set
592d302f017STejun Heo  * @wakeup: wakeup an idle worker if necessary
593d302f017STejun Heo  *
594*e22bee78STejun Heo  * Set @flags in @worker->flags and adjust nr_running accordingly.  If
595*e22bee78STejun Heo  * nr_running becomes zero and @wakeup is %true, an idle worker is
596*e22bee78STejun Heo  * woken up.
597d302f017STejun Heo  *
598d302f017STejun Heo  * LOCKING:
599d302f017STejun Heo  * spin_lock_irq(gcwq->lock).
600d302f017STejun Heo  */
601d302f017STejun Heo static inline void worker_set_flags(struct worker *worker, unsigned int flags,
602d302f017STejun Heo 				    bool wakeup)
603d302f017STejun Heo {
604*e22bee78STejun Heo 	struct global_cwq *gcwq = worker->gcwq;
605*e22bee78STejun Heo 
606*e22bee78STejun Heo 	/*
607*e22bee78STejun Heo 	 * If transitioning into NOT_RUNNING, adjust nr_running and
608*e22bee78STejun Heo 	 * wake up an idle worker as necessary if requested by
609*e22bee78STejun Heo 	 * @wakeup.
610*e22bee78STejun Heo 	 */
611*e22bee78STejun Heo 	if ((flags & WORKER_NOT_RUNNING) &&
612*e22bee78STejun Heo 	    !(worker->flags & WORKER_NOT_RUNNING)) {
613*e22bee78STejun Heo 		atomic_t *nr_running = get_gcwq_nr_running(gcwq->cpu);
614*e22bee78STejun Heo 
615*e22bee78STejun Heo 		if (wakeup) {
616*e22bee78STejun Heo 			if (atomic_dec_and_test(nr_running) &&
617*e22bee78STejun Heo 			    !list_empty(&gcwq->worklist))
618*e22bee78STejun Heo 				wake_up_worker(gcwq);
619*e22bee78STejun Heo 		} else
620*e22bee78STejun Heo 			atomic_dec(nr_running);
621*e22bee78STejun Heo 	}
622*e22bee78STejun Heo 
623d302f017STejun Heo 	worker->flags |= flags;
624d302f017STejun Heo }
625d302f017STejun Heo 
626d302f017STejun Heo /**
627*e22bee78STejun Heo  * worker_clr_flags - clear worker flags and adjust nr_running accordingly
628d302f017STejun Heo  * @worker: worker to set flags for
629d302f017STejun Heo  * @flags: flags to clear
630d302f017STejun Heo  *
631*e22bee78STejun Heo  * Clear @flags in @worker->flags and adjust nr_running accordingly.
632d302f017STejun Heo  *
633d302f017STejun Heo  * LOCKING:
634d302f017STejun Heo  * spin_lock_irq(gcwq->lock).
635d302f017STejun Heo  */
636d302f017STejun Heo static inline void worker_clr_flags(struct worker *worker, unsigned int flags)
637d302f017STejun Heo {
638*e22bee78STejun Heo 	struct global_cwq *gcwq = worker->gcwq;
639*e22bee78STejun Heo 	unsigned int oflags = worker->flags;
640*e22bee78STejun Heo 
641d302f017STejun Heo 	worker->flags &= ~flags;
642*e22bee78STejun Heo 
643*e22bee78STejun Heo 	/* if transitioning out of NOT_RUNNING, increment nr_running */
644*e22bee78STejun Heo 	if ((flags & WORKER_NOT_RUNNING) && (oflags & WORKER_NOT_RUNNING))
645*e22bee78STejun Heo 		if (!(worker->flags & WORKER_NOT_RUNNING))
646*e22bee78STejun Heo 			atomic_inc(get_gcwq_nr_running(gcwq->cpu));
647d302f017STejun Heo }
648d302f017STejun Heo 
649d302f017STejun Heo /**
650c8e55f36STejun Heo  * busy_worker_head - return the busy hash head for a work
651c8e55f36STejun Heo  * @gcwq: gcwq of interest
652c8e55f36STejun Heo  * @work: work to be hashed
653c8e55f36STejun Heo  *
654c8e55f36STejun Heo  * Return hash head of @gcwq for @work.
655c8e55f36STejun Heo  *
656c8e55f36STejun Heo  * CONTEXT:
657c8e55f36STejun Heo  * spin_lock_irq(gcwq->lock).
658c8e55f36STejun Heo  *
659c8e55f36STejun Heo  * RETURNS:
660c8e55f36STejun Heo  * Pointer to the hash head.
661c8e55f36STejun Heo  */
662c8e55f36STejun Heo static struct hlist_head *busy_worker_head(struct global_cwq *gcwq,
663c8e55f36STejun Heo 					   struct work_struct *work)
664c8e55f36STejun Heo {
665c8e55f36STejun Heo 	const int base_shift = ilog2(sizeof(struct work_struct));
666c8e55f36STejun Heo 	unsigned long v = (unsigned long)work;
667c8e55f36STejun Heo 
668c8e55f36STejun Heo 	/* simple shift and fold hash, do we need something better? */
669c8e55f36STejun Heo 	v >>= base_shift;
670c8e55f36STejun Heo 	v += v >> BUSY_WORKER_HASH_ORDER;
671c8e55f36STejun Heo 	v &= BUSY_WORKER_HASH_MASK;
672c8e55f36STejun Heo 
673c8e55f36STejun Heo 	return &gcwq->busy_hash[v];
674c8e55f36STejun Heo }
675c8e55f36STejun Heo 
676c8e55f36STejun Heo /**
6778cca0eeaSTejun Heo  * __find_worker_executing_work - find worker which is executing a work
6788cca0eeaSTejun Heo  * @gcwq: gcwq of interest
6798cca0eeaSTejun Heo  * @bwh: hash head as returned by busy_worker_head()
6808cca0eeaSTejun Heo  * @work: work to find worker for
6818cca0eeaSTejun Heo  *
6828cca0eeaSTejun Heo  * Find a worker which is executing @work on @gcwq.  @bwh should be
6838cca0eeaSTejun Heo  * the hash head obtained by calling busy_worker_head() with the same
6848cca0eeaSTejun Heo  * work.
6858cca0eeaSTejun Heo  *
6868cca0eeaSTejun Heo  * CONTEXT:
6878cca0eeaSTejun Heo  * spin_lock_irq(gcwq->lock).
6888cca0eeaSTejun Heo  *
6898cca0eeaSTejun Heo  * RETURNS:
6908cca0eeaSTejun Heo  * Pointer to worker which is executing @work if found, NULL
6918cca0eeaSTejun Heo  * otherwise.
6928cca0eeaSTejun Heo  */
6938cca0eeaSTejun Heo static struct worker *__find_worker_executing_work(struct global_cwq *gcwq,
6948cca0eeaSTejun Heo 						   struct hlist_head *bwh,
6958cca0eeaSTejun Heo 						   struct work_struct *work)
6968cca0eeaSTejun Heo {
6978cca0eeaSTejun Heo 	struct worker *worker;
6988cca0eeaSTejun Heo 	struct hlist_node *tmp;
6998cca0eeaSTejun Heo 
7008cca0eeaSTejun Heo 	hlist_for_each_entry(worker, tmp, bwh, hentry)
7018cca0eeaSTejun Heo 		if (worker->current_work == work)
7028cca0eeaSTejun Heo 			return worker;
7038cca0eeaSTejun Heo 	return NULL;
7048cca0eeaSTejun Heo }
7058cca0eeaSTejun Heo 
7068cca0eeaSTejun Heo /**
7078cca0eeaSTejun Heo  * find_worker_executing_work - find worker which is executing a work
7088cca0eeaSTejun Heo  * @gcwq: gcwq of interest
7098cca0eeaSTejun Heo  * @work: work to find worker for
7108cca0eeaSTejun Heo  *
7118cca0eeaSTejun Heo  * Find a worker which is executing @work on @gcwq.  This function is
7128cca0eeaSTejun Heo  * identical to __find_worker_executing_work() except that this
7138cca0eeaSTejun Heo  * function calculates @bwh itself.
7148cca0eeaSTejun Heo  *
7158cca0eeaSTejun Heo  * CONTEXT:
7168cca0eeaSTejun Heo  * spin_lock_irq(gcwq->lock).
7178cca0eeaSTejun Heo  *
7188cca0eeaSTejun Heo  * RETURNS:
7198cca0eeaSTejun Heo  * Pointer to worker which is executing @work if found, NULL
7208cca0eeaSTejun Heo  * otherwise.
7218cca0eeaSTejun Heo  */
7228cca0eeaSTejun Heo static struct worker *find_worker_executing_work(struct global_cwq *gcwq,
7238cca0eeaSTejun Heo 						 struct work_struct *work)
7248cca0eeaSTejun Heo {
7258cca0eeaSTejun Heo 	return __find_worker_executing_work(gcwq, busy_worker_head(gcwq, work),
7268cca0eeaSTejun Heo 					    work);
7278cca0eeaSTejun Heo }
7288cca0eeaSTejun Heo 
7298cca0eeaSTejun Heo /**
7307e11629dSTejun Heo  * insert_work - insert a work into gcwq
7314690c4abSTejun Heo  * @cwq: cwq @work belongs to
7324690c4abSTejun Heo  * @work: work to insert
7334690c4abSTejun Heo  * @head: insertion point
7344690c4abSTejun Heo  * @extra_flags: extra WORK_STRUCT_* flags to set
7354690c4abSTejun Heo  *
7367e11629dSTejun Heo  * Insert @work which belongs to @cwq into @gcwq after @head.
7377e11629dSTejun Heo  * @extra_flags is or'd to work_struct flags.
7384690c4abSTejun Heo  *
7394690c4abSTejun Heo  * CONTEXT:
7408b03ae3cSTejun Heo  * spin_lock_irq(gcwq->lock).
7414690c4abSTejun Heo  */
742b89deed3SOleg Nesterov static void insert_work(struct cpu_workqueue_struct *cwq,
7434690c4abSTejun Heo 			struct work_struct *work, struct list_head *head,
7444690c4abSTejun Heo 			unsigned int extra_flags)
745b89deed3SOleg Nesterov {
746*e22bee78STejun Heo 	struct global_cwq *gcwq = cwq->gcwq;
747*e22bee78STejun Heo 
7484690c4abSTejun Heo 	/* we own @work, set data and link */
7497a22ad75STejun Heo 	set_work_cwq(work, cwq, extra_flags);
7504690c4abSTejun Heo 
7516e84d644SOleg Nesterov 	/*
7526e84d644SOleg Nesterov 	 * Ensure that we get the right work->data if we see the
7536e84d644SOleg Nesterov 	 * result of list_add() below, see try_to_grab_pending().
7546e84d644SOleg Nesterov 	 */
7556e84d644SOleg Nesterov 	smp_wmb();
7564690c4abSTejun Heo 
7571a4d9b0aSOleg Nesterov 	list_add_tail(&work->entry, head);
758*e22bee78STejun Heo 
759*e22bee78STejun Heo 	/*
760*e22bee78STejun Heo 	 * Ensure either worker_sched_deactivated() sees the above
761*e22bee78STejun Heo 	 * list_add_tail() or we see zero nr_running to avoid workers
762*e22bee78STejun Heo 	 * lying around lazily while there are works to be processed.
763*e22bee78STejun Heo 	 */
764*e22bee78STejun Heo 	smp_mb();
765*e22bee78STejun Heo 
766*e22bee78STejun Heo 	if (!atomic_read(get_gcwq_nr_running(gcwq->cpu)))
767*e22bee78STejun Heo 		wake_up_worker(gcwq);
768b89deed3SOleg Nesterov }
769b89deed3SOleg Nesterov 
770502ca9d8STejun Heo /**
771502ca9d8STejun Heo  * cwq_unbind_single_cpu - unbind cwq from single cpu workqueue processing
772502ca9d8STejun Heo  * @cwq: cwq to unbind
773502ca9d8STejun Heo  *
774502ca9d8STejun Heo  * Try to unbind @cwq from single cpu workqueue processing.  If
775502ca9d8STejun Heo  * @cwq->wq is frozen, unbind is delayed till the workqueue is thawed.
776502ca9d8STejun Heo  *
777502ca9d8STejun Heo  * CONTEXT:
778502ca9d8STejun Heo  * spin_lock_irq(gcwq->lock).
779502ca9d8STejun Heo  */
780502ca9d8STejun Heo static void cwq_unbind_single_cpu(struct cpu_workqueue_struct *cwq)
781502ca9d8STejun Heo {
782502ca9d8STejun Heo 	struct workqueue_struct *wq = cwq->wq;
783502ca9d8STejun Heo 	struct global_cwq *gcwq = cwq->gcwq;
784502ca9d8STejun Heo 
785502ca9d8STejun Heo 	BUG_ON(wq->single_cpu != gcwq->cpu);
786502ca9d8STejun Heo 	/*
787502ca9d8STejun Heo 	 * Unbind from workqueue if @cwq is not frozen.  If frozen,
788502ca9d8STejun Heo 	 * thaw_workqueues() will either restart processing on this
789502ca9d8STejun Heo 	 * cpu or unbind if empty.  This keeps works queued while
790502ca9d8STejun Heo 	 * frozen fully ordered and flushable.
791502ca9d8STejun Heo 	 */
792502ca9d8STejun Heo 	if (likely(!(gcwq->flags & GCWQ_FREEZING))) {
793502ca9d8STejun Heo 		smp_wmb();	/* paired with cmpxchg() in __queue_work() */
794502ca9d8STejun Heo 		wq->single_cpu = NR_CPUS;
795502ca9d8STejun Heo 	}
796502ca9d8STejun Heo }
797502ca9d8STejun Heo 
7984690c4abSTejun Heo static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
7991da177e4SLinus Torvalds 			 struct work_struct *work)
8001da177e4SLinus Torvalds {
801502ca9d8STejun Heo 	struct global_cwq *gcwq;
802502ca9d8STejun Heo 	struct cpu_workqueue_struct *cwq;
8031e19ffc6STejun Heo 	struct list_head *worklist;
8041da177e4SLinus Torvalds 	unsigned long flags;
805502ca9d8STejun Heo 	bool arbitrate;
8061da177e4SLinus Torvalds 
807dc186ad7SThomas Gleixner 	debug_work_activate(work);
8081e19ffc6STejun Heo 
80918aa9effSTejun Heo 	/*
81018aa9effSTejun Heo 	 * Determine gcwq to use.  SINGLE_CPU is inherently
81118aa9effSTejun Heo 	 * NON_REENTRANT, so test it first.
81218aa9effSTejun Heo 	 */
813502ca9d8STejun Heo 	if (!(wq->flags & WQ_SINGLE_CPU)) {
81418aa9effSTejun Heo 		struct global_cwq *last_gcwq;
81518aa9effSTejun Heo 
81618aa9effSTejun Heo 		/*
81718aa9effSTejun Heo 		 * It's multi cpu.  If @wq is non-reentrant and @work
81818aa9effSTejun Heo 		 * was previously on a different cpu, it might still
81918aa9effSTejun Heo 		 * be running there, in which case the work needs to
82018aa9effSTejun Heo 		 * be queued on that cpu to guarantee non-reentrance.
82118aa9effSTejun Heo 		 */
822502ca9d8STejun Heo 		gcwq = get_gcwq(cpu);
82318aa9effSTejun Heo 		if (wq->flags & WQ_NON_REENTRANT &&
82418aa9effSTejun Heo 		    (last_gcwq = get_work_gcwq(work)) && last_gcwq != gcwq) {
82518aa9effSTejun Heo 			struct worker *worker;
82618aa9effSTejun Heo 
82718aa9effSTejun Heo 			spin_lock_irqsave(&last_gcwq->lock, flags);
82818aa9effSTejun Heo 
82918aa9effSTejun Heo 			worker = find_worker_executing_work(last_gcwq, work);
83018aa9effSTejun Heo 
83118aa9effSTejun Heo 			if (worker && worker->current_cwq->wq == wq)
83218aa9effSTejun Heo 				gcwq = last_gcwq;
83318aa9effSTejun Heo 			else {
83418aa9effSTejun Heo 				/* meh... not running there, queue here */
83518aa9effSTejun Heo 				spin_unlock_irqrestore(&last_gcwq->lock, flags);
83618aa9effSTejun Heo 				spin_lock_irqsave(&gcwq->lock, flags);
83718aa9effSTejun Heo 			}
83818aa9effSTejun Heo 		} else
8398b03ae3cSTejun Heo 			spin_lock_irqsave(&gcwq->lock, flags);
840502ca9d8STejun Heo 	} else {
841502ca9d8STejun Heo 		unsigned int req_cpu = cpu;
842502ca9d8STejun Heo 
843502ca9d8STejun Heo 		/*
844502ca9d8STejun Heo 		 * It's a bit more complex for single cpu workqueues.
845502ca9d8STejun Heo 		 * We first need to determine which cpu is going to be
846502ca9d8STejun Heo 		 * used.  If no cpu is currently serving this
847502ca9d8STejun Heo 		 * workqueue, arbitrate using atomic accesses to
848502ca9d8STejun Heo 		 * wq->single_cpu; otherwise, use the current one.
849502ca9d8STejun Heo 		 */
850502ca9d8STejun Heo 	retry:
851502ca9d8STejun Heo 		cpu = wq->single_cpu;
852502ca9d8STejun Heo 		arbitrate = cpu == NR_CPUS;
853502ca9d8STejun Heo 		if (arbitrate)
854502ca9d8STejun Heo 			cpu = req_cpu;
855502ca9d8STejun Heo 
856502ca9d8STejun Heo 		gcwq = get_gcwq(cpu);
857502ca9d8STejun Heo 		spin_lock_irqsave(&gcwq->lock, flags);
858502ca9d8STejun Heo 
859502ca9d8STejun Heo 		/*
860502ca9d8STejun Heo 		 * The following cmpxchg() is a full barrier paired
861502ca9d8STejun Heo 		 * with smp_wmb() in cwq_unbind_single_cpu() and
862502ca9d8STejun Heo 		 * guarantees that all changes to wq->st_* fields are
863502ca9d8STejun Heo 		 * visible on the new cpu after this point.
864502ca9d8STejun Heo 		 */
865502ca9d8STejun Heo 		if (arbitrate)
866502ca9d8STejun Heo 			cmpxchg(&wq->single_cpu, NR_CPUS, cpu);
867502ca9d8STejun Heo 
868502ca9d8STejun Heo 		if (unlikely(wq->single_cpu != cpu)) {
869502ca9d8STejun Heo 			spin_unlock_irqrestore(&gcwq->lock, flags);
870502ca9d8STejun Heo 			goto retry;
871502ca9d8STejun Heo 		}
872502ca9d8STejun Heo 	}
873502ca9d8STejun Heo 
874502ca9d8STejun Heo 	/* gcwq determined, get cwq and queue */
875502ca9d8STejun Heo 	cwq = get_cwq(gcwq->cpu, wq);
876502ca9d8STejun Heo 
8774690c4abSTejun Heo 	BUG_ON(!list_empty(&work->entry));
8781e19ffc6STejun Heo 
87973f53c4aSTejun Heo 	cwq->nr_in_flight[cwq->work_color]++;
8801e19ffc6STejun Heo 
8811e19ffc6STejun Heo 	if (likely(cwq->nr_active < cwq->max_active)) {
8821e19ffc6STejun Heo 		cwq->nr_active++;
8837e11629dSTejun Heo 		worklist = &gcwq->worklist;
8841e19ffc6STejun Heo 	} else
8851e19ffc6STejun Heo 		worklist = &cwq->delayed_works;
8861e19ffc6STejun Heo 
8871e19ffc6STejun Heo 	insert_work(cwq, work, worklist, work_color_to_flags(cwq->work_color));
8881e19ffc6STejun Heo 
8898b03ae3cSTejun Heo 	spin_unlock_irqrestore(&gcwq->lock, flags);
8901da177e4SLinus Torvalds }
8911da177e4SLinus Torvalds 
8920fcb78c2SRolf Eike Beer /**
8930fcb78c2SRolf Eike Beer  * queue_work - queue work on a workqueue
8940fcb78c2SRolf Eike Beer  * @wq: workqueue to use
8950fcb78c2SRolf Eike Beer  * @work: work to queue
8960fcb78c2SRolf Eike Beer  *
897057647fcSAlan Stern  * Returns 0 if @work was already on a queue, non-zero otherwise.
8981da177e4SLinus Torvalds  *
89900dfcaf7SOleg Nesterov  * We queue the work to the CPU on which it was submitted, but if the CPU dies
90000dfcaf7SOleg Nesterov  * it can be processed by another CPU.
9011da177e4SLinus Torvalds  */
9027ad5b3a5SHarvey Harrison int queue_work(struct workqueue_struct *wq, struct work_struct *work)
9031da177e4SLinus Torvalds {
904ef1ca236SOleg Nesterov 	int ret;
9051da177e4SLinus Torvalds 
906ef1ca236SOleg Nesterov 	ret = queue_work_on(get_cpu(), wq, work);
907a848e3b6SOleg Nesterov 	put_cpu();
908ef1ca236SOleg Nesterov 
9091da177e4SLinus Torvalds 	return ret;
9101da177e4SLinus Torvalds }
911ae90dd5dSDave Jones EXPORT_SYMBOL_GPL(queue_work);
9121da177e4SLinus Torvalds 
913c1a220e7SZhang Rui /**
914c1a220e7SZhang Rui  * queue_work_on - queue work on specific cpu
915c1a220e7SZhang Rui  * @cpu: CPU number to execute work on
916c1a220e7SZhang Rui  * @wq: workqueue to use
917c1a220e7SZhang Rui  * @work: work to queue
918c1a220e7SZhang Rui  *
919c1a220e7SZhang Rui  * Returns 0 if @work was already on a queue, non-zero otherwise.
920c1a220e7SZhang Rui  *
921c1a220e7SZhang Rui  * We queue the work to a specific CPU, the caller must ensure it
922c1a220e7SZhang Rui  * can't go away.
923c1a220e7SZhang Rui  */
924c1a220e7SZhang Rui int
925c1a220e7SZhang Rui queue_work_on(int cpu, struct workqueue_struct *wq, struct work_struct *work)
926c1a220e7SZhang Rui {
927c1a220e7SZhang Rui 	int ret = 0;
928c1a220e7SZhang Rui 
92922df02bbSTejun Heo 	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
9304690c4abSTejun Heo 		__queue_work(cpu, wq, work);
931c1a220e7SZhang Rui 		ret = 1;
932c1a220e7SZhang Rui 	}
933c1a220e7SZhang Rui 	return ret;
934c1a220e7SZhang Rui }
935c1a220e7SZhang Rui EXPORT_SYMBOL_GPL(queue_work_on);
936c1a220e7SZhang Rui 
9376d141c3fSLi Zefan static void delayed_work_timer_fn(unsigned long __data)
9381da177e4SLinus Torvalds {
93952bad64dSDavid Howells 	struct delayed_work *dwork = (struct delayed_work *)__data;
9407a22ad75STejun Heo 	struct cpu_workqueue_struct *cwq = get_work_cwq(&dwork->work);
9411da177e4SLinus Torvalds 
9424690c4abSTejun Heo 	__queue_work(smp_processor_id(), cwq->wq, &dwork->work);
9431da177e4SLinus Torvalds }
9441da177e4SLinus Torvalds 
9450fcb78c2SRolf Eike Beer /**
9460fcb78c2SRolf Eike Beer  * queue_delayed_work - queue work on a workqueue after delay
9470fcb78c2SRolf Eike Beer  * @wq: workqueue to use
948af9997e4SRandy Dunlap  * @dwork: delayable work to queue
9490fcb78c2SRolf Eike Beer  * @delay: number of jiffies to wait before queueing
9500fcb78c2SRolf Eike Beer  *
951057647fcSAlan Stern  * Returns 0 if @work was already on a queue, non-zero otherwise.
9520fcb78c2SRolf Eike Beer  */
9537ad5b3a5SHarvey Harrison int queue_delayed_work(struct workqueue_struct *wq,
95452bad64dSDavid Howells 			struct delayed_work *dwork, unsigned long delay)
9551da177e4SLinus Torvalds {
95652bad64dSDavid Howells 	if (delay == 0)
95763bc0362SOleg Nesterov 		return queue_work(wq, &dwork->work);
9581da177e4SLinus Torvalds 
95963bc0362SOleg Nesterov 	return queue_delayed_work_on(-1, wq, dwork, delay);
9601da177e4SLinus Torvalds }
961ae90dd5dSDave Jones EXPORT_SYMBOL_GPL(queue_delayed_work);
9621da177e4SLinus Torvalds 
9630fcb78c2SRolf Eike Beer /**
9640fcb78c2SRolf Eike Beer  * queue_delayed_work_on - queue work on specific CPU after delay
9650fcb78c2SRolf Eike Beer  * @cpu: CPU number to execute work on
9660fcb78c2SRolf Eike Beer  * @wq: workqueue to use
967af9997e4SRandy Dunlap  * @dwork: work to queue
9680fcb78c2SRolf Eike Beer  * @delay: number of jiffies to wait before queueing
9690fcb78c2SRolf Eike Beer  *
970057647fcSAlan Stern  * Returns 0 if @work was already on a queue, non-zero otherwise.
9710fcb78c2SRolf Eike Beer  */
9727a6bc1cdSVenkatesh Pallipadi int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
97352bad64dSDavid Howells 			struct delayed_work *dwork, unsigned long delay)
9747a6bc1cdSVenkatesh Pallipadi {
9757a6bc1cdSVenkatesh Pallipadi 	int ret = 0;
97652bad64dSDavid Howells 	struct timer_list *timer = &dwork->timer;
97752bad64dSDavid Howells 	struct work_struct *work = &dwork->work;
9787a6bc1cdSVenkatesh Pallipadi 
97922df02bbSTejun Heo 	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
9807a22ad75STejun Heo 		struct global_cwq *gcwq = get_work_gcwq(work);
9817a22ad75STejun Heo 		unsigned int lcpu = gcwq ? gcwq->cpu : raw_smp_processor_id();
9827a22ad75STejun Heo 
9837a6bc1cdSVenkatesh Pallipadi 		BUG_ON(timer_pending(timer));
9847a6bc1cdSVenkatesh Pallipadi 		BUG_ON(!list_empty(&work->entry));
9857a6bc1cdSVenkatesh Pallipadi 
9868a3e77ccSAndrew Liu 		timer_stats_timer_set_start_info(&dwork->timer);
9877a22ad75STejun Heo 		/*
9887a22ad75STejun Heo 		 * This stores cwq for the moment, for the timer_fn.
9897a22ad75STejun Heo 		 * Note that the work's gcwq is preserved to allow
9907a22ad75STejun Heo 		 * reentrance detection for delayed works.
9917a22ad75STejun Heo 		 */
9927a22ad75STejun Heo 		set_work_cwq(work, get_cwq(lcpu, wq), 0);
9937a6bc1cdSVenkatesh Pallipadi 		timer->expires = jiffies + delay;
99452bad64dSDavid Howells 		timer->data = (unsigned long)dwork;
9957a6bc1cdSVenkatesh Pallipadi 		timer->function = delayed_work_timer_fn;
99663bc0362SOleg Nesterov 
99763bc0362SOleg Nesterov 		if (unlikely(cpu >= 0))
9987a6bc1cdSVenkatesh Pallipadi 			add_timer_on(timer, cpu);
99963bc0362SOleg Nesterov 		else
100063bc0362SOleg Nesterov 			add_timer(timer);
10017a6bc1cdSVenkatesh Pallipadi 		ret = 1;
10027a6bc1cdSVenkatesh Pallipadi 	}
10037a6bc1cdSVenkatesh Pallipadi 	return ret;
10047a6bc1cdSVenkatesh Pallipadi }
1005ae90dd5dSDave Jones EXPORT_SYMBOL_GPL(queue_delayed_work_on);
10061da177e4SLinus Torvalds 
1007c8e55f36STejun Heo /**
1008c8e55f36STejun Heo  * worker_enter_idle - enter idle state
1009c8e55f36STejun Heo  * @worker: worker which is entering idle state
1010c8e55f36STejun Heo  *
1011c8e55f36STejun Heo  * @worker is entering idle state.  Update stats and idle timer if
1012c8e55f36STejun Heo  * necessary.
1013c8e55f36STejun Heo  *
1014c8e55f36STejun Heo  * LOCKING:
1015c8e55f36STejun Heo  * spin_lock_irq(gcwq->lock).
1016c8e55f36STejun Heo  */
1017c8e55f36STejun Heo static void worker_enter_idle(struct worker *worker)
1018c8e55f36STejun Heo {
1019c8e55f36STejun Heo 	struct global_cwq *gcwq = worker->gcwq;
1020c8e55f36STejun Heo 
1021c8e55f36STejun Heo 	BUG_ON(worker->flags & WORKER_IDLE);
1022c8e55f36STejun Heo 	BUG_ON(!list_empty(&worker->entry) &&
1023c8e55f36STejun Heo 	       (worker->hentry.next || worker->hentry.pprev));
1024c8e55f36STejun Heo 
1025d302f017STejun Heo 	worker_set_flags(worker, WORKER_IDLE, false);
1026c8e55f36STejun Heo 	gcwq->nr_idle++;
1027*e22bee78STejun Heo 	worker->last_active = jiffies;
1028c8e55f36STejun Heo 
1029c8e55f36STejun Heo 	/* idle_list is LIFO */
1030c8e55f36STejun Heo 	list_add(&worker->entry, &gcwq->idle_list);
1031db7bccf4STejun Heo 
1032*e22bee78STejun Heo 	if (likely(!(worker->flags & WORKER_ROGUE))) {
1033*e22bee78STejun Heo 		if (too_many_workers(gcwq) && !timer_pending(&gcwq->idle_timer))
1034*e22bee78STejun Heo 			mod_timer(&gcwq->idle_timer,
1035*e22bee78STejun Heo 				  jiffies + IDLE_WORKER_TIMEOUT);
1036*e22bee78STejun Heo 	} else
1037db7bccf4STejun Heo 		wake_up_all(&gcwq->trustee_wait);
1038c8e55f36STejun Heo }
1039c8e55f36STejun Heo 
1040c8e55f36STejun Heo /**
1041c8e55f36STejun Heo  * worker_leave_idle - leave idle state
1042c8e55f36STejun Heo  * @worker: worker which is leaving idle state
1043c8e55f36STejun Heo  *
1044c8e55f36STejun Heo  * @worker is leaving idle state.  Update stats.
1045c8e55f36STejun Heo  *
1046c8e55f36STejun Heo  * LOCKING:
1047c8e55f36STejun Heo  * spin_lock_irq(gcwq->lock).
1048c8e55f36STejun Heo  */
1049c8e55f36STejun Heo static void worker_leave_idle(struct worker *worker)
1050c8e55f36STejun Heo {
1051c8e55f36STejun Heo 	struct global_cwq *gcwq = worker->gcwq;
1052c8e55f36STejun Heo 
1053c8e55f36STejun Heo 	BUG_ON(!(worker->flags & WORKER_IDLE));
1054d302f017STejun Heo 	worker_clr_flags(worker, WORKER_IDLE);
1055c8e55f36STejun Heo 	gcwq->nr_idle--;
1056c8e55f36STejun Heo 	list_del_init(&worker->entry);
1057c8e55f36STejun Heo }
1058c8e55f36STejun Heo 
1059*e22bee78STejun Heo /**
1060*e22bee78STejun Heo  * worker_maybe_bind_and_lock - bind worker to its cpu if possible and lock gcwq
1061*e22bee78STejun Heo  * @worker: self
1062*e22bee78STejun Heo  *
1063*e22bee78STejun Heo  * Works which are scheduled while the cpu is online must at least be
1064*e22bee78STejun Heo  * scheduled to a worker which is bound to the cpu so that if they are
1065*e22bee78STejun Heo  * flushed from cpu callbacks while cpu is going down, they are
1066*e22bee78STejun Heo  * guaranteed to execute on the cpu.
1067*e22bee78STejun Heo  *
1068*e22bee78STejun Heo  * This function is to be used by rogue workers and rescuers to bind
1069*e22bee78STejun Heo  * themselves to the target cpu and may race with cpu going down or
1070*e22bee78STejun Heo  * coming online.  kthread_bind() can't be used because it may put the
1071*e22bee78STejun Heo  * worker to already dead cpu and set_cpus_allowed_ptr() can't be used
1072*e22bee78STejun Heo  * verbatim as it's best effort and blocking and gcwq may be
1073*e22bee78STejun Heo  * [dis]associated in the meantime.
1074*e22bee78STejun Heo  *
1075*e22bee78STejun Heo  * This function tries set_cpus_allowed() and locks gcwq and verifies
1076*e22bee78STejun Heo  * the binding against GCWQ_DISASSOCIATED which is set during
1077*e22bee78STejun Heo  * CPU_DYING and cleared during CPU_ONLINE, so if the worker enters
1078*e22bee78STejun Heo  * idle state or fetches works without dropping lock, it can guarantee
1079*e22bee78STejun Heo  * the scheduling requirement described in the first paragraph.
1080*e22bee78STejun Heo  *
1081*e22bee78STejun Heo  * CONTEXT:
1082*e22bee78STejun Heo  * Might sleep.  Called without any lock but returns with gcwq->lock
1083*e22bee78STejun Heo  * held.
1084*e22bee78STejun Heo  *
1085*e22bee78STejun Heo  * RETURNS:
1086*e22bee78STejun Heo  * %true if the associated gcwq is online (@worker is successfully
1087*e22bee78STejun Heo  * bound), %false if offline.
1088*e22bee78STejun Heo  */
1089*e22bee78STejun Heo static bool worker_maybe_bind_and_lock(struct worker *worker)
1090*e22bee78STejun Heo {
1091*e22bee78STejun Heo 	struct global_cwq *gcwq = worker->gcwq;
1092*e22bee78STejun Heo 	struct task_struct *task = worker->task;
1093*e22bee78STejun Heo 
1094*e22bee78STejun Heo 	while (true) {
1095*e22bee78STejun Heo 		/*
1096*e22bee78STejun Heo 		 * The following call may fail, succeed or succeed
1097*e22bee78STejun Heo 		 * without actually migrating the task to the cpu if
1098*e22bee78STejun Heo 		 * it races with cpu hotunplug operation.  Verify
1099*e22bee78STejun Heo 		 * against GCWQ_DISASSOCIATED.
1100*e22bee78STejun Heo 		 */
1101*e22bee78STejun Heo 		set_cpus_allowed_ptr(task, get_cpu_mask(gcwq->cpu));
1102*e22bee78STejun Heo 
1103*e22bee78STejun Heo 		spin_lock_irq(&gcwq->lock);
1104*e22bee78STejun Heo 		if (gcwq->flags & GCWQ_DISASSOCIATED)
1105*e22bee78STejun Heo 			return false;
1106*e22bee78STejun Heo 		if (task_cpu(task) == gcwq->cpu &&
1107*e22bee78STejun Heo 		    cpumask_equal(&current->cpus_allowed,
1108*e22bee78STejun Heo 				  get_cpu_mask(gcwq->cpu)))
1109*e22bee78STejun Heo 			return true;
1110*e22bee78STejun Heo 		spin_unlock_irq(&gcwq->lock);
1111*e22bee78STejun Heo 
1112*e22bee78STejun Heo 		/* CPU has come up inbetween, retry migration */
1113*e22bee78STejun Heo 		cpu_relax();
1114*e22bee78STejun Heo 	}
1115*e22bee78STejun Heo }
1116*e22bee78STejun Heo 
1117*e22bee78STejun Heo /*
1118*e22bee78STejun Heo  * Function for worker->rebind_work used to rebind rogue busy workers
1119*e22bee78STejun Heo  * to the associated cpu which is coming back online.  This is
1120*e22bee78STejun Heo  * scheduled by cpu up but can race with other cpu hotplug operations
1121*e22bee78STejun Heo  * and may be executed twice without intervening cpu down.
1122*e22bee78STejun Heo  */
1123*e22bee78STejun Heo static void worker_rebind_fn(struct work_struct *work)
1124*e22bee78STejun Heo {
1125*e22bee78STejun Heo 	struct worker *worker = container_of(work, struct worker, rebind_work);
1126*e22bee78STejun Heo 	struct global_cwq *gcwq = worker->gcwq;
1127*e22bee78STejun Heo 
1128*e22bee78STejun Heo 	if (worker_maybe_bind_and_lock(worker))
1129*e22bee78STejun Heo 		worker_clr_flags(worker, WORKER_REBIND);
1130*e22bee78STejun Heo 
1131*e22bee78STejun Heo 	spin_unlock_irq(&gcwq->lock);
1132*e22bee78STejun Heo }
1133*e22bee78STejun Heo 
1134c34056a3STejun Heo static struct worker *alloc_worker(void)
1135c34056a3STejun Heo {
1136c34056a3STejun Heo 	struct worker *worker;
1137c34056a3STejun Heo 
1138c34056a3STejun Heo 	worker = kzalloc(sizeof(*worker), GFP_KERNEL);
1139c8e55f36STejun Heo 	if (worker) {
1140c8e55f36STejun Heo 		INIT_LIST_HEAD(&worker->entry);
1141affee4b2STejun Heo 		INIT_LIST_HEAD(&worker->scheduled);
1142*e22bee78STejun Heo 		INIT_WORK(&worker->rebind_work, worker_rebind_fn);
1143*e22bee78STejun Heo 		/* on creation a worker is in !idle && prep state */
1144*e22bee78STejun Heo 		worker->flags = WORKER_PREP;
1145c8e55f36STejun Heo 	}
1146c34056a3STejun Heo 	return worker;
1147c34056a3STejun Heo }
1148c34056a3STejun Heo 
1149c34056a3STejun Heo /**
1150c34056a3STejun Heo  * create_worker - create a new workqueue worker
11517e11629dSTejun Heo  * @gcwq: gcwq the new worker will belong to
1152c34056a3STejun Heo  * @bind: whether to set affinity to @cpu or not
1153c34056a3STejun Heo  *
11547e11629dSTejun Heo  * Create a new worker which is bound to @gcwq.  The returned worker
1155c34056a3STejun Heo  * can be started by calling start_worker() or destroyed using
1156c34056a3STejun Heo  * destroy_worker().
1157c34056a3STejun Heo  *
1158c34056a3STejun Heo  * CONTEXT:
1159c34056a3STejun Heo  * Might sleep.  Does GFP_KERNEL allocations.
1160c34056a3STejun Heo  *
1161c34056a3STejun Heo  * RETURNS:
1162c34056a3STejun Heo  * Pointer to the newly created worker.
1163c34056a3STejun Heo  */
11647e11629dSTejun Heo static struct worker *create_worker(struct global_cwq *gcwq, bool bind)
1165c34056a3STejun Heo {
1166c34056a3STejun Heo 	int id = -1;
1167c34056a3STejun Heo 	struct worker *worker = NULL;
1168c34056a3STejun Heo 
11698b03ae3cSTejun Heo 	spin_lock_irq(&gcwq->lock);
11708b03ae3cSTejun Heo 	while (ida_get_new(&gcwq->worker_ida, &id)) {
11718b03ae3cSTejun Heo 		spin_unlock_irq(&gcwq->lock);
11728b03ae3cSTejun Heo 		if (!ida_pre_get(&gcwq->worker_ida, GFP_KERNEL))
1173c34056a3STejun Heo 			goto fail;
11748b03ae3cSTejun Heo 		spin_lock_irq(&gcwq->lock);
1175c34056a3STejun Heo 	}
11768b03ae3cSTejun Heo 	spin_unlock_irq(&gcwq->lock);
1177c34056a3STejun Heo 
1178c34056a3STejun Heo 	worker = alloc_worker();
1179c34056a3STejun Heo 	if (!worker)
1180c34056a3STejun Heo 		goto fail;
1181c34056a3STejun Heo 
11828b03ae3cSTejun Heo 	worker->gcwq = gcwq;
1183c34056a3STejun Heo 	worker->id = id;
1184c34056a3STejun Heo 
1185c34056a3STejun Heo 	worker->task = kthread_create(worker_thread, worker, "kworker/%u:%d",
11868b03ae3cSTejun Heo 				      gcwq->cpu, id);
1187c34056a3STejun Heo 	if (IS_ERR(worker->task))
1188c34056a3STejun Heo 		goto fail;
1189c34056a3STejun Heo 
1190db7bccf4STejun Heo 	/*
1191db7bccf4STejun Heo 	 * A rogue worker will become a regular one if CPU comes
1192db7bccf4STejun Heo 	 * online later on.  Make sure every worker has
1193db7bccf4STejun Heo 	 * PF_THREAD_BOUND set.
1194db7bccf4STejun Heo 	 */
1195c34056a3STejun Heo 	if (bind)
11968b03ae3cSTejun Heo 		kthread_bind(worker->task, gcwq->cpu);
1197db7bccf4STejun Heo 	else
1198db7bccf4STejun Heo 		worker->task->flags |= PF_THREAD_BOUND;
1199c34056a3STejun Heo 
1200c34056a3STejun Heo 	return worker;
1201c34056a3STejun Heo fail:
1202c34056a3STejun Heo 	if (id >= 0) {
12038b03ae3cSTejun Heo 		spin_lock_irq(&gcwq->lock);
12048b03ae3cSTejun Heo 		ida_remove(&gcwq->worker_ida, id);
12058b03ae3cSTejun Heo 		spin_unlock_irq(&gcwq->lock);
1206c34056a3STejun Heo 	}
1207c34056a3STejun Heo 	kfree(worker);
1208c34056a3STejun Heo 	return NULL;
1209c34056a3STejun Heo }
1210c34056a3STejun Heo 
1211c34056a3STejun Heo /**
1212c34056a3STejun Heo  * start_worker - start a newly created worker
1213c34056a3STejun Heo  * @worker: worker to start
1214c34056a3STejun Heo  *
1215c8e55f36STejun Heo  * Make the gcwq aware of @worker and start it.
1216c34056a3STejun Heo  *
1217c34056a3STejun Heo  * CONTEXT:
12188b03ae3cSTejun Heo  * spin_lock_irq(gcwq->lock).
1219c34056a3STejun Heo  */
1220c34056a3STejun Heo static void start_worker(struct worker *worker)
1221c34056a3STejun Heo {
1222d302f017STejun Heo 	worker_set_flags(worker, WORKER_STARTED, false);
1223c8e55f36STejun Heo 	worker->gcwq->nr_workers++;
1224c8e55f36STejun Heo 	worker_enter_idle(worker);
1225c34056a3STejun Heo 	wake_up_process(worker->task);
1226c34056a3STejun Heo }
1227c34056a3STejun Heo 
1228c34056a3STejun Heo /**
1229c34056a3STejun Heo  * destroy_worker - destroy a workqueue worker
1230c34056a3STejun Heo  * @worker: worker to be destroyed
1231c34056a3STejun Heo  *
1232c8e55f36STejun Heo  * Destroy @worker and adjust @gcwq stats accordingly.
1233c8e55f36STejun Heo  *
1234c8e55f36STejun Heo  * CONTEXT:
1235c8e55f36STejun Heo  * spin_lock_irq(gcwq->lock) which is released and regrabbed.
1236c34056a3STejun Heo  */
1237c34056a3STejun Heo static void destroy_worker(struct worker *worker)
1238c34056a3STejun Heo {
12398b03ae3cSTejun Heo 	struct global_cwq *gcwq = worker->gcwq;
1240c34056a3STejun Heo 	int id = worker->id;
1241c34056a3STejun Heo 
1242c34056a3STejun Heo 	/* sanity check frenzy */
1243c34056a3STejun Heo 	BUG_ON(worker->current_work);
1244affee4b2STejun Heo 	BUG_ON(!list_empty(&worker->scheduled));
1245c34056a3STejun Heo 
1246c8e55f36STejun Heo 	if (worker->flags & WORKER_STARTED)
1247c8e55f36STejun Heo 		gcwq->nr_workers--;
1248c8e55f36STejun Heo 	if (worker->flags & WORKER_IDLE)
1249c8e55f36STejun Heo 		gcwq->nr_idle--;
1250c8e55f36STejun Heo 
1251c8e55f36STejun Heo 	list_del_init(&worker->entry);
1252d302f017STejun Heo 	worker_set_flags(worker, WORKER_DIE, false);
1253c8e55f36STejun Heo 
1254c8e55f36STejun Heo 	spin_unlock_irq(&gcwq->lock);
1255c8e55f36STejun Heo 
1256c34056a3STejun Heo 	kthread_stop(worker->task);
1257c34056a3STejun Heo 	kfree(worker);
1258c34056a3STejun Heo 
12598b03ae3cSTejun Heo 	spin_lock_irq(&gcwq->lock);
12608b03ae3cSTejun Heo 	ida_remove(&gcwq->worker_ida, id);
1261c34056a3STejun Heo }
1262c34056a3STejun Heo 
1263*e22bee78STejun Heo static void idle_worker_timeout(unsigned long __gcwq)
1264*e22bee78STejun Heo {
1265*e22bee78STejun Heo 	struct global_cwq *gcwq = (void *)__gcwq;
1266*e22bee78STejun Heo 
1267*e22bee78STejun Heo 	spin_lock_irq(&gcwq->lock);
1268*e22bee78STejun Heo 
1269*e22bee78STejun Heo 	if (too_many_workers(gcwq)) {
1270*e22bee78STejun Heo 		struct worker *worker;
1271*e22bee78STejun Heo 		unsigned long expires;
1272*e22bee78STejun Heo 
1273*e22bee78STejun Heo 		/* idle_list is kept in LIFO order, check the last one */
1274*e22bee78STejun Heo 		worker = list_entry(gcwq->idle_list.prev, struct worker, entry);
1275*e22bee78STejun Heo 		expires = worker->last_active + IDLE_WORKER_TIMEOUT;
1276*e22bee78STejun Heo 
1277*e22bee78STejun Heo 		if (time_before(jiffies, expires))
1278*e22bee78STejun Heo 			mod_timer(&gcwq->idle_timer, expires);
1279*e22bee78STejun Heo 		else {
1280*e22bee78STejun Heo 			/* it's been idle for too long, wake up manager */
1281*e22bee78STejun Heo 			gcwq->flags |= GCWQ_MANAGE_WORKERS;
1282*e22bee78STejun Heo 			wake_up_worker(gcwq);
1283*e22bee78STejun Heo 		}
1284*e22bee78STejun Heo 	}
1285*e22bee78STejun Heo 
1286*e22bee78STejun Heo 	spin_unlock_irq(&gcwq->lock);
1287*e22bee78STejun Heo }
1288*e22bee78STejun Heo 
1289*e22bee78STejun Heo static bool send_mayday(struct work_struct *work)
1290*e22bee78STejun Heo {
1291*e22bee78STejun Heo 	struct cpu_workqueue_struct *cwq = get_work_cwq(work);
1292*e22bee78STejun Heo 	struct workqueue_struct *wq = cwq->wq;
1293*e22bee78STejun Heo 
1294*e22bee78STejun Heo 	if (!(wq->flags & WQ_RESCUER))
1295*e22bee78STejun Heo 		return false;
1296*e22bee78STejun Heo 
1297*e22bee78STejun Heo 	/* mayday mayday mayday */
1298*e22bee78STejun Heo 	if (!cpumask_test_and_set_cpu(cwq->gcwq->cpu, wq->mayday_mask))
1299*e22bee78STejun Heo 		wake_up_process(wq->rescuer->task);
1300*e22bee78STejun Heo 	return true;
1301*e22bee78STejun Heo }
1302*e22bee78STejun Heo 
1303*e22bee78STejun Heo static void gcwq_mayday_timeout(unsigned long __gcwq)
1304*e22bee78STejun Heo {
1305*e22bee78STejun Heo 	struct global_cwq *gcwq = (void *)__gcwq;
1306*e22bee78STejun Heo 	struct work_struct *work;
1307*e22bee78STejun Heo 
1308*e22bee78STejun Heo 	spin_lock_irq(&gcwq->lock);
1309*e22bee78STejun Heo 
1310*e22bee78STejun Heo 	if (need_to_create_worker(gcwq)) {
1311*e22bee78STejun Heo 		/*
1312*e22bee78STejun Heo 		 * We've been trying to create a new worker but
1313*e22bee78STejun Heo 		 * haven't been successful.  We might be hitting an
1314*e22bee78STejun Heo 		 * allocation deadlock.  Send distress signals to
1315*e22bee78STejun Heo 		 * rescuers.
1316*e22bee78STejun Heo 		 */
1317*e22bee78STejun Heo 		list_for_each_entry(work, &gcwq->worklist, entry)
1318*e22bee78STejun Heo 			send_mayday(work);
1319*e22bee78STejun Heo 	}
1320*e22bee78STejun Heo 
1321*e22bee78STejun Heo 	spin_unlock_irq(&gcwq->lock);
1322*e22bee78STejun Heo 
1323*e22bee78STejun Heo 	mod_timer(&gcwq->mayday_timer, jiffies + MAYDAY_INTERVAL);
1324*e22bee78STejun Heo }
1325*e22bee78STejun Heo 
1326*e22bee78STejun Heo /**
1327*e22bee78STejun Heo  * maybe_create_worker - create a new worker if necessary
1328*e22bee78STejun Heo  * @gcwq: gcwq to create a new worker for
1329*e22bee78STejun Heo  *
1330*e22bee78STejun Heo  * Create a new worker for @gcwq if necessary.  @gcwq is guaranteed to
1331*e22bee78STejun Heo  * have at least one idle worker on return from this function.  If
1332*e22bee78STejun Heo  * creating a new worker takes longer than MAYDAY_INTERVAL, mayday is
1333*e22bee78STejun Heo  * sent to all rescuers with works scheduled on @gcwq to resolve
1334*e22bee78STejun Heo  * possible allocation deadlock.
1335*e22bee78STejun Heo  *
1336*e22bee78STejun Heo  * On return, need_to_create_worker() is guaranteed to be false and
1337*e22bee78STejun Heo  * may_start_working() true.
1338*e22bee78STejun Heo  *
1339*e22bee78STejun Heo  * LOCKING:
1340*e22bee78STejun Heo  * spin_lock_irq(gcwq->lock) which may be released and regrabbed
1341*e22bee78STejun Heo  * multiple times.  Does GFP_KERNEL allocations.  Called only from
1342*e22bee78STejun Heo  * manager.
1343*e22bee78STejun Heo  *
1344*e22bee78STejun Heo  * RETURNS:
1345*e22bee78STejun Heo  * false if no action was taken and gcwq->lock stayed locked, true
1346*e22bee78STejun Heo  * otherwise.
1347*e22bee78STejun Heo  */
1348*e22bee78STejun Heo static bool maybe_create_worker(struct global_cwq *gcwq)
1349*e22bee78STejun Heo {
1350*e22bee78STejun Heo 	if (!need_to_create_worker(gcwq))
1351*e22bee78STejun Heo 		return false;
1352*e22bee78STejun Heo restart:
1353*e22bee78STejun Heo 	/* if we don't make progress in MAYDAY_INITIAL_TIMEOUT, call for help */
1354*e22bee78STejun Heo 	mod_timer(&gcwq->mayday_timer, jiffies + MAYDAY_INITIAL_TIMEOUT);
1355*e22bee78STejun Heo 
1356*e22bee78STejun Heo 	while (true) {
1357*e22bee78STejun Heo 		struct worker *worker;
1358*e22bee78STejun Heo 
1359*e22bee78STejun Heo 		spin_unlock_irq(&gcwq->lock);
1360*e22bee78STejun Heo 
1361*e22bee78STejun Heo 		worker = create_worker(gcwq, true);
1362*e22bee78STejun Heo 		if (worker) {
1363*e22bee78STejun Heo 			del_timer_sync(&gcwq->mayday_timer);
1364*e22bee78STejun Heo 			spin_lock_irq(&gcwq->lock);
1365*e22bee78STejun Heo 			start_worker(worker);
1366*e22bee78STejun Heo 			BUG_ON(need_to_create_worker(gcwq));
1367*e22bee78STejun Heo 			return true;
1368*e22bee78STejun Heo 		}
1369*e22bee78STejun Heo 
1370*e22bee78STejun Heo 		if (!need_to_create_worker(gcwq))
1371*e22bee78STejun Heo 			break;
1372*e22bee78STejun Heo 
1373*e22bee78STejun Heo 		spin_unlock_irq(&gcwq->lock);
1374*e22bee78STejun Heo 		__set_current_state(TASK_INTERRUPTIBLE);
1375*e22bee78STejun Heo 		schedule_timeout(CREATE_COOLDOWN);
1376*e22bee78STejun Heo 		spin_lock_irq(&gcwq->lock);
1377*e22bee78STejun Heo 		if (!need_to_create_worker(gcwq))
1378*e22bee78STejun Heo 			break;
1379*e22bee78STejun Heo 	}
1380*e22bee78STejun Heo 
1381*e22bee78STejun Heo 	spin_unlock_irq(&gcwq->lock);
1382*e22bee78STejun Heo 	del_timer_sync(&gcwq->mayday_timer);
1383*e22bee78STejun Heo 	spin_lock_irq(&gcwq->lock);
1384*e22bee78STejun Heo 	if (need_to_create_worker(gcwq))
1385*e22bee78STejun Heo 		goto restart;
1386*e22bee78STejun Heo 	return true;
1387*e22bee78STejun Heo }
1388*e22bee78STejun Heo 
1389*e22bee78STejun Heo /**
1390*e22bee78STejun Heo  * maybe_destroy_worker - destroy workers which have been idle for a while
1391*e22bee78STejun Heo  * @gcwq: gcwq to destroy workers for
1392*e22bee78STejun Heo  *
1393*e22bee78STejun Heo  * Destroy @gcwq workers which have been idle for longer than
1394*e22bee78STejun Heo  * IDLE_WORKER_TIMEOUT.
1395*e22bee78STejun Heo  *
1396*e22bee78STejun Heo  * LOCKING:
1397*e22bee78STejun Heo  * spin_lock_irq(gcwq->lock) which may be released and regrabbed
1398*e22bee78STejun Heo  * multiple times.  Called only from manager.
1399*e22bee78STejun Heo  *
1400*e22bee78STejun Heo  * RETURNS:
1401*e22bee78STejun Heo  * false if no action was taken and gcwq->lock stayed locked, true
1402*e22bee78STejun Heo  * otherwise.
1403*e22bee78STejun Heo  */
1404*e22bee78STejun Heo static bool maybe_destroy_workers(struct global_cwq *gcwq)
1405*e22bee78STejun Heo {
1406*e22bee78STejun Heo 	bool ret = false;
1407*e22bee78STejun Heo 
1408*e22bee78STejun Heo 	while (too_many_workers(gcwq)) {
1409*e22bee78STejun Heo 		struct worker *worker;
1410*e22bee78STejun Heo 		unsigned long expires;
1411*e22bee78STejun Heo 
1412*e22bee78STejun Heo 		worker = list_entry(gcwq->idle_list.prev, struct worker, entry);
1413*e22bee78STejun Heo 		expires = worker->last_active + IDLE_WORKER_TIMEOUT;
1414*e22bee78STejun Heo 
1415*e22bee78STejun Heo 		if (time_before(jiffies, expires)) {
1416*e22bee78STejun Heo 			mod_timer(&gcwq->idle_timer, expires);
1417*e22bee78STejun Heo 			break;
1418*e22bee78STejun Heo 		}
1419*e22bee78STejun Heo 
1420*e22bee78STejun Heo 		destroy_worker(worker);
1421*e22bee78STejun Heo 		ret = true;
1422*e22bee78STejun Heo 	}
1423*e22bee78STejun Heo 
1424*e22bee78STejun Heo 	return ret;
1425*e22bee78STejun Heo }
1426*e22bee78STejun Heo 
1427*e22bee78STejun Heo /**
1428*e22bee78STejun Heo  * manage_workers - manage worker pool
1429*e22bee78STejun Heo  * @worker: self
1430*e22bee78STejun Heo  *
1431*e22bee78STejun Heo  * Assume the manager role and manage gcwq worker pool @worker belongs
1432*e22bee78STejun Heo  * to.  At any given time, there can be only zero or one manager per
1433*e22bee78STejun Heo  * gcwq.  The exclusion is handled automatically by this function.
1434*e22bee78STejun Heo  *
1435*e22bee78STejun Heo  * The caller can safely start processing works on false return.  On
1436*e22bee78STejun Heo  * true return, it's guaranteed that need_to_create_worker() is false
1437*e22bee78STejun Heo  * and may_start_working() is true.
1438*e22bee78STejun Heo  *
1439*e22bee78STejun Heo  * CONTEXT:
1440*e22bee78STejun Heo  * spin_lock_irq(gcwq->lock) which may be released and regrabbed
1441*e22bee78STejun Heo  * multiple times.  Does GFP_KERNEL allocations.
1442*e22bee78STejun Heo  *
1443*e22bee78STejun Heo  * RETURNS:
1444*e22bee78STejun Heo  * false if no action was taken and gcwq->lock stayed locked, true if
1445*e22bee78STejun Heo  * some action was taken.
1446*e22bee78STejun Heo  */
1447*e22bee78STejun Heo static bool manage_workers(struct worker *worker)
1448*e22bee78STejun Heo {
1449*e22bee78STejun Heo 	struct global_cwq *gcwq = worker->gcwq;
1450*e22bee78STejun Heo 	bool ret = false;
1451*e22bee78STejun Heo 
1452*e22bee78STejun Heo 	if (gcwq->flags & GCWQ_MANAGING_WORKERS)
1453*e22bee78STejun Heo 		return ret;
1454*e22bee78STejun Heo 
1455*e22bee78STejun Heo 	gcwq->flags &= ~GCWQ_MANAGE_WORKERS;
1456*e22bee78STejun Heo 	gcwq->flags |= GCWQ_MANAGING_WORKERS;
1457*e22bee78STejun Heo 
1458*e22bee78STejun Heo 	/*
1459*e22bee78STejun Heo 	 * Destroy and then create so that may_start_working() is true
1460*e22bee78STejun Heo 	 * on return.
1461*e22bee78STejun Heo 	 */
1462*e22bee78STejun Heo 	ret |= maybe_destroy_workers(gcwq);
1463*e22bee78STejun Heo 	ret |= maybe_create_worker(gcwq);
1464*e22bee78STejun Heo 
1465*e22bee78STejun Heo 	gcwq->flags &= ~GCWQ_MANAGING_WORKERS;
1466*e22bee78STejun Heo 
1467*e22bee78STejun Heo 	/*
1468*e22bee78STejun Heo 	 * The trustee might be waiting to take over the manager
1469*e22bee78STejun Heo 	 * position, tell it we're done.
1470*e22bee78STejun Heo 	 */
1471*e22bee78STejun Heo 	if (unlikely(gcwq->trustee))
1472*e22bee78STejun Heo 		wake_up_all(&gcwq->trustee_wait);
1473*e22bee78STejun Heo 
1474*e22bee78STejun Heo 	return ret;
1475*e22bee78STejun Heo }
1476*e22bee78STejun Heo 
1477a62428c0STejun Heo /**
1478affee4b2STejun Heo  * move_linked_works - move linked works to a list
1479affee4b2STejun Heo  * @work: start of series of works to be scheduled
1480affee4b2STejun Heo  * @head: target list to append @work to
1481affee4b2STejun Heo  * @nextp: out paramter for nested worklist walking
1482affee4b2STejun Heo  *
1483affee4b2STejun Heo  * Schedule linked works starting from @work to @head.  Work series to
1484affee4b2STejun Heo  * be scheduled starts at @work and includes any consecutive work with
1485affee4b2STejun Heo  * WORK_STRUCT_LINKED set in its predecessor.
1486affee4b2STejun Heo  *
1487affee4b2STejun Heo  * If @nextp is not NULL, it's updated to point to the next work of
1488affee4b2STejun Heo  * the last scheduled work.  This allows move_linked_works() to be
1489affee4b2STejun Heo  * nested inside outer list_for_each_entry_safe().
1490affee4b2STejun Heo  *
1491affee4b2STejun Heo  * CONTEXT:
14928b03ae3cSTejun Heo  * spin_lock_irq(gcwq->lock).
1493affee4b2STejun Heo  */
1494affee4b2STejun Heo static void move_linked_works(struct work_struct *work, struct list_head *head,
1495affee4b2STejun Heo 			      struct work_struct **nextp)
1496affee4b2STejun Heo {
1497affee4b2STejun Heo 	struct work_struct *n;
1498affee4b2STejun Heo 
1499affee4b2STejun Heo 	/*
1500affee4b2STejun Heo 	 * Linked worklist will always end before the end of the list,
1501affee4b2STejun Heo 	 * use NULL for list head.
1502affee4b2STejun Heo 	 */
1503affee4b2STejun Heo 	list_for_each_entry_safe_from(work, n, NULL, entry) {
1504affee4b2STejun Heo 		list_move_tail(&work->entry, head);
1505affee4b2STejun Heo 		if (!(*work_data_bits(work) & WORK_STRUCT_LINKED))
1506affee4b2STejun Heo 			break;
1507affee4b2STejun Heo 	}
1508affee4b2STejun Heo 
1509affee4b2STejun Heo 	/*
1510affee4b2STejun Heo 	 * If we're already inside safe list traversal and have moved
1511affee4b2STejun Heo 	 * multiple works to the scheduled queue, the next position
1512affee4b2STejun Heo 	 * needs to be updated.
1513affee4b2STejun Heo 	 */
1514affee4b2STejun Heo 	if (nextp)
1515affee4b2STejun Heo 		*nextp = n;
1516affee4b2STejun Heo }
1517affee4b2STejun Heo 
15181e19ffc6STejun Heo static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq)
15191e19ffc6STejun Heo {
15201e19ffc6STejun Heo 	struct work_struct *work = list_first_entry(&cwq->delayed_works,
15211e19ffc6STejun Heo 						    struct work_struct, entry);
15221e19ffc6STejun Heo 
15237e11629dSTejun Heo 	move_linked_works(work, &cwq->gcwq->worklist, NULL);
15241e19ffc6STejun Heo 	cwq->nr_active++;
15251e19ffc6STejun Heo }
15261e19ffc6STejun Heo 
1527affee4b2STejun Heo /**
152873f53c4aSTejun Heo  * cwq_dec_nr_in_flight - decrement cwq's nr_in_flight
152973f53c4aSTejun Heo  * @cwq: cwq of interest
153073f53c4aSTejun Heo  * @color: color of work which left the queue
153173f53c4aSTejun Heo  *
153273f53c4aSTejun Heo  * A work either has completed or is removed from pending queue,
153373f53c4aSTejun Heo  * decrement nr_in_flight of its cwq and handle workqueue flushing.
153473f53c4aSTejun Heo  *
153573f53c4aSTejun Heo  * CONTEXT:
15368b03ae3cSTejun Heo  * spin_lock_irq(gcwq->lock).
153773f53c4aSTejun Heo  */
153873f53c4aSTejun Heo static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color)
153973f53c4aSTejun Heo {
154073f53c4aSTejun Heo 	/* ignore uncolored works */
154173f53c4aSTejun Heo 	if (color == WORK_NO_COLOR)
154273f53c4aSTejun Heo 		return;
154373f53c4aSTejun Heo 
154473f53c4aSTejun Heo 	cwq->nr_in_flight[color]--;
15451e19ffc6STejun Heo 	cwq->nr_active--;
15461e19ffc6STejun Heo 
1547502ca9d8STejun Heo 	if (!list_empty(&cwq->delayed_works)) {
15481e19ffc6STejun Heo 		/* one down, submit a delayed one */
1549502ca9d8STejun Heo 		if (cwq->nr_active < cwq->max_active)
15501e19ffc6STejun Heo 			cwq_activate_first_delayed(cwq);
1551502ca9d8STejun Heo 	} else if (!cwq->nr_active && cwq->wq->flags & WQ_SINGLE_CPU) {
1552502ca9d8STejun Heo 		/* this was the last work, unbind from single cpu */
1553502ca9d8STejun Heo 		cwq_unbind_single_cpu(cwq);
1554502ca9d8STejun Heo 	}
155573f53c4aSTejun Heo 
155673f53c4aSTejun Heo 	/* is flush in progress and are we at the flushing tip? */
155773f53c4aSTejun Heo 	if (likely(cwq->flush_color != color))
155873f53c4aSTejun Heo 		return;
155973f53c4aSTejun Heo 
156073f53c4aSTejun Heo 	/* are there still in-flight works? */
156173f53c4aSTejun Heo 	if (cwq->nr_in_flight[color])
156273f53c4aSTejun Heo 		return;
156373f53c4aSTejun Heo 
156473f53c4aSTejun Heo 	/* this cwq is done, clear flush_color */
156573f53c4aSTejun Heo 	cwq->flush_color = -1;
156673f53c4aSTejun Heo 
156773f53c4aSTejun Heo 	/*
156873f53c4aSTejun Heo 	 * If this was the last cwq, wake up the first flusher.  It
156973f53c4aSTejun Heo 	 * will handle the rest.
157073f53c4aSTejun Heo 	 */
157173f53c4aSTejun Heo 	if (atomic_dec_and_test(&cwq->wq->nr_cwqs_to_flush))
157273f53c4aSTejun Heo 		complete(&cwq->wq->first_flusher->done);
157373f53c4aSTejun Heo }
157473f53c4aSTejun Heo 
157573f53c4aSTejun Heo /**
1576a62428c0STejun Heo  * process_one_work - process single work
1577c34056a3STejun Heo  * @worker: self
1578a62428c0STejun Heo  * @work: work to process
1579a62428c0STejun Heo  *
1580a62428c0STejun Heo  * Process @work.  This function contains all the logics necessary to
1581a62428c0STejun Heo  * process a single work including synchronization against and
1582a62428c0STejun Heo  * interaction with other workers on the same cpu, queueing and
1583a62428c0STejun Heo  * flushing.  As long as context requirement is met, any worker can
1584a62428c0STejun Heo  * call this function to process a work.
1585a62428c0STejun Heo  *
1586a62428c0STejun Heo  * CONTEXT:
15878b03ae3cSTejun Heo  * spin_lock_irq(gcwq->lock) which is released and regrabbed.
1588a62428c0STejun Heo  */
1589c34056a3STejun Heo static void process_one_work(struct worker *worker, struct work_struct *work)
15901da177e4SLinus Torvalds {
15917e11629dSTejun Heo 	struct cpu_workqueue_struct *cwq = get_work_cwq(work);
15928b03ae3cSTejun Heo 	struct global_cwq *gcwq = cwq->gcwq;
1593c8e55f36STejun Heo 	struct hlist_head *bwh = busy_worker_head(gcwq, work);
15946bb49e59SDavid Howells 	work_func_t f = work->func;
159573f53c4aSTejun Heo 	int work_color;
15967e11629dSTejun Heo 	struct worker *collision;
15974e6045f1SJohannes Berg #ifdef CONFIG_LOCKDEP
15984e6045f1SJohannes Berg 	/*
1599a62428c0STejun Heo 	 * It is permissible to free the struct work_struct from
1600a62428c0STejun Heo 	 * inside the function that is called from it, this we need to
1601a62428c0STejun Heo 	 * take into account for lockdep too.  To avoid bogus "held
1602a62428c0STejun Heo 	 * lock freed" warnings as well as problems when looking into
1603a62428c0STejun Heo 	 * work->lockdep_map, make a copy and use that here.
16044e6045f1SJohannes Berg 	 */
16054e6045f1SJohannes Berg 	struct lockdep_map lockdep_map = work->lockdep_map;
16064e6045f1SJohannes Berg #endif
16077e11629dSTejun Heo 	/*
16087e11629dSTejun Heo 	 * A single work shouldn't be executed concurrently by
16097e11629dSTejun Heo 	 * multiple workers on a single cpu.  Check whether anyone is
16107e11629dSTejun Heo 	 * already processing the work.  If so, defer the work to the
16117e11629dSTejun Heo 	 * currently executing one.
16127e11629dSTejun Heo 	 */
16137e11629dSTejun Heo 	collision = __find_worker_executing_work(gcwq, bwh, work);
16147e11629dSTejun Heo 	if (unlikely(collision)) {
16157e11629dSTejun Heo 		move_linked_works(work, &collision->scheduled, NULL);
16167e11629dSTejun Heo 		return;
16177e11629dSTejun Heo 	}
16187e11629dSTejun Heo 
1619a62428c0STejun Heo 	/* claim and process */
1620dc186ad7SThomas Gleixner 	debug_work_deactivate(work);
1621c8e55f36STejun Heo 	hlist_add_head(&worker->hentry, bwh);
1622c34056a3STejun Heo 	worker->current_work = work;
16238cca0eeaSTejun Heo 	worker->current_cwq = cwq;
162473f53c4aSTejun Heo 	work_color = get_work_color(work);
16257a22ad75STejun Heo 
16267a22ad75STejun Heo 	/* record the current cpu number in the work data and dequeue */
16277a22ad75STejun Heo 	set_work_cpu(work, gcwq->cpu);
1628a62428c0STejun Heo 	list_del_init(&work->entry);
1629a62428c0STejun Heo 
16308b03ae3cSTejun Heo 	spin_unlock_irq(&gcwq->lock);
16311da177e4SLinus Torvalds 
163223b2e599SOleg Nesterov 	work_clear_pending(work);
16333295f0efSIngo Molnar 	lock_map_acquire(&cwq->wq->lockdep_map);
16343295f0efSIngo Molnar 	lock_map_acquire(&lockdep_map);
163565f27f38SDavid Howells 	f(work);
16363295f0efSIngo Molnar 	lock_map_release(&lockdep_map);
16373295f0efSIngo Molnar 	lock_map_release(&cwq->wq->lockdep_map);
16381da177e4SLinus Torvalds 
1639d5abe669SPeter Zijlstra 	if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
1640d5abe669SPeter Zijlstra 		printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
1641d5abe669SPeter Zijlstra 		       "%s/0x%08x/%d\n",
1642a62428c0STejun Heo 		       current->comm, preempt_count(), task_pid_nr(current));
1643d5abe669SPeter Zijlstra 		printk(KERN_ERR "    last function: ");
1644d5abe669SPeter Zijlstra 		print_symbol("%s\n", (unsigned long)f);
1645d5abe669SPeter Zijlstra 		debug_show_held_locks(current);
1646d5abe669SPeter Zijlstra 		dump_stack();
1647d5abe669SPeter Zijlstra 	}
1648d5abe669SPeter Zijlstra 
16498b03ae3cSTejun Heo 	spin_lock_irq(&gcwq->lock);
1650a62428c0STejun Heo 
1651a62428c0STejun Heo 	/* we're done with it, release */
1652c8e55f36STejun Heo 	hlist_del_init(&worker->hentry);
1653c34056a3STejun Heo 	worker->current_work = NULL;
16548cca0eeaSTejun Heo 	worker->current_cwq = NULL;
165573f53c4aSTejun Heo 	cwq_dec_nr_in_flight(cwq, work_color);
16561da177e4SLinus Torvalds }
1657a62428c0STejun Heo 
1658affee4b2STejun Heo /**
1659affee4b2STejun Heo  * process_scheduled_works - process scheduled works
1660affee4b2STejun Heo  * @worker: self
1661affee4b2STejun Heo  *
1662affee4b2STejun Heo  * Process all scheduled works.  Please note that the scheduled list
1663affee4b2STejun Heo  * may change while processing a work, so this function repeatedly
1664affee4b2STejun Heo  * fetches a work from the top and executes it.
1665affee4b2STejun Heo  *
1666affee4b2STejun Heo  * CONTEXT:
16678b03ae3cSTejun Heo  * spin_lock_irq(gcwq->lock) which may be released and regrabbed
1668affee4b2STejun Heo  * multiple times.
1669affee4b2STejun Heo  */
1670affee4b2STejun Heo static void process_scheduled_works(struct worker *worker)
1671a62428c0STejun Heo {
1672affee4b2STejun Heo 	while (!list_empty(&worker->scheduled)) {
1673affee4b2STejun Heo 		struct work_struct *work = list_first_entry(&worker->scheduled,
1674a62428c0STejun Heo 						struct work_struct, entry);
1675c34056a3STejun Heo 		process_one_work(worker, work);
1676a62428c0STejun Heo 	}
16771da177e4SLinus Torvalds }
16781da177e4SLinus Torvalds 
16794690c4abSTejun Heo /**
16804690c4abSTejun Heo  * worker_thread - the worker thread function
1681c34056a3STejun Heo  * @__worker: self
16824690c4abSTejun Heo  *
1683*e22bee78STejun Heo  * The gcwq worker thread function.  There's a single dynamic pool of
1684*e22bee78STejun Heo  * these per each cpu.  These workers process all works regardless of
1685*e22bee78STejun Heo  * their specific target workqueue.  The only exception is works which
1686*e22bee78STejun Heo  * belong to workqueues with a rescuer which will be explained in
1687*e22bee78STejun Heo  * rescuer_thread().
16884690c4abSTejun Heo  */
1689c34056a3STejun Heo static int worker_thread(void *__worker)
16901da177e4SLinus Torvalds {
1691c34056a3STejun Heo 	struct worker *worker = __worker;
16928b03ae3cSTejun Heo 	struct global_cwq *gcwq = worker->gcwq;
16931da177e4SLinus Torvalds 
1694*e22bee78STejun Heo 	/* tell the scheduler that this is a workqueue worker */
1695*e22bee78STejun Heo 	worker->task->flags |= PF_WQ_WORKER;
1696c8e55f36STejun Heo woke_up:
16978b03ae3cSTejun Heo 	spin_lock_irq(&gcwq->lock);
1698affee4b2STejun Heo 
1699c8e55f36STejun Heo 	/* DIE can be set only while we're idle, checking here is enough */
1700c8e55f36STejun Heo 	if (worker->flags & WORKER_DIE) {
1701c8e55f36STejun Heo 		spin_unlock_irq(&gcwq->lock);
1702*e22bee78STejun Heo 		worker->task->flags &= ~PF_WQ_WORKER;
1703c8e55f36STejun Heo 		return 0;
1704c8e55f36STejun Heo 	}
1705c8e55f36STejun Heo 
1706c8e55f36STejun Heo 	worker_leave_idle(worker);
1707db7bccf4STejun Heo recheck:
1708*e22bee78STejun Heo 	/* no more worker necessary? */
1709*e22bee78STejun Heo 	if (!need_more_worker(gcwq))
1710*e22bee78STejun Heo 		goto sleep;
1711*e22bee78STejun Heo 
1712*e22bee78STejun Heo 	/* do we need to manage? */
1713*e22bee78STejun Heo 	if (unlikely(!may_start_working(gcwq)) && manage_workers(worker))
1714*e22bee78STejun Heo 		goto recheck;
1715*e22bee78STejun Heo 
1716c8e55f36STejun Heo 	/*
1717c8e55f36STejun Heo 	 * ->scheduled list can only be filled while a worker is
1718c8e55f36STejun Heo 	 * preparing to process a work or actually processing it.
1719c8e55f36STejun Heo 	 * Make sure nobody diddled with it while I was sleeping.
1720c8e55f36STejun Heo 	 */
1721c8e55f36STejun Heo 	BUG_ON(!list_empty(&worker->scheduled));
1722c8e55f36STejun Heo 
1723*e22bee78STejun Heo 	/*
1724*e22bee78STejun Heo 	 * When control reaches this point, we're guaranteed to have
1725*e22bee78STejun Heo 	 * at least one idle worker or that someone else has already
1726*e22bee78STejun Heo 	 * assumed the manager role.
1727*e22bee78STejun Heo 	 */
1728*e22bee78STejun Heo 	worker_clr_flags(worker, WORKER_PREP);
1729*e22bee78STejun Heo 
1730*e22bee78STejun Heo 	do {
1731affee4b2STejun Heo 		struct work_struct *work =
17327e11629dSTejun Heo 			list_first_entry(&gcwq->worklist,
1733affee4b2STejun Heo 					 struct work_struct, entry);
1734affee4b2STejun Heo 
1735c8e55f36STejun Heo 		if (likely(!(*work_data_bits(work) & WORK_STRUCT_LINKED))) {
1736affee4b2STejun Heo 			/* optimization path, not strictly necessary */
1737affee4b2STejun Heo 			process_one_work(worker, work);
1738affee4b2STejun Heo 			if (unlikely(!list_empty(&worker->scheduled)))
1739affee4b2STejun Heo 				process_scheduled_works(worker);
1740affee4b2STejun Heo 		} else {
1741c8e55f36STejun Heo 			move_linked_works(work, &worker->scheduled, NULL);
1742affee4b2STejun Heo 			process_scheduled_works(worker);
1743affee4b2STejun Heo 		}
1744*e22bee78STejun Heo 	} while (keep_working(gcwq));
1745affee4b2STejun Heo 
1746*e22bee78STejun Heo 	worker_set_flags(worker, WORKER_PREP, false);
1747*e22bee78STejun Heo 
1748*e22bee78STejun Heo 	if (unlikely(need_to_manage_workers(gcwq)) && manage_workers(worker))
1749*e22bee78STejun Heo 		goto recheck;
1750*e22bee78STejun Heo sleep:
1751c8e55f36STejun Heo 	/*
1752*e22bee78STejun Heo 	 * gcwq->lock is held and there's no work to process and no
1753*e22bee78STejun Heo 	 * need to manage, sleep.  Workers are woken up only while
1754*e22bee78STejun Heo 	 * holding gcwq->lock or from local cpu, so setting the
1755*e22bee78STejun Heo 	 * current state before releasing gcwq->lock is enough to
1756*e22bee78STejun Heo 	 * prevent losing any event.
1757c8e55f36STejun Heo 	 */
1758c8e55f36STejun Heo 	worker_enter_idle(worker);
1759c8e55f36STejun Heo 	__set_current_state(TASK_INTERRUPTIBLE);
17608b03ae3cSTejun Heo 	spin_unlock_irq(&gcwq->lock);
1761c8e55f36STejun Heo 	schedule();
1762c8e55f36STejun Heo 	goto woke_up;
17631da177e4SLinus Torvalds }
17641da177e4SLinus Torvalds 
1765*e22bee78STejun Heo /**
1766*e22bee78STejun Heo  * rescuer_thread - the rescuer thread function
1767*e22bee78STejun Heo  * @__wq: the associated workqueue
1768*e22bee78STejun Heo  *
1769*e22bee78STejun Heo  * Workqueue rescuer thread function.  There's one rescuer for each
1770*e22bee78STejun Heo  * workqueue which has WQ_RESCUER set.
1771*e22bee78STejun Heo  *
1772*e22bee78STejun Heo  * Regular work processing on a gcwq may block trying to create a new
1773*e22bee78STejun Heo  * worker which uses GFP_KERNEL allocation which has slight chance of
1774*e22bee78STejun Heo  * developing into deadlock if some works currently on the same queue
1775*e22bee78STejun Heo  * need to be processed to satisfy the GFP_KERNEL allocation.  This is
1776*e22bee78STejun Heo  * the problem rescuer solves.
1777*e22bee78STejun Heo  *
1778*e22bee78STejun Heo  * When such condition is possible, the gcwq summons rescuers of all
1779*e22bee78STejun Heo  * workqueues which have works queued on the gcwq and let them process
1780*e22bee78STejun Heo  * those works so that forward progress can be guaranteed.
1781*e22bee78STejun Heo  *
1782*e22bee78STejun Heo  * This should happen rarely.
1783*e22bee78STejun Heo  */
1784*e22bee78STejun Heo static int rescuer_thread(void *__wq)
1785*e22bee78STejun Heo {
1786*e22bee78STejun Heo 	struct workqueue_struct *wq = __wq;
1787*e22bee78STejun Heo 	struct worker *rescuer = wq->rescuer;
1788*e22bee78STejun Heo 	struct list_head *scheduled = &rescuer->scheduled;
1789*e22bee78STejun Heo 	unsigned int cpu;
1790*e22bee78STejun Heo 
1791*e22bee78STejun Heo 	set_user_nice(current, RESCUER_NICE_LEVEL);
1792*e22bee78STejun Heo repeat:
1793*e22bee78STejun Heo 	set_current_state(TASK_INTERRUPTIBLE);
1794*e22bee78STejun Heo 
1795*e22bee78STejun Heo 	if (kthread_should_stop())
1796*e22bee78STejun Heo 		return 0;
1797*e22bee78STejun Heo 
1798*e22bee78STejun Heo 	for_each_cpu(cpu, wq->mayday_mask) {
1799*e22bee78STejun Heo 		struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
1800*e22bee78STejun Heo 		struct global_cwq *gcwq = cwq->gcwq;
1801*e22bee78STejun Heo 		struct work_struct *work, *n;
1802*e22bee78STejun Heo 
1803*e22bee78STejun Heo 		__set_current_state(TASK_RUNNING);
1804*e22bee78STejun Heo 		cpumask_clear_cpu(cpu, wq->mayday_mask);
1805*e22bee78STejun Heo 
1806*e22bee78STejun Heo 		/* migrate to the target cpu if possible */
1807*e22bee78STejun Heo 		rescuer->gcwq = gcwq;
1808*e22bee78STejun Heo 		worker_maybe_bind_and_lock(rescuer);
1809*e22bee78STejun Heo 
1810*e22bee78STejun Heo 		/*
1811*e22bee78STejun Heo 		 * Slurp in all works issued via this workqueue and
1812*e22bee78STejun Heo 		 * process'em.
1813*e22bee78STejun Heo 		 */
1814*e22bee78STejun Heo 		BUG_ON(!list_empty(&rescuer->scheduled));
1815*e22bee78STejun Heo 		list_for_each_entry_safe(work, n, &gcwq->worklist, entry)
1816*e22bee78STejun Heo 			if (get_work_cwq(work) == cwq)
1817*e22bee78STejun Heo 				move_linked_works(work, scheduled, &n);
1818*e22bee78STejun Heo 
1819*e22bee78STejun Heo 		process_scheduled_works(rescuer);
1820*e22bee78STejun Heo 		spin_unlock_irq(&gcwq->lock);
1821*e22bee78STejun Heo 	}
1822*e22bee78STejun Heo 
1823*e22bee78STejun Heo 	schedule();
1824*e22bee78STejun Heo 	goto repeat;
1825*e22bee78STejun Heo }
1826*e22bee78STejun Heo 
1827fc2e4d70SOleg Nesterov struct wq_barrier {
1828fc2e4d70SOleg Nesterov 	struct work_struct	work;
1829fc2e4d70SOleg Nesterov 	struct completion	done;
1830fc2e4d70SOleg Nesterov };
1831fc2e4d70SOleg Nesterov 
1832fc2e4d70SOleg Nesterov static void wq_barrier_func(struct work_struct *work)
1833fc2e4d70SOleg Nesterov {
1834fc2e4d70SOleg Nesterov 	struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
1835fc2e4d70SOleg Nesterov 	complete(&barr->done);
1836fc2e4d70SOleg Nesterov }
1837fc2e4d70SOleg Nesterov 
18384690c4abSTejun Heo /**
18394690c4abSTejun Heo  * insert_wq_barrier - insert a barrier work
18404690c4abSTejun Heo  * @cwq: cwq to insert barrier into
18414690c4abSTejun Heo  * @barr: wq_barrier to insert
1842affee4b2STejun Heo  * @target: target work to attach @barr to
1843affee4b2STejun Heo  * @worker: worker currently executing @target, NULL if @target is not executing
18444690c4abSTejun Heo  *
1845affee4b2STejun Heo  * @barr is linked to @target such that @barr is completed only after
1846affee4b2STejun Heo  * @target finishes execution.  Please note that the ordering
1847affee4b2STejun Heo  * guarantee is observed only with respect to @target and on the local
1848affee4b2STejun Heo  * cpu.
1849affee4b2STejun Heo  *
1850affee4b2STejun Heo  * Currently, a queued barrier can't be canceled.  This is because
1851affee4b2STejun Heo  * try_to_grab_pending() can't determine whether the work to be
1852affee4b2STejun Heo  * grabbed is at the head of the queue and thus can't clear LINKED
1853affee4b2STejun Heo  * flag of the previous work while there must be a valid next work
1854affee4b2STejun Heo  * after a work with LINKED flag set.
1855affee4b2STejun Heo  *
1856affee4b2STejun Heo  * Note that when @worker is non-NULL, @target may be modified
1857affee4b2STejun Heo  * underneath us, so we can't reliably determine cwq from @target.
18584690c4abSTejun Heo  *
18594690c4abSTejun Heo  * CONTEXT:
18608b03ae3cSTejun Heo  * spin_lock_irq(gcwq->lock).
18614690c4abSTejun Heo  */
186283c22520SOleg Nesterov static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
1863affee4b2STejun Heo 			      struct wq_barrier *barr,
1864affee4b2STejun Heo 			      struct work_struct *target, struct worker *worker)
1865fc2e4d70SOleg Nesterov {
1866affee4b2STejun Heo 	struct list_head *head;
1867affee4b2STejun Heo 	unsigned int linked = 0;
1868affee4b2STejun Heo 
1869dc186ad7SThomas Gleixner 	/*
18708b03ae3cSTejun Heo 	 * debugobject calls are safe here even with gcwq->lock locked
1871dc186ad7SThomas Gleixner 	 * as we know for sure that this will not trigger any of the
1872dc186ad7SThomas Gleixner 	 * checks and call back into the fixup functions where we
1873dc186ad7SThomas Gleixner 	 * might deadlock.
1874dc186ad7SThomas Gleixner 	 */
1875dc186ad7SThomas Gleixner 	INIT_WORK_ON_STACK(&barr->work, wq_barrier_func);
187622df02bbSTejun Heo 	__set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work));
1877fc2e4d70SOleg Nesterov 	init_completion(&barr->done);
187883c22520SOleg Nesterov 
1879affee4b2STejun Heo 	/*
1880affee4b2STejun Heo 	 * If @target is currently being executed, schedule the
1881affee4b2STejun Heo 	 * barrier to the worker; otherwise, put it after @target.
1882affee4b2STejun Heo 	 */
1883affee4b2STejun Heo 	if (worker)
1884affee4b2STejun Heo 		head = worker->scheduled.next;
1885affee4b2STejun Heo 	else {
1886affee4b2STejun Heo 		unsigned long *bits = work_data_bits(target);
1887affee4b2STejun Heo 
1888affee4b2STejun Heo 		head = target->entry.next;
1889affee4b2STejun Heo 		/* there can already be other linked works, inherit and set */
1890affee4b2STejun Heo 		linked = *bits & WORK_STRUCT_LINKED;
1891affee4b2STejun Heo 		__set_bit(WORK_STRUCT_LINKED_BIT, bits);
1892affee4b2STejun Heo 	}
1893affee4b2STejun Heo 
1894dc186ad7SThomas Gleixner 	debug_work_activate(&barr->work);
1895affee4b2STejun Heo 	insert_work(cwq, &barr->work, head,
1896affee4b2STejun Heo 		    work_color_to_flags(WORK_NO_COLOR) | linked);
1897fc2e4d70SOleg Nesterov }
1898fc2e4d70SOleg Nesterov 
189973f53c4aSTejun Heo /**
190073f53c4aSTejun Heo  * flush_workqueue_prep_cwqs - prepare cwqs for workqueue flushing
190173f53c4aSTejun Heo  * @wq: workqueue being flushed
190273f53c4aSTejun Heo  * @flush_color: new flush color, < 0 for no-op
190373f53c4aSTejun Heo  * @work_color: new work color, < 0 for no-op
190473f53c4aSTejun Heo  *
190573f53c4aSTejun Heo  * Prepare cwqs for workqueue flushing.
190673f53c4aSTejun Heo  *
190773f53c4aSTejun Heo  * If @flush_color is non-negative, flush_color on all cwqs should be
190873f53c4aSTejun Heo  * -1.  If no cwq has in-flight commands at the specified color, all
190973f53c4aSTejun Heo  * cwq->flush_color's stay at -1 and %false is returned.  If any cwq
191073f53c4aSTejun Heo  * has in flight commands, its cwq->flush_color is set to
191173f53c4aSTejun Heo  * @flush_color, @wq->nr_cwqs_to_flush is updated accordingly, cwq
191273f53c4aSTejun Heo  * wakeup logic is armed and %true is returned.
191373f53c4aSTejun Heo  *
191473f53c4aSTejun Heo  * The caller should have initialized @wq->first_flusher prior to
191573f53c4aSTejun Heo  * calling this function with non-negative @flush_color.  If
191673f53c4aSTejun Heo  * @flush_color is negative, no flush color update is done and %false
191773f53c4aSTejun Heo  * is returned.
191873f53c4aSTejun Heo  *
191973f53c4aSTejun Heo  * If @work_color is non-negative, all cwqs should have the same
192073f53c4aSTejun Heo  * work_color which is previous to @work_color and all will be
192173f53c4aSTejun Heo  * advanced to @work_color.
192273f53c4aSTejun Heo  *
192373f53c4aSTejun Heo  * CONTEXT:
192473f53c4aSTejun Heo  * mutex_lock(wq->flush_mutex).
192573f53c4aSTejun Heo  *
192673f53c4aSTejun Heo  * RETURNS:
192773f53c4aSTejun Heo  * %true if @flush_color >= 0 and there's something to flush.  %false
192873f53c4aSTejun Heo  * otherwise.
192973f53c4aSTejun Heo  */
193073f53c4aSTejun Heo static bool flush_workqueue_prep_cwqs(struct workqueue_struct *wq,
193173f53c4aSTejun Heo 				      int flush_color, int work_color)
19321da177e4SLinus Torvalds {
193373f53c4aSTejun Heo 	bool wait = false;
193473f53c4aSTejun Heo 	unsigned int cpu;
19351da177e4SLinus Torvalds 
193673f53c4aSTejun Heo 	if (flush_color >= 0) {
193773f53c4aSTejun Heo 		BUG_ON(atomic_read(&wq->nr_cwqs_to_flush));
193873f53c4aSTejun Heo 		atomic_set(&wq->nr_cwqs_to_flush, 1);
193973f53c4aSTejun Heo 	}
194073f53c4aSTejun Heo 
194173f53c4aSTejun Heo 	for_each_possible_cpu(cpu) {
194273f53c4aSTejun Heo 		struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
19438b03ae3cSTejun Heo 		struct global_cwq *gcwq = cwq->gcwq;
19442355b70fSLai Jiangshan 
19458b03ae3cSTejun Heo 		spin_lock_irq(&gcwq->lock);
194673f53c4aSTejun Heo 
194773f53c4aSTejun Heo 		if (flush_color >= 0) {
194873f53c4aSTejun Heo 			BUG_ON(cwq->flush_color != -1);
194973f53c4aSTejun Heo 
195073f53c4aSTejun Heo 			if (cwq->nr_in_flight[flush_color]) {
195173f53c4aSTejun Heo 				cwq->flush_color = flush_color;
195273f53c4aSTejun Heo 				atomic_inc(&wq->nr_cwqs_to_flush);
195373f53c4aSTejun Heo 				wait = true;
195483c22520SOleg Nesterov 			}
195573f53c4aSTejun Heo 		}
195673f53c4aSTejun Heo 
195773f53c4aSTejun Heo 		if (work_color >= 0) {
195873f53c4aSTejun Heo 			BUG_ON(work_color != work_next_color(cwq->work_color));
195973f53c4aSTejun Heo 			cwq->work_color = work_color;
196073f53c4aSTejun Heo 		}
196173f53c4aSTejun Heo 
19628b03ae3cSTejun Heo 		spin_unlock_irq(&gcwq->lock);
1963dc186ad7SThomas Gleixner 	}
196414441960SOleg Nesterov 
196573f53c4aSTejun Heo 	if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_cwqs_to_flush))
196673f53c4aSTejun Heo 		complete(&wq->first_flusher->done);
196773f53c4aSTejun Heo 
196873f53c4aSTejun Heo 	return wait;
196983c22520SOleg Nesterov }
19701da177e4SLinus Torvalds 
19710fcb78c2SRolf Eike Beer /**
19721da177e4SLinus Torvalds  * flush_workqueue - ensure that any scheduled work has run to completion.
19730fcb78c2SRolf Eike Beer  * @wq: workqueue to flush
19741da177e4SLinus Torvalds  *
19751da177e4SLinus Torvalds  * Forces execution of the workqueue and blocks until its completion.
19761da177e4SLinus Torvalds  * This is typically used in driver shutdown handlers.
19771da177e4SLinus Torvalds  *
1978fc2e4d70SOleg Nesterov  * We sleep until all works which were queued on entry have been handled,
1979fc2e4d70SOleg Nesterov  * but we are not livelocked by new incoming ones.
19801da177e4SLinus Torvalds  */
19817ad5b3a5SHarvey Harrison void flush_workqueue(struct workqueue_struct *wq)
19821da177e4SLinus Torvalds {
198373f53c4aSTejun Heo 	struct wq_flusher this_flusher = {
198473f53c4aSTejun Heo 		.list = LIST_HEAD_INIT(this_flusher.list),
198573f53c4aSTejun Heo 		.flush_color = -1,
198673f53c4aSTejun Heo 		.done = COMPLETION_INITIALIZER_ONSTACK(this_flusher.done),
198773f53c4aSTejun Heo 	};
198873f53c4aSTejun Heo 	int next_color;
1989b1f4ec17SOleg Nesterov 
19903295f0efSIngo Molnar 	lock_map_acquire(&wq->lockdep_map);
19913295f0efSIngo Molnar 	lock_map_release(&wq->lockdep_map);
199273f53c4aSTejun Heo 
199373f53c4aSTejun Heo 	mutex_lock(&wq->flush_mutex);
199473f53c4aSTejun Heo 
199573f53c4aSTejun Heo 	/*
199673f53c4aSTejun Heo 	 * Start-to-wait phase
199773f53c4aSTejun Heo 	 */
199873f53c4aSTejun Heo 	next_color = work_next_color(wq->work_color);
199973f53c4aSTejun Heo 
200073f53c4aSTejun Heo 	if (next_color != wq->flush_color) {
200173f53c4aSTejun Heo 		/*
200273f53c4aSTejun Heo 		 * Color space is not full.  The current work_color
200373f53c4aSTejun Heo 		 * becomes our flush_color and work_color is advanced
200473f53c4aSTejun Heo 		 * by one.
200573f53c4aSTejun Heo 		 */
200673f53c4aSTejun Heo 		BUG_ON(!list_empty(&wq->flusher_overflow));
200773f53c4aSTejun Heo 		this_flusher.flush_color = wq->work_color;
200873f53c4aSTejun Heo 		wq->work_color = next_color;
200973f53c4aSTejun Heo 
201073f53c4aSTejun Heo 		if (!wq->first_flusher) {
201173f53c4aSTejun Heo 			/* no flush in progress, become the first flusher */
201273f53c4aSTejun Heo 			BUG_ON(wq->flush_color != this_flusher.flush_color);
201373f53c4aSTejun Heo 
201473f53c4aSTejun Heo 			wq->first_flusher = &this_flusher;
201573f53c4aSTejun Heo 
201673f53c4aSTejun Heo 			if (!flush_workqueue_prep_cwqs(wq, wq->flush_color,
201773f53c4aSTejun Heo 						       wq->work_color)) {
201873f53c4aSTejun Heo 				/* nothing to flush, done */
201973f53c4aSTejun Heo 				wq->flush_color = next_color;
202073f53c4aSTejun Heo 				wq->first_flusher = NULL;
202173f53c4aSTejun Heo 				goto out_unlock;
202273f53c4aSTejun Heo 			}
202373f53c4aSTejun Heo 		} else {
202473f53c4aSTejun Heo 			/* wait in queue */
202573f53c4aSTejun Heo 			BUG_ON(wq->flush_color == this_flusher.flush_color);
202673f53c4aSTejun Heo 			list_add_tail(&this_flusher.list, &wq->flusher_queue);
202773f53c4aSTejun Heo 			flush_workqueue_prep_cwqs(wq, -1, wq->work_color);
202873f53c4aSTejun Heo 		}
202973f53c4aSTejun Heo 	} else {
203073f53c4aSTejun Heo 		/*
203173f53c4aSTejun Heo 		 * Oops, color space is full, wait on overflow queue.
203273f53c4aSTejun Heo 		 * The next flush completion will assign us
203373f53c4aSTejun Heo 		 * flush_color and transfer to flusher_queue.
203473f53c4aSTejun Heo 		 */
203573f53c4aSTejun Heo 		list_add_tail(&this_flusher.list, &wq->flusher_overflow);
203673f53c4aSTejun Heo 	}
203773f53c4aSTejun Heo 
203873f53c4aSTejun Heo 	mutex_unlock(&wq->flush_mutex);
203973f53c4aSTejun Heo 
204073f53c4aSTejun Heo 	wait_for_completion(&this_flusher.done);
204173f53c4aSTejun Heo 
204273f53c4aSTejun Heo 	/*
204373f53c4aSTejun Heo 	 * Wake-up-and-cascade phase
204473f53c4aSTejun Heo 	 *
204573f53c4aSTejun Heo 	 * First flushers are responsible for cascading flushes and
204673f53c4aSTejun Heo 	 * handling overflow.  Non-first flushers can simply return.
204773f53c4aSTejun Heo 	 */
204873f53c4aSTejun Heo 	if (wq->first_flusher != &this_flusher)
204973f53c4aSTejun Heo 		return;
205073f53c4aSTejun Heo 
205173f53c4aSTejun Heo 	mutex_lock(&wq->flush_mutex);
205273f53c4aSTejun Heo 
205373f53c4aSTejun Heo 	wq->first_flusher = NULL;
205473f53c4aSTejun Heo 
205573f53c4aSTejun Heo 	BUG_ON(!list_empty(&this_flusher.list));
205673f53c4aSTejun Heo 	BUG_ON(wq->flush_color != this_flusher.flush_color);
205773f53c4aSTejun Heo 
205873f53c4aSTejun Heo 	while (true) {
205973f53c4aSTejun Heo 		struct wq_flusher *next, *tmp;
206073f53c4aSTejun Heo 
206173f53c4aSTejun Heo 		/* complete all the flushers sharing the current flush color */
206273f53c4aSTejun Heo 		list_for_each_entry_safe(next, tmp, &wq->flusher_queue, list) {
206373f53c4aSTejun Heo 			if (next->flush_color != wq->flush_color)
206473f53c4aSTejun Heo 				break;
206573f53c4aSTejun Heo 			list_del_init(&next->list);
206673f53c4aSTejun Heo 			complete(&next->done);
206773f53c4aSTejun Heo 		}
206873f53c4aSTejun Heo 
206973f53c4aSTejun Heo 		BUG_ON(!list_empty(&wq->flusher_overflow) &&
207073f53c4aSTejun Heo 		       wq->flush_color != work_next_color(wq->work_color));
207173f53c4aSTejun Heo 
207273f53c4aSTejun Heo 		/* this flush_color is finished, advance by one */
207373f53c4aSTejun Heo 		wq->flush_color = work_next_color(wq->flush_color);
207473f53c4aSTejun Heo 
207573f53c4aSTejun Heo 		/* one color has been freed, handle overflow queue */
207673f53c4aSTejun Heo 		if (!list_empty(&wq->flusher_overflow)) {
207773f53c4aSTejun Heo 			/*
207873f53c4aSTejun Heo 			 * Assign the same color to all overflowed
207973f53c4aSTejun Heo 			 * flushers, advance work_color and append to
208073f53c4aSTejun Heo 			 * flusher_queue.  This is the start-to-wait
208173f53c4aSTejun Heo 			 * phase for these overflowed flushers.
208273f53c4aSTejun Heo 			 */
208373f53c4aSTejun Heo 			list_for_each_entry(tmp, &wq->flusher_overflow, list)
208473f53c4aSTejun Heo 				tmp->flush_color = wq->work_color;
208573f53c4aSTejun Heo 
208673f53c4aSTejun Heo 			wq->work_color = work_next_color(wq->work_color);
208773f53c4aSTejun Heo 
208873f53c4aSTejun Heo 			list_splice_tail_init(&wq->flusher_overflow,
208973f53c4aSTejun Heo 					      &wq->flusher_queue);
209073f53c4aSTejun Heo 			flush_workqueue_prep_cwqs(wq, -1, wq->work_color);
209173f53c4aSTejun Heo 		}
209273f53c4aSTejun Heo 
209373f53c4aSTejun Heo 		if (list_empty(&wq->flusher_queue)) {
209473f53c4aSTejun Heo 			BUG_ON(wq->flush_color != wq->work_color);
209573f53c4aSTejun Heo 			break;
209673f53c4aSTejun Heo 		}
209773f53c4aSTejun Heo 
209873f53c4aSTejun Heo 		/*
209973f53c4aSTejun Heo 		 * Need to flush more colors.  Make the next flusher
210073f53c4aSTejun Heo 		 * the new first flusher and arm cwqs.
210173f53c4aSTejun Heo 		 */
210273f53c4aSTejun Heo 		BUG_ON(wq->flush_color == wq->work_color);
210373f53c4aSTejun Heo 		BUG_ON(wq->flush_color != next->flush_color);
210473f53c4aSTejun Heo 
210573f53c4aSTejun Heo 		list_del_init(&next->list);
210673f53c4aSTejun Heo 		wq->first_flusher = next;
210773f53c4aSTejun Heo 
210873f53c4aSTejun Heo 		if (flush_workqueue_prep_cwqs(wq, wq->flush_color, -1))
210973f53c4aSTejun Heo 			break;
211073f53c4aSTejun Heo 
211173f53c4aSTejun Heo 		/*
211273f53c4aSTejun Heo 		 * Meh... this color is already done, clear first
211373f53c4aSTejun Heo 		 * flusher and repeat cascading.
211473f53c4aSTejun Heo 		 */
211573f53c4aSTejun Heo 		wq->first_flusher = NULL;
211673f53c4aSTejun Heo 	}
211773f53c4aSTejun Heo 
211873f53c4aSTejun Heo out_unlock:
211973f53c4aSTejun Heo 	mutex_unlock(&wq->flush_mutex);
21201da177e4SLinus Torvalds }
2121ae90dd5dSDave Jones EXPORT_SYMBOL_GPL(flush_workqueue);
21221da177e4SLinus Torvalds 
2123db700897SOleg Nesterov /**
2124db700897SOleg Nesterov  * flush_work - block until a work_struct's callback has terminated
2125db700897SOleg Nesterov  * @work: the work which is to be flushed
2126db700897SOleg Nesterov  *
2127a67da70dSOleg Nesterov  * Returns false if @work has already terminated.
2128a67da70dSOleg Nesterov  *
2129db700897SOleg Nesterov  * It is expected that, prior to calling flush_work(), the caller has
2130db700897SOleg Nesterov  * arranged for the work to not be requeued, otherwise it doesn't make
2131db700897SOleg Nesterov  * sense to use this function.
2132db700897SOleg Nesterov  */
2133db700897SOleg Nesterov int flush_work(struct work_struct *work)
2134db700897SOleg Nesterov {
2135affee4b2STejun Heo 	struct worker *worker = NULL;
21368b03ae3cSTejun Heo 	struct global_cwq *gcwq;
21377a22ad75STejun Heo 	struct cpu_workqueue_struct *cwq;
2138db700897SOleg Nesterov 	struct wq_barrier barr;
2139db700897SOleg Nesterov 
2140db700897SOleg Nesterov 	might_sleep();
21417a22ad75STejun Heo 	gcwq = get_work_gcwq(work);
21427a22ad75STejun Heo 	if (!gcwq)
2143db700897SOleg Nesterov 		return 0;
2144a67da70dSOleg Nesterov 
21458b03ae3cSTejun Heo 	spin_lock_irq(&gcwq->lock);
2146db700897SOleg Nesterov 	if (!list_empty(&work->entry)) {
2147db700897SOleg Nesterov 		/*
2148db700897SOleg Nesterov 		 * See the comment near try_to_grab_pending()->smp_rmb().
21497a22ad75STejun Heo 		 * If it was re-queued to a different gcwq under us, we
21507a22ad75STejun Heo 		 * are not going to wait.
2151db700897SOleg Nesterov 		 */
2152db700897SOleg Nesterov 		smp_rmb();
21537a22ad75STejun Heo 		cwq = get_work_cwq(work);
21547a22ad75STejun Heo 		if (unlikely(!cwq || gcwq != cwq->gcwq))
21554690c4abSTejun Heo 			goto already_gone;
2156db700897SOleg Nesterov 	} else {
21577a22ad75STejun Heo 		worker = find_worker_executing_work(gcwq, work);
2158affee4b2STejun Heo 		if (!worker)
21594690c4abSTejun Heo 			goto already_gone;
21607a22ad75STejun Heo 		cwq = worker->current_cwq;
2161db700897SOleg Nesterov 	}
2162db700897SOleg Nesterov 
2163affee4b2STejun Heo 	insert_wq_barrier(cwq, &barr, work, worker);
21648b03ae3cSTejun Heo 	spin_unlock_irq(&gcwq->lock);
21657a22ad75STejun Heo 
21667a22ad75STejun Heo 	lock_map_acquire(&cwq->wq->lockdep_map);
21677a22ad75STejun Heo 	lock_map_release(&cwq->wq->lockdep_map);
21687a22ad75STejun Heo 
2169db700897SOleg Nesterov 	wait_for_completion(&barr.done);
2170dc186ad7SThomas Gleixner 	destroy_work_on_stack(&barr.work);
2171db700897SOleg Nesterov 	return 1;
21724690c4abSTejun Heo already_gone:
21738b03ae3cSTejun Heo 	spin_unlock_irq(&gcwq->lock);
21744690c4abSTejun Heo 	return 0;
2175db700897SOleg Nesterov }
2176db700897SOleg Nesterov EXPORT_SYMBOL_GPL(flush_work);
2177db700897SOleg Nesterov 
21786e84d644SOleg Nesterov /*
21791f1f642eSOleg Nesterov  * Upon a successful return (>= 0), the caller "owns" WORK_STRUCT_PENDING bit,
21806e84d644SOleg Nesterov  * so this work can't be re-armed in any way.
21816e84d644SOleg Nesterov  */
21826e84d644SOleg Nesterov static int try_to_grab_pending(struct work_struct *work)
21836e84d644SOleg Nesterov {
21848b03ae3cSTejun Heo 	struct global_cwq *gcwq;
21851f1f642eSOleg Nesterov 	int ret = -1;
21866e84d644SOleg Nesterov 
218722df02bbSTejun Heo 	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)))
21881f1f642eSOleg Nesterov 		return 0;
21896e84d644SOleg Nesterov 
21906e84d644SOleg Nesterov 	/*
21916e84d644SOleg Nesterov 	 * The queueing is in progress, or it is already queued. Try to
21926e84d644SOleg Nesterov 	 * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
21936e84d644SOleg Nesterov 	 */
21947a22ad75STejun Heo 	gcwq = get_work_gcwq(work);
21957a22ad75STejun Heo 	if (!gcwq)
21966e84d644SOleg Nesterov 		return ret;
21976e84d644SOleg Nesterov 
21988b03ae3cSTejun Heo 	spin_lock_irq(&gcwq->lock);
21996e84d644SOleg Nesterov 	if (!list_empty(&work->entry)) {
22006e84d644SOleg Nesterov 		/*
22017a22ad75STejun Heo 		 * This work is queued, but perhaps we locked the wrong gcwq.
22026e84d644SOleg Nesterov 		 * In that case we must see the new value after rmb(), see
22036e84d644SOleg Nesterov 		 * insert_work()->wmb().
22046e84d644SOleg Nesterov 		 */
22056e84d644SOleg Nesterov 		smp_rmb();
22067a22ad75STejun Heo 		if (gcwq == get_work_gcwq(work)) {
2207dc186ad7SThomas Gleixner 			debug_work_deactivate(work);
22086e84d644SOleg Nesterov 			list_del_init(&work->entry);
22097a22ad75STejun Heo 			cwq_dec_nr_in_flight(get_work_cwq(work),
22107a22ad75STejun Heo 					     get_work_color(work));
22116e84d644SOleg Nesterov 			ret = 1;
22126e84d644SOleg Nesterov 		}
22136e84d644SOleg Nesterov 	}
22148b03ae3cSTejun Heo 	spin_unlock_irq(&gcwq->lock);
22156e84d644SOleg Nesterov 
22166e84d644SOleg Nesterov 	return ret;
22176e84d644SOleg Nesterov }
22186e84d644SOleg Nesterov 
22197a22ad75STejun Heo static void wait_on_cpu_work(struct global_cwq *gcwq, struct work_struct *work)
2220b89deed3SOleg Nesterov {
2221b89deed3SOleg Nesterov 	struct wq_barrier barr;
2222affee4b2STejun Heo 	struct worker *worker;
2223b89deed3SOleg Nesterov 
22248b03ae3cSTejun Heo 	spin_lock_irq(&gcwq->lock);
2225affee4b2STejun Heo 
22267a22ad75STejun Heo 	worker = find_worker_executing_work(gcwq, work);
22277a22ad75STejun Heo 	if (unlikely(worker))
22287a22ad75STejun Heo 		insert_wq_barrier(worker->current_cwq, &barr, work, worker);
2229affee4b2STejun Heo 
22308b03ae3cSTejun Heo 	spin_unlock_irq(&gcwq->lock);
2231b89deed3SOleg Nesterov 
2232affee4b2STejun Heo 	if (unlikely(worker)) {
2233b89deed3SOleg Nesterov 		wait_for_completion(&barr.done);
2234dc186ad7SThomas Gleixner 		destroy_work_on_stack(&barr.work);
2235dc186ad7SThomas Gleixner 	}
2236b89deed3SOleg Nesterov }
2237b89deed3SOleg Nesterov 
22386e84d644SOleg Nesterov static void wait_on_work(struct work_struct *work)
2239b89deed3SOleg Nesterov {
2240b1f4ec17SOleg Nesterov 	int cpu;
2241b89deed3SOleg Nesterov 
2242f293ea92SOleg Nesterov 	might_sleep();
2243f293ea92SOleg Nesterov 
22443295f0efSIngo Molnar 	lock_map_acquire(&work->lockdep_map);
22453295f0efSIngo Molnar 	lock_map_release(&work->lockdep_map);
22464e6045f1SJohannes Berg 
22471537663fSTejun Heo 	for_each_possible_cpu(cpu)
22487a22ad75STejun Heo 		wait_on_cpu_work(get_gcwq(cpu), work);
22496e84d644SOleg Nesterov }
22506e84d644SOleg Nesterov 
22511f1f642eSOleg Nesterov static int __cancel_work_timer(struct work_struct *work,
22521f1f642eSOleg Nesterov 				struct timer_list* timer)
22531f1f642eSOleg Nesterov {
22541f1f642eSOleg Nesterov 	int ret;
22551f1f642eSOleg Nesterov 
22561f1f642eSOleg Nesterov 	do {
22571f1f642eSOleg Nesterov 		ret = (timer && likely(del_timer(timer)));
22581f1f642eSOleg Nesterov 		if (!ret)
22591f1f642eSOleg Nesterov 			ret = try_to_grab_pending(work);
22601f1f642eSOleg Nesterov 		wait_on_work(work);
22611f1f642eSOleg Nesterov 	} while (unlikely(ret < 0));
22621f1f642eSOleg Nesterov 
22637a22ad75STejun Heo 	clear_work_data(work);
22641f1f642eSOleg Nesterov 	return ret;
22651f1f642eSOleg Nesterov }
22661f1f642eSOleg Nesterov 
22676e84d644SOleg Nesterov /**
22686e84d644SOleg Nesterov  * cancel_work_sync - block until a work_struct's callback has terminated
22696e84d644SOleg Nesterov  * @work: the work which is to be flushed
22706e84d644SOleg Nesterov  *
22711f1f642eSOleg Nesterov  * Returns true if @work was pending.
22721f1f642eSOleg Nesterov  *
22736e84d644SOleg Nesterov  * cancel_work_sync() will cancel the work if it is queued. If the work's
22746e84d644SOleg Nesterov  * callback appears to be running, cancel_work_sync() will block until it
22756e84d644SOleg Nesterov  * has completed.
22766e84d644SOleg Nesterov  *
22776e84d644SOleg Nesterov  * It is possible to use this function if the work re-queues itself. It can
22786e84d644SOleg Nesterov  * cancel the work even if it migrates to another workqueue, however in that
22796e84d644SOleg Nesterov  * case it only guarantees that work->func() has completed on the last queued
22806e84d644SOleg Nesterov  * workqueue.
22816e84d644SOleg Nesterov  *
22826e84d644SOleg Nesterov  * cancel_work_sync(&delayed_work->work) should be used only if ->timer is not
22836e84d644SOleg Nesterov  * pending, otherwise it goes into a busy-wait loop until the timer expires.
22846e84d644SOleg Nesterov  *
22856e84d644SOleg Nesterov  * The caller must ensure that workqueue_struct on which this work was last
22866e84d644SOleg Nesterov  * queued can't be destroyed before this function returns.
22876e84d644SOleg Nesterov  */
22881f1f642eSOleg Nesterov int cancel_work_sync(struct work_struct *work)
22896e84d644SOleg Nesterov {
22901f1f642eSOleg Nesterov 	return __cancel_work_timer(work, NULL);
2291b89deed3SOleg Nesterov }
229228e53bddSOleg Nesterov EXPORT_SYMBOL_GPL(cancel_work_sync);
2293b89deed3SOleg Nesterov 
22946e84d644SOleg Nesterov /**
2295f5a421a4SOleg Nesterov  * cancel_delayed_work_sync - reliably kill off a delayed work.
22966e84d644SOleg Nesterov  * @dwork: the delayed work struct
22976e84d644SOleg Nesterov  *
22981f1f642eSOleg Nesterov  * Returns true if @dwork was pending.
22991f1f642eSOleg Nesterov  *
23006e84d644SOleg Nesterov  * It is possible to use this function if @dwork rearms itself via queue_work()
23016e84d644SOleg Nesterov  * or queue_delayed_work(). See also the comment for cancel_work_sync().
23026e84d644SOleg Nesterov  */
23031f1f642eSOleg Nesterov int cancel_delayed_work_sync(struct delayed_work *dwork)
23046e84d644SOleg Nesterov {
23051f1f642eSOleg Nesterov 	return __cancel_work_timer(&dwork->work, &dwork->timer);
23066e84d644SOleg Nesterov }
2307f5a421a4SOleg Nesterov EXPORT_SYMBOL(cancel_delayed_work_sync);
23081da177e4SLinus Torvalds 
23096e84d644SOleg Nesterov static struct workqueue_struct *keventd_wq __read_mostly;
23101da177e4SLinus Torvalds 
23110fcb78c2SRolf Eike Beer /**
23120fcb78c2SRolf Eike Beer  * schedule_work - put work task in global workqueue
23130fcb78c2SRolf Eike Beer  * @work: job to be done
23140fcb78c2SRolf Eike Beer  *
23155b0f437dSBart Van Assche  * Returns zero if @work was already on the kernel-global workqueue and
23165b0f437dSBart Van Assche  * non-zero otherwise.
23175b0f437dSBart Van Assche  *
23185b0f437dSBart Van Assche  * This puts a job in the kernel-global workqueue if it was not already
23195b0f437dSBart Van Assche  * queued and leaves it in the same position on the kernel-global
23205b0f437dSBart Van Assche  * workqueue otherwise.
23210fcb78c2SRolf Eike Beer  */
23227ad5b3a5SHarvey Harrison int schedule_work(struct work_struct *work)
23231da177e4SLinus Torvalds {
23241da177e4SLinus Torvalds 	return queue_work(keventd_wq, work);
23251da177e4SLinus Torvalds }
2326ae90dd5dSDave Jones EXPORT_SYMBOL(schedule_work);
23271da177e4SLinus Torvalds 
2328c1a220e7SZhang Rui /*
2329c1a220e7SZhang Rui  * schedule_work_on - put work task on a specific cpu
2330c1a220e7SZhang Rui  * @cpu: cpu to put the work task on
2331c1a220e7SZhang Rui  * @work: job to be done
2332c1a220e7SZhang Rui  *
2333c1a220e7SZhang Rui  * This puts a job on a specific cpu
2334c1a220e7SZhang Rui  */
2335c1a220e7SZhang Rui int schedule_work_on(int cpu, struct work_struct *work)
2336c1a220e7SZhang Rui {
2337c1a220e7SZhang Rui 	return queue_work_on(cpu, keventd_wq, work);
2338c1a220e7SZhang Rui }
2339c1a220e7SZhang Rui EXPORT_SYMBOL(schedule_work_on);
2340c1a220e7SZhang Rui 
23410fcb78c2SRolf Eike Beer /**
23420fcb78c2SRolf Eike Beer  * schedule_delayed_work - put work task in global workqueue after delay
234352bad64dSDavid Howells  * @dwork: job to be done
234452bad64dSDavid Howells  * @delay: number of jiffies to wait or 0 for immediate execution
23450fcb78c2SRolf Eike Beer  *
23460fcb78c2SRolf Eike Beer  * After waiting for a given time this puts a job in the kernel-global
23470fcb78c2SRolf Eike Beer  * workqueue.
23480fcb78c2SRolf Eike Beer  */
23497ad5b3a5SHarvey Harrison int schedule_delayed_work(struct delayed_work *dwork,
235082f67cd9SIngo Molnar 					unsigned long delay)
23511da177e4SLinus Torvalds {
235252bad64dSDavid Howells 	return queue_delayed_work(keventd_wq, dwork, delay);
23531da177e4SLinus Torvalds }
2354ae90dd5dSDave Jones EXPORT_SYMBOL(schedule_delayed_work);
23551da177e4SLinus Torvalds 
23560fcb78c2SRolf Eike Beer /**
23578c53e463SLinus Torvalds  * flush_delayed_work - block until a dwork_struct's callback has terminated
23588c53e463SLinus Torvalds  * @dwork: the delayed work which is to be flushed
23598c53e463SLinus Torvalds  *
23608c53e463SLinus Torvalds  * Any timeout is cancelled, and any pending work is run immediately.
23618c53e463SLinus Torvalds  */
23628c53e463SLinus Torvalds void flush_delayed_work(struct delayed_work *dwork)
23638c53e463SLinus Torvalds {
23648c53e463SLinus Torvalds 	if (del_timer_sync(&dwork->timer)) {
23657a22ad75STejun Heo 		__queue_work(get_cpu(), get_work_cwq(&dwork->work)->wq,
23664690c4abSTejun Heo 			     &dwork->work);
23678c53e463SLinus Torvalds 		put_cpu();
23688c53e463SLinus Torvalds 	}
23698c53e463SLinus Torvalds 	flush_work(&dwork->work);
23708c53e463SLinus Torvalds }
23718c53e463SLinus Torvalds EXPORT_SYMBOL(flush_delayed_work);
23728c53e463SLinus Torvalds 
23738c53e463SLinus Torvalds /**
23740fcb78c2SRolf Eike Beer  * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
23750fcb78c2SRolf Eike Beer  * @cpu: cpu to use
237652bad64dSDavid Howells  * @dwork: job to be done
23770fcb78c2SRolf Eike Beer  * @delay: number of jiffies to wait
23780fcb78c2SRolf Eike Beer  *
23790fcb78c2SRolf Eike Beer  * After waiting for a given time this puts a job in the kernel-global
23800fcb78c2SRolf Eike Beer  * workqueue on the specified CPU.
23810fcb78c2SRolf Eike Beer  */
23821da177e4SLinus Torvalds int schedule_delayed_work_on(int cpu,
238352bad64dSDavid Howells 			struct delayed_work *dwork, unsigned long delay)
23841da177e4SLinus Torvalds {
238552bad64dSDavid Howells 	return queue_delayed_work_on(cpu, keventd_wq, dwork, delay);
23861da177e4SLinus Torvalds }
2387ae90dd5dSDave Jones EXPORT_SYMBOL(schedule_delayed_work_on);
23881da177e4SLinus Torvalds 
2389b6136773SAndrew Morton /**
2390b6136773SAndrew Morton  * schedule_on_each_cpu - call a function on each online CPU from keventd
2391b6136773SAndrew Morton  * @func: the function to call
2392b6136773SAndrew Morton  *
2393b6136773SAndrew Morton  * Returns zero on success.
2394b6136773SAndrew Morton  * Returns -ve errno on failure.
2395b6136773SAndrew Morton  *
2396b6136773SAndrew Morton  * schedule_on_each_cpu() is very slow.
2397b6136773SAndrew Morton  */
239865f27f38SDavid Howells int schedule_on_each_cpu(work_func_t func)
239915316ba8SChristoph Lameter {
240015316ba8SChristoph Lameter 	int cpu;
240165a64464SAndi Kleen 	int orig = -1;
2402b6136773SAndrew Morton 	struct work_struct *works;
240315316ba8SChristoph Lameter 
2404b6136773SAndrew Morton 	works = alloc_percpu(struct work_struct);
2405b6136773SAndrew Morton 	if (!works)
240615316ba8SChristoph Lameter 		return -ENOMEM;
2407b6136773SAndrew Morton 
240895402b38SGautham R Shenoy 	get_online_cpus();
240993981800STejun Heo 
241093981800STejun Heo 	/*
241193981800STejun Heo 	 * When running in keventd don't schedule a work item on
241293981800STejun Heo 	 * itself.  Can just call directly because the work queue is
241393981800STejun Heo 	 * already bound.  This also is faster.
241493981800STejun Heo 	 */
241593981800STejun Heo 	if (current_is_keventd())
241693981800STejun Heo 		orig = raw_smp_processor_id();
241793981800STejun Heo 
241815316ba8SChristoph Lameter 	for_each_online_cpu(cpu) {
24199bfb1839SIngo Molnar 		struct work_struct *work = per_cpu_ptr(works, cpu);
24209bfb1839SIngo Molnar 
24219bfb1839SIngo Molnar 		INIT_WORK(work, func);
242293981800STejun Heo 		if (cpu != orig)
24238de6d308SOleg Nesterov 			schedule_work_on(cpu, work);
242415316ba8SChristoph Lameter 	}
242593981800STejun Heo 	if (orig >= 0)
242693981800STejun Heo 		func(per_cpu_ptr(works, orig));
242793981800STejun Heo 
242893981800STejun Heo 	for_each_online_cpu(cpu)
24298616a89aSOleg Nesterov 		flush_work(per_cpu_ptr(works, cpu));
243093981800STejun Heo 
243195402b38SGautham R Shenoy 	put_online_cpus();
2432b6136773SAndrew Morton 	free_percpu(works);
243315316ba8SChristoph Lameter 	return 0;
243415316ba8SChristoph Lameter }
243515316ba8SChristoph Lameter 
2436eef6a7d5SAlan Stern /**
2437eef6a7d5SAlan Stern  * flush_scheduled_work - ensure that any scheduled work has run to completion.
2438eef6a7d5SAlan Stern  *
2439eef6a7d5SAlan Stern  * Forces execution of the kernel-global workqueue and blocks until its
2440eef6a7d5SAlan Stern  * completion.
2441eef6a7d5SAlan Stern  *
2442eef6a7d5SAlan Stern  * Think twice before calling this function!  It's very easy to get into
2443eef6a7d5SAlan Stern  * trouble if you don't take great care.  Either of the following situations
2444eef6a7d5SAlan Stern  * will lead to deadlock:
2445eef6a7d5SAlan Stern  *
2446eef6a7d5SAlan Stern  *	One of the work items currently on the workqueue needs to acquire
2447eef6a7d5SAlan Stern  *	a lock held by your code or its caller.
2448eef6a7d5SAlan Stern  *
2449eef6a7d5SAlan Stern  *	Your code is running in the context of a work routine.
2450eef6a7d5SAlan Stern  *
2451eef6a7d5SAlan Stern  * They will be detected by lockdep when they occur, but the first might not
2452eef6a7d5SAlan Stern  * occur very often.  It depends on what work items are on the workqueue and
2453eef6a7d5SAlan Stern  * what locks they need, which you have no control over.
2454eef6a7d5SAlan Stern  *
2455eef6a7d5SAlan Stern  * In most situations flushing the entire workqueue is overkill; you merely
2456eef6a7d5SAlan Stern  * need to know that a particular work item isn't queued and isn't running.
2457eef6a7d5SAlan Stern  * In such cases you should use cancel_delayed_work_sync() or
2458eef6a7d5SAlan Stern  * cancel_work_sync() instead.
2459eef6a7d5SAlan Stern  */
24601da177e4SLinus Torvalds void flush_scheduled_work(void)
24611da177e4SLinus Torvalds {
24621da177e4SLinus Torvalds 	flush_workqueue(keventd_wq);
24631da177e4SLinus Torvalds }
2464ae90dd5dSDave Jones EXPORT_SYMBOL(flush_scheduled_work);
24651da177e4SLinus Torvalds 
24661da177e4SLinus Torvalds /**
24671fa44ecaSJames Bottomley  * execute_in_process_context - reliably execute the routine with user context
24681fa44ecaSJames Bottomley  * @fn:		the function to execute
24691fa44ecaSJames Bottomley  * @ew:		guaranteed storage for the execute work structure (must
24701fa44ecaSJames Bottomley  *		be available when the work executes)
24711fa44ecaSJames Bottomley  *
24721fa44ecaSJames Bottomley  * Executes the function immediately if process context is available,
24731fa44ecaSJames Bottomley  * otherwise schedules the function for delayed execution.
24741fa44ecaSJames Bottomley  *
24751fa44ecaSJames Bottomley  * Returns:	0 - function was executed
24761fa44ecaSJames Bottomley  *		1 - function was scheduled for execution
24771fa44ecaSJames Bottomley  */
247865f27f38SDavid Howells int execute_in_process_context(work_func_t fn, struct execute_work *ew)
24791fa44ecaSJames Bottomley {
24801fa44ecaSJames Bottomley 	if (!in_interrupt()) {
248165f27f38SDavid Howells 		fn(&ew->work);
24821fa44ecaSJames Bottomley 		return 0;
24831fa44ecaSJames Bottomley 	}
24841fa44ecaSJames Bottomley 
248565f27f38SDavid Howells 	INIT_WORK(&ew->work, fn);
24861fa44ecaSJames Bottomley 	schedule_work(&ew->work);
24871fa44ecaSJames Bottomley 
24881fa44ecaSJames Bottomley 	return 1;
24891fa44ecaSJames Bottomley }
24901fa44ecaSJames Bottomley EXPORT_SYMBOL_GPL(execute_in_process_context);
24911fa44ecaSJames Bottomley 
24921da177e4SLinus Torvalds int keventd_up(void)
24931da177e4SLinus Torvalds {
24941da177e4SLinus Torvalds 	return keventd_wq != NULL;
24951da177e4SLinus Torvalds }
24961da177e4SLinus Torvalds 
24971da177e4SLinus Torvalds int current_is_keventd(void)
24981da177e4SLinus Torvalds {
24997e11629dSTejun Heo 	bool found = false;
25007e11629dSTejun Heo 	unsigned int cpu;
25011da177e4SLinus Torvalds 
25027e11629dSTejun Heo 	/*
25037e11629dSTejun Heo 	 * There no longer is one-to-one relation between worker and
25047e11629dSTejun Heo 	 * work queue and a worker task might be unbound from its cpu
25057e11629dSTejun Heo 	 * if the cpu was offlined.  Match all busy workers.  This
25067e11629dSTejun Heo 	 * function will go away once dynamic pool is implemented.
25077e11629dSTejun Heo 	 */
25087e11629dSTejun Heo 	for_each_possible_cpu(cpu) {
25097e11629dSTejun Heo 		struct global_cwq *gcwq = get_gcwq(cpu);
25107e11629dSTejun Heo 		struct worker *worker;
25117e11629dSTejun Heo 		struct hlist_node *pos;
25127e11629dSTejun Heo 		unsigned long flags;
25137e11629dSTejun Heo 		int i;
25141da177e4SLinus Torvalds 
25157e11629dSTejun Heo 		spin_lock_irqsave(&gcwq->lock, flags);
25161da177e4SLinus Torvalds 
25177e11629dSTejun Heo 		for_each_busy_worker(worker, i, pos, gcwq) {
25187e11629dSTejun Heo 			if (worker->task == current) {
25197e11629dSTejun Heo 				found = true;
25207e11629dSTejun Heo 				break;
25217e11629dSTejun Heo 			}
25227e11629dSTejun Heo 		}
25231da177e4SLinus Torvalds 
25247e11629dSTejun Heo 		spin_unlock_irqrestore(&gcwq->lock, flags);
25257e11629dSTejun Heo 		if (found)
25267e11629dSTejun Heo 			break;
25277e11629dSTejun Heo 	}
25287e11629dSTejun Heo 
25297e11629dSTejun Heo 	return found;
25301da177e4SLinus Torvalds }
25311da177e4SLinus Torvalds 
25320f900049STejun Heo static struct cpu_workqueue_struct *alloc_cwqs(void)
25330f900049STejun Heo {
25340f900049STejun Heo 	/*
25350f900049STejun Heo 	 * cwqs are forced aligned according to WORK_STRUCT_FLAG_BITS.
25360f900049STejun Heo 	 * Make sure that the alignment isn't lower than that of
25370f900049STejun Heo 	 * unsigned long long.
25380f900049STejun Heo 	 */
25390f900049STejun Heo 	const size_t size = sizeof(struct cpu_workqueue_struct);
25400f900049STejun Heo 	const size_t align = max_t(size_t, 1 << WORK_STRUCT_FLAG_BITS,
25410f900049STejun Heo 				   __alignof__(unsigned long long));
25420f900049STejun Heo 	struct cpu_workqueue_struct *cwqs;
25430f900049STejun Heo #ifndef CONFIG_SMP
25440f900049STejun Heo 	void *ptr;
25450f900049STejun Heo 
25460f900049STejun Heo 	/*
25470f900049STejun Heo 	 * On UP, percpu allocator doesn't honor alignment parameter
25480f900049STejun Heo 	 * and simply uses arch-dependent default.  Allocate enough
25490f900049STejun Heo 	 * room to align cwq and put an extra pointer at the end
25500f900049STejun Heo 	 * pointing back to the originally allocated pointer which
25510f900049STejun Heo 	 * will be used for free.
25520f900049STejun Heo 	 *
25530f900049STejun Heo 	 * FIXME: This really belongs to UP percpu code.  Update UP
25540f900049STejun Heo 	 * percpu code to honor alignment and remove this ugliness.
25550f900049STejun Heo 	 */
25560f900049STejun Heo 	ptr = __alloc_percpu(size + align + sizeof(void *), 1);
25570f900049STejun Heo 	cwqs = PTR_ALIGN(ptr, align);
25580f900049STejun Heo 	*(void **)per_cpu_ptr(cwqs + 1, 0) = ptr;
25590f900049STejun Heo #else
25600f900049STejun Heo 	/* On SMP, percpu allocator can do it itself */
25610f900049STejun Heo 	cwqs = __alloc_percpu(size, align);
25620f900049STejun Heo #endif
25630f900049STejun Heo 	/* just in case, make sure it's actually aligned */
25640f900049STejun Heo 	BUG_ON(!IS_ALIGNED((unsigned long)cwqs, align));
25650f900049STejun Heo 	return cwqs;
25660f900049STejun Heo }
25670f900049STejun Heo 
25680f900049STejun Heo static void free_cwqs(struct cpu_workqueue_struct *cwqs)
25690f900049STejun Heo {
25700f900049STejun Heo #ifndef CONFIG_SMP
25710f900049STejun Heo 	/* on UP, the pointer to free is stored right after the cwq */
25720f900049STejun Heo 	if (cwqs)
25730f900049STejun Heo 		free_percpu(*(void **)per_cpu_ptr(cwqs + 1, 0));
25740f900049STejun Heo #else
25750f900049STejun Heo 	free_percpu(cwqs);
25760f900049STejun Heo #endif
25770f900049STejun Heo }
25780f900049STejun Heo 
25794e6045f1SJohannes Berg struct workqueue_struct *__create_workqueue_key(const char *name,
258097e37d7bSTejun Heo 						unsigned int flags,
25811e19ffc6STejun Heo 						int max_active,
2582eb13ba87SJohannes Berg 						struct lock_class_key *key,
2583eb13ba87SJohannes Berg 						const char *lock_name)
25843af24433SOleg Nesterov {
25853af24433SOleg Nesterov 	struct workqueue_struct *wq;
2586c34056a3STejun Heo 	unsigned int cpu;
25873af24433SOleg Nesterov 
25881e19ffc6STejun Heo 	max_active = clamp_val(max_active, 1, INT_MAX);
25891e19ffc6STejun Heo 
25903af24433SOleg Nesterov 	wq = kzalloc(sizeof(*wq), GFP_KERNEL);
25913af24433SOleg Nesterov 	if (!wq)
25924690c4abSTejun Heo 		goto err;
25933af24433SOleg Nesterov 
25940f900049STejun Heo 	wq->cpu_wq = alloc_cwqs();
25954690c4abSTejun Heo 	if (!wq->cpu_wq)
25964690c4abSTejun Heo 		goto err;
25973af24433SOleg Nesterov 
259897e37d7bSTejun Heo 	wq->flags = flags;
2599a0a1a5fdSTejun Heo 	wq->saved_max_active = max_active;
260073f53c4aSTejun Heo 	mutex_init(&wq->flush_mutex);
260173f53c4aSTejun Heo 	atomic_set(&wq->nr_cwqs_to_flush, 0);
260273f53c4aSTejun Heo 	INIT_LIST_HEAD(&wq->flusher_queue);
260373f53c4aSTejun Heo 	INIT_LIST_HEAD(&wq->flusher_overflow);
2604502ca9d8STejun Heo 	wq->single_cpu = NR_CPUS;
2605502ca9d8STejun Heo 
26063af24433SOleg Nesterov 	wq->name = name;
2607eb13ba87SJohannes Berg 	lockdep_init_map(&wq->lockdep_map, lock_name, key, 0);
2608cce1a165SOleg Nesterov 	INIT_LIST_HEAD(&wq->list);
26093af24433SOleg Nesterov 
26103af24433SOleg Nesterov 	for_each_possible_cpu(cpu) {
26111537663fSTejun Heo 		struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
26128b03ae3cSTejun Heo 		struct global_cwq *gcwq = get_gcwq(cpu);
26131537663fSTejun Heo 
26140f900049STejun Heo 		BUG_ON((unsigned long)cwq & WORK_STRUCT_FLAG_MASK);
26158b03ae3cSTejun Heo 		cwq->gcwq = gcwq;
2616c34056a3STejun Heo 		cwq->wq = wq;
261773f53c4aSTejun Heo 		cwq->flush_color = -1;
26181e19ffc6STejun Heo 		cwq->max_active = max_active;
26191e19ffc6STejun Heo 		INIT_LIST_HEAD(&cwq->delayed_works);
2620*e22bee78STejun Heo 	}
26211537663fSTejun Heo 
2622*e22bee78STejun Heo 	if (flags & WQ_RESCUER) {
2623*e22bee78STejun Heo 		struct worker *rescuer;
2624*e22bee78STejun Heo 
2625*e22bee78STejun Heo 		if (!alloc_cpumask_var(&wq->mayday_mask, GFP_KERNEL))
2626*e22bee78STejun Heo 			goto err;
2627*e22bee78STejun Heo 
2628*e22bee78STejun Heo 		wq->rescuer = rescuer = alloc_worker();
2629*e22bee78STejun Heo 		if (!rescuer)
2630*e22bee78STejun Heo 			goto err;
2631*e22bee78STejun Heo 
2632*e22bee78STejun Heo 		rescuer->task = kthread_create(rescuer_thread, wq, "%s", name);
2633*e22bee78STejun Heo 		if (IS_ERR(rescuer->task))
2634*e22bee78STejun Heo 			goto err;
2635*e22bee78STejun Heo 
2636*e22bee78STejun Heo 		wq->rescuer = rescuer;
2637*e22bee78STejun Heo 		rescuer->task->flags |= PF_THREAD_BOUND;
2638*e22bee78STejun Heo 		wake_up_process(rescuer->task);
26393af24433SOleg Nesterov 	}
26401537663fSTejun Heo 
2641a0a1a5fdSTejun Heo 	/*
2642a0a1a5fdSTejun Heo 	 * workqueue_lock protects global freeze state and workqueues
2643a0a1a5fdSTejun Heo 	 * list.  Grab it, set max_active accordingly and add the new
2644a0a1a5fdSTejun Heo 	 * workqueue to workqueues list.
2645a0a1a5fdSTejun Heo 	 */
26461537663fSTejun Heo 	spin_lock(&workqueue_lock);
2647a0a1a5fdSTejun Heo 
2648a0a1a5fdSTejun Heo 	if (workqueue_freezing && wq->flags & WQ_FREEZEABLE)
2649a0a1a5fdSTejun Heo 		for_each_possible_cpu(cpu)
2650a0a1a5fdSTejun Heo 			get_cwq(cpu, wq)->max_active = 0;
2651a0a1a5fdSTejun Heo 
26521537663fSTejun Heo 	list_add(&wq->list, &workqueues);
2653a0a1a5fdSTejun Heo 
26541537663fSTejun Heo 	spin_unlock(&workqueue_lock);
26551537663fSTejun Heo 
26563af24433SOleg Nesterov 	return wq;
26574690c4abSTejun Heo err:
26584690c4abSTejun Heo 	if (wq) {
26590f900049STejun Heo 		free_cwqs(wq->cpu_wq);
2660*e22bee78STejun Heo 		free_cpumask_var(wq->mayday_mask);
2661*e22bee78STejun Heo 		kfree(wq->rescuer);
26624690c4abSTejun Heo 		kfree(wq);
26634690c4abSTejun Heo 	}
26644690c4abSTejun Heo 	return NULL;
26653af24433SOleg Nesterov }
26664e6045f1SJohannes Berg EXPORT_SYMBOL_GPL(__create_workqueue_key);
26673af24433SOleg Nesterov 
26683af24433SOleg Nesterov /**
26693af24433SOleg Nesterov  * destroy_workqueue - safely terminate a workqueue
26703af24433SOleg Nesterov  * @wq: target workqueue
26713af24433SOleg Nesterov  *
26723af24433SOleg Nesterov  * Safely destroy a workqueue. All work currently pending will be done first.
26733af24433SOleg Nesterov  */
26743af24433SOleg Nesterov void destroy_workqueue(struct workqueue_struct *wq)
26753af24433SOleg Nesterov {
2676c8e55f36STejun Heo 	unsigned int cpu;
26773af24433SOleg Nesterov 
2678a0a1a5fdSTejun Heo 	flush_workqueue(wq);
2679a0a1a5fdSTejun Heo 
2680a0a1a5fdSTejun Heo 	/*
2681a0a1a5fdSTejun Heo 	 * wq list is used to freeze wq, remove from list after
2682a0a1a5fdSTejun Heo 	 * flushing is complete in case freeze races us.
2683a0a1a5fdSTejun Heo 	 */
268495402b38SGautham R Shenoy 	spin_lock(&workqueue_lock);
26853af24433SOleg Nesterov 	list_del(&wq->list);
268695402b38SGautham R Shenoy 	spin_unlock(&workqueue_lock);
26873af24433SOleg Nesterov 
2688*e22bee78STejun Heo 	/* sanity check */
268973f53c4aSTejun Heo 	for_each_possible_cpu(cpu) {
269073f53c4aSTejun Heo 		struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
269173f53c4aSTejun Heo 		int i;
269273f53c4aSTejun Heo 
269373f53c4aSTejun Heo 		for (i = 0; i < WORK_NR_COLORS; i++)
269473f53c4aSTejun Heo 			BUG_ON(cwq->nr_in_flight[i]);
26951e19ffc6STejun Heo 		BUG_ON(cwq->nr_active);
26961e19ffc6STejun Heo 		BUG_ON(!list_empty(&cwq->delayed_works));
269773f53c4aSTejun Heo 	}
26981537663fSTejun Heo 
2699*e22bee78STejun Heo 	if (wq->flags & WQ_RESCUER) {
2700*e22bee78STejun Heo 		kthread_stop(wq->rescuer->task);
2701*e22bee78STejun Heo 		free_cpumask_var(wq->mayday_mask);
2702*e22bee78STejun Heo 	}
2703*e22bee78STejun Heo 
27040f900049STejun Heo 	free_cwqs(wq->cpu_wq);
27053af24433SOleg Nesterov 	kfree(wq);
27063af24433SOleg Nesterov }
27073af24433SOleg Nesterov EXPORT_SYMBOL_GPL(destroy_workqueue);
27083af24433SOleg Nesterov 
2709db7bccf4STejun Heo /*
2710db7bccf4STejun Heo  * CPU hotplug.
2711db7bccf4STejun Heo  *
2712*e22bee78STejun Heo  * There are two challenges in supporting CPU hotplug.  Firstly, there
2713*e22bee78STejun Heo  * are a lot of assumptions on strong associations among work, cwq and
2714*e22bee78STejun Heo  * gcwq which make migrating pending and scheduled works very
2715*e22bee78STejun Heo  * difficult to implement without impacting hot paths.  Secondly,
2716*e22bee78STejun Heo  * gcwqs serve mix of short, long and very long running works making
2717*e22bee78STejun Heo  * blocked draining impractical.
2718*e22bee78STejun Heo  *
2719*e22bee78STejun Heo  * This is solved by allowing a gcwq to be detached from CPU, running
2720*e22bee78STejun Heo  * it with unbound (rogue) workers and allowing it to be reattached
2721*e22bee78STejun Heo  * later if the cpu comes back online.  A separate thread is created
2722*e22bee78STejun Heo  * to govern a gcwq in such state and is called the trustee of the
2723*e22bee78STejun Heo  * gcwq.
2724db7bccf4STejun Heo  *
2725db7bccf4STejun Heo  * Trustee states and their descriptions.
2726db7bccf4STejun Heo  *
2727db7bccf4STejun Heo  * START	Command state used on startup.  On CPU_DOWN_PREPARE, a
2728db7bccf4STejun Heo  *		new trustee is started with this state.
2729db7bccf4STejun Heo  *
2730db7bccf4STejun Heo  * IN_CHARGE	Once started, trustee will enter this state after
2731*e22bee78STejun Heo  *		assuming the manager role and making all existing
2732*e22bee78STejun Heo  *		workers rogue.  DOWN_PREPARE waits for trustee to
2733*e22bee78STejun Heo  *		enter this state.  After reaching IN_CHARGE, trustee
2734*e22bee78STejun Heo  *		tries to execute the pending worklist until it's empty
2735*e22bee78STejun Heo  *		and the state is set to BUTCHER, or the state is set
2736*e22bee78STejun Heo  *		to RELEASE.
2737db7bccf4STejun Heo  *
2738db7bccf4STejun Heo  * BUTCHER	Command state which is set by the cpu callback after
2739db7bccf4STejun Heo  *		the cpu has went down.  Once this state is set trustee
2740db7bccf4STejun Heo  *		knows that there will be no new works on the worklist
2741db7bccf4STejun Heo  *		and once the worklist is empty it can proceed to
2742db7bccf4STejun Heo  *		killing idle workers.
2743db7bccf4STejun Heo  *
2744db7bccf4STejun Heo  * RELEASE	Command state which is set by the cpu callback if the
2745db7bccf4STejun Heo  *		cpu down has been canceled or it has come online
2746db7bccf4STejun Heo  *		again.  After recognizing this state, trustee stops
2747*e22bee78STejun Heo  *		trying to drain or butcher and clears ROGUE, rebinds
2748*e22bee78STejun Heo  *		all remaining workers back to the cpu and releases
2749*e22bee78STejun Heo  *		manager role.
2750db7bccf4STejun Heo  *
2751db7bccf4STejun Heo  * DONE		Trustee will enter this state after BUTCHER or RELEASE
2752db7bccf4STejun Heo  *		is complete.
2753db7bccf4STejun Heo  *
2754db7bccf4STejun Heo  *          trustee                 CPU                draining
2755db7bccf4STejun Heo  *         took over                down               complete
2756db7bccf4STejun Heo  * START -----------> IN_CHARGE -----------> BUTCHER -----------> DONE
2757db7bccf4STejun Heo  *                        |                     |                  ^
2758db7bccf4STejun Heo  *                        | CPU is back online  v   return workers |
2759db7bccf4STejun Heo  *                         ----------------> RELEASE --------------
2760db7bccf4STejun Heo  */
2761db7bccf4STejun Heo 
2762db7bccf4STejun Heo /**
2763db7bccf4STejun Heo  * trustee_wait_event_timeout - timed event wait for trustee
2764db7bccf4STejun Heo  * @cond: condition to wait for
2765db7bccf4STejun Heo  * @timeout: timeout in jiffies
2766db7bccf4STejun Heo  *
2767db7bccf4STejun Heo  * wait_event_timeout() for trustee to use.  Handles locking and
2768db7bccf4STejun Heo  * checks for RELEASE request.
2769db7bccf4STejun Heo  *
2770db7bccf4STejun Heo  * CONTEXT:
2771db7bccf4STejun Heo  * spin_lock_irq(gcwq->lock) which may be released and regrabbed
2772db7bccf4STejun Heo  * multiple times.  To be used by trustee.
2773db7bccf4STejun Heo  *
2774db7bccf4STejun Heo  * RETURNS:
2775db7bccf4STejun Heo  * Positive indicating left time if @cond is satisfied, 0 if timed
2776db7bccf4STejun Heo  * out, -1 if canceled.
2777db7bccf4STejun Heo  */
2778db7bccf4STejun Heo #define trustee_wait_event_timeout(cond, timeout) ({			\
2779db7bccf4STejun Heo 	long __ret = (timeout);						\
2780db7bccf4STejun Heo 	while (!((cond) || (gcwq->trustee_state == TRUSTEE_RELEASE)) &&	\
2781db7bccf4STejun Heo 	       __ret) {							\
2782db7bccf4STejun Heo 		spin_unlock_irq(&gcwq->lock);				\
2783db7bccf4STejun Heo 		__wait_event_timeout(gcwq->trustee_wait, (cond) ||	\
2784db7bccf4STejun Heo 			(gcwq->trustee_state == TRUSTEE_RELEASE),	\
2785db7bccf4STejun Heo 			__ret);						\
2786db7bccf4STejun Heo 		spin_lock_irq(&gcwq->lock);				\
2787db7bccf4STejun Heo 	}								\
2788db7bccf4STejun Heo 	gcwq->trustee_state == TRUSTEE_RELEASE ? -1 : (__ret);		\
2789db7bccf4STejun Heo })
2790db7bccf4STejun Heo 
2791db7bccf4STejun Heo /**
2792db7bccf4STejun Heo  * trustee_wait_event - event wait for trustee
2793db7bccf4STejun Heo  * @cond: condition to wait for
2794db7bccf4STejun Heo  *
2795db7bccf4STejun Heo  * wait_event() for trustee to use.  Automatically handles locking and
2796db7bccf4STejun Heo  * checks for CANCEL request.
2797db7bccf4STejun Heo  *
2798db7bccf4STejun Heo  * CONTEXT:
2799db7bccf4STejun Heo  * spin_lock_irq(gcwq->lock) which may be released and regrabbed
2800db7bccf4STejun Heo  * multiple times.  To be used by trustee.
2801db7bccf4STejun Heo  *
2802db7bccf4STejun Heo  * RETURNS:
2803db7bccf4STejun Heo  * 0 if @cond is satisfied, -1 if canceled.
2804db7bccf4STejun Heo  */
2805db7bccf4STejun Heo #define trustee_wait_event(cond) ({					\
2806db7bccf4STejun Heo 	long __ret1;							\
2807db7bccf4STejun Heo 	__ret1 = trustee_wait_event_timeout(cond, MAX_SCHEDULE_TIMEOUT);\
2808db7bccf4STejun Heo 	__ret1 < 0 ? -1 : 0;						\
2809db7bccf4STejun Heo })
2810db7bccf4STejun Heo 
2811db7bccf4STejun Heo static int __cpuinit trustee_thread(void *__gcwq)
2812db7bccf4STejun Heo {
2813db7bccf4STejun Heo 	struct global_cwq *gcwq = __gcwq;
2814db7bccf4STejun Heo 	struct worker *worker;
2815*e22bee78STejun Heo 	struct work_struct *work;
2816db7bccf4STejun Heo 	struct hlist_node *pos;
2817*e22bee78STejun Heo 	long rc;
2818db7bccf4STejun Heo 	int i;
2819db7bccf4STejun Heo 
2820db7bccf4STejun Heo 	BUG_ON(gcwq->cpu != smp_processor_id());
2821db7bccf4STejun Heo 
2822db7bccf4STejun Heo 	spin_lock_irq(&gcwq->lock);
2823db7bccf4STejun Heo 	/*
2824*e22bee78STejun Heo 	 * Claim the manager position and make all workers rogue.
2825*e22bee78STejun Heo 	 * Trustee must be bound to the target cpu and can't be
2826*e22bee78STejun Heo 	 * cancelled.
2827db7bccf4STejun Heo 	 */
2828db7bccf4STejun Heo 	BUG_ON(gcwq->cpu != smp_processor_id());
2829*e22bee78STejun Heo 	rc = trustee_wait_event(!(gcwq->flags & GCWQ_MANAGING_WORKERS));
2830*e22bee78STejun Heo 	BUG_ON(rc < 0);
2831*e22bee78STejun Heo 
2832*e22bee78STejun Heo 	gcwq->flags |= GCWQ_MANAGING_WORKERS;
2833db7bccf4STejun Heo 
2834db7bccf4STejun Heo 	list_for_each_entry(worker, &gcwq->idle_list, entry)
2835d302f017STejun Heo 		worker_set_flags(worker, WORKER_ROGUE, false);
2836db7bccf4STejun Heo 
2837db7bccf4STejun Heo 	for_each_busy_worker(worker, i, pos, gcwq)
2838d302f017STejun Heo 		worker_set_flags(worker, WORKER_ROGUE, false);
2839db7bccf4STejun Heo 
2840db7bccf4STejun Heo 	/*
2841*e22bee78STejun Heo 	 * Call schedule() so that we cross rq->lock and thus can
2842*e22bee78STejun Heo 	 * guarantee sched callbacks see the rogue flag.  This is
2843*e22bee78STejun Heo 	 * necessary as scheduler callbacks may be invoked from other
2844*e22bee78STejun Heo 	 * cpus.
2845*e22bee78STejun Heo 	 */
2846*e22bee78STejun Heo 	spin_unlock_irq(&gcwq->lock);
2847*e22bee78STejun Heo 	schedule();
2848*e22bee78STejun Heo 	spin_lock_irq(&gcwq->lock);
2849*e22bee78STejun Heo 
2850*e22bee78STejun Heo 	/*
2851*e22bee78STejun Heo 	 * Sched callbacks are disabled now.  gcwq->nr_running should
2852*e22bee78STejun Heo 	 * be zero and will stay that way, making need_more_worker()
2853*e22bee78STejun Heo 	 * and keep_working() always return true as long as the
2854*e22bee78STejun Heo 	 * worklist is not empty.
2855*e22bee78STejun Heo 	 */
2856*e22bee78STejun Heo 	WARN_ON_ONCE(atomic_read(get_gcwq_nr_running(gcwq->cpu)) != 0);
2857*e22bee78STejun Heo 
2858*e22bee78STejun Heo 	spin_unlock_irq(&gcwq->lock);
2859*e22bee78STejun Heo 	del_timer_sync(&gcwq->idle_timer);
2860*e22bee78STejun Heo 	spin_lock_irq(&gcwq->lock);
2861*e22bee78STejun Heo 
2862*e22bee78STejun Heo 	/*
2863db7bccf4STejun Heo 	 * We're now in charge.  Notify and proceed to drain.  We need
2864db7bccf4STejun Heo 	 * to keep the gcwq running during the whole CPU down
2865db7bccf4STejun Heo 	 * procedure as other cpu hotunplug callbacks may need to
2866db7bccf4STejun Heo 	 * flush currently running tasks.
2867db7bccf4STejun Heo 	 */
2868db7bccf4STejun Heo 	gcwq->trustee_state = TRUSTEE_IN_CHARGE;
2869db7bccf4STejun Heo 	wake_up_all(&gcwq->trustee_wait);
2870db7bccf4STejun Heo 
2871db7bccf4STejun Heo 	/*
2872db7bccf4STejun Heo 	 * The original cpu is in the process of dying and may go away
2873db7bccf4STejun Heo 	 * anytime now.  When that happens, we and all workers would
2874*e22bee78STejun Heo 	 * be migrated to other cpus.  Try draining any left work.  We
2875*e22bee78STejun Heo 	 * want to get it over with ASAP - spam rescuers, wake up as
2876*e22bee78STejun Heo 	 * many idlers as necessary and create new ones till the
2877*e22bee78STejun Heo 	 * worklist is empty.  Note that if the gcwq is frozen, there
2878*e22bee78STejun Heo 	 * may be frozen works in freezeable cwqs.  Don't declare
2879*e22bee78STejun Heo 	 * completion while frozen.
2880db7bccf4STejun Heo 	 */
2881db7bccf4STejun Heo 	while (gcwq->nr_workers != gcwq->nr_idle ||
2882db7bccf4STejun Heo 	       gcwq->flags & GCWQ_FREEZING ||
2883db7bccf4STejun Heo 	       gcwq->trustee_state == TRUSTEE_IN_CHARGE) {
2884*e22bee78STejun Heo 		int nr_works = 0;
2885*e22bee78STejun Heo 
2886*e22bee78STejun Heo 		list_for_each_entry(work, &gcwq->worklist, entry) {
2887*e22bee78STejun Heo 			send_mayday(work);
2888*e22bee78STejun Heo 			nr_works++;
2889*e22bee78STejun Heo 		}
2890*e22bee78STejun Heo 
2891*e22bee78STejun Heo 		list_for_each_entry(worker, &gcwq->idle_list, entry) {
2892*e22bee78STejun Heo 			if (!nr_works--)
2893*e22bee78STejun Heo 				break;
2894*e22bee78STejun Heo 			wake_up_process(worker->task);
2895*e22bee78STejun Heo 		}
2896*e22bee78STejun Heo 
2897*e22bee78STejun Heo 		if (need_to_create_worker(gcwq)) {
2898*e22bee78STejun Heo 			spin_unlock_irq(&gcwq->lock);
2899*e22bee78STejun Heo 			worker = create_worker(gcwq, false);
2900*e22bee78STejun Heo 			spin_lock_irq(&gcwq->lock);
2901*e22bee78STejun Heo 			if (worker) {
2902*e22bee78STejun Heo 				worker_set_flags(worker, WORKER_ROGUE, false);
2903*e22bee78STejun Heo 				start_worker(worker);
2904*e22bee78STejun Heo 			}
2905*e22bee78STejun Heo 		}
2906*e22bee78STejun Heo 
2907db7bccf4STejun Heo 		/* give a breather */
2908db7bccf4STejun Heo 		if (trustee_wait_event_timeout(false, TRUSTEE_COOLDOWN) < 0)
2909db7bccf4STejun Heo 			break;
2910db7bccf4STejun Heo 	}
2911db7bccf4STejun Heo 
2912*e22bee78STejun Heo 	/*
2913*e22bee78STejun Heo 	 * Either all works have been scheduled and cpu is down, or
2914*e22bee78STejun Heo 	 * cpu down has already been canceled.  Wait for and butcher
2915*e22bee78STejun Heo 	 * all workers till we're canceled.
2916*e22bee78STejun Heo 	 */
2917*e22bee78STejun Heo 	do {
2918*e22bee78STejun Heo 		rc = trustee_wait_event(!list_empty(&gcwq->idle_list));
2919*e22bee78STejun Heo 		while (!list_empty(&gcwq->idle_list))
2920*e22bee78STejun Heo 			destroy_worker(list_first_entry(&gcwq->idle_list,
2921*e22bee78STejun Heo 							struct worker, entry));
2922*e22bee78STejun Heo 	} while (gcwq->nr_workers && rc >= 0);
2923*e22bee78STejun Heo 
2924*e22bee78STejun Heo 	/*
2925*e22bee78STejun Heo 	 * At this point, either draining has completed and no worker
2926*e22bee78STejun Heo 	 * is left, or cpu down has been canceled or the cpu is being
2927*e22bee78STejun Heo 	 * brought back up.  There shouldn't be any idle one left.
2928*e22bee78STejun Heo 	 * Tell the remaining busy ones to rebind once it finishes the
2929*e22bee78STejun Heo 	 * currently scheduled works by scheduling the rebind_work.
2930*e22bee78STejun Heo 	 */
2931*e22bee78STejun Heo 	WARN_ON(!list_empty(&gcwq->idle_list));
2932*e22bee78STejun Heo 
2933*e22bee78STejun Heo 	for_each_busy_worker(worker, i, pos, gcwq) {
2934*e22bee78STejun Heo 		struct work_struct *rebind_work = &worker->rebind_work;
2935*e22bee78STejun Heo 
2936*e22bee78STejun Heo 		/*
2937*e22bee78STejun Heo 		 * Rebind_work may race with future cpu hotplug
2938*e22bee78STejun Heo 		 * operations.  Use a separate flag to mark that
2939*e22bee78STejun Heo 		 * rebinding is scheduled.
2940*e22bee78STejun Heo 		 */
2941*e22bee78STejun Heo 		worker_set_flags(worker, WORKER_REBIND, false);
2942*e22bee78STejun Heo 		worker_clr_flags(worker, WORKER_ROGUE);
2943*e22bee78STejun Heo 
2944*e22bee78STejun Heo 		/* queue rebind_work, wq doesn't matter, use the default one */
2945*e22bee78STejun Heo 		if (test_and_set_bit(WORK_STRUCT_PENDING_BIT,
2946*e22bee78STejun Heo 				     work_data_bits(rebind_work)))
2947*e22bee78STejun Heo 			continue;
2948*e22bee78STejun Heo 
2949*e22bee78STejun Heo 		debug_work_activate(rebind_work);
2950*e22bee78STejun Heo 		insert_work(get_cwq(gcwq->cpu, keventd_wq), rebind_work,
2951*e22bee78STejun Heo 			    worker->scheduled.next,
2952*e22bee78STejun Heo 			    work_color_to_flags(WORK_NO_COLOR));
2953*e22bee78STejun Heo 	}
2954*e22bee78STejun Heo 
2955*e22bee78STejun Heo 	/* relinquish manager role */
2956*e22bee78STejun Heo 	gcwq->flags &= ~GCWQ_MANAGING_WORKERS;
2957*e22bee78STejun Heo 
2958db7bccf4STejun Heo 	/* notify completion */
2959db7bccf4STejun Heo 	gcwq->trustee = NULL;
2960db7bccf4STejun Heo 	gcwq->trustee_state = TRUSTEE_DONE;
2961db7bccf4STejun Heo 	wake_up_all(&gcwq->trustee_wait);
2962db7bccf4STejun Heo 	spin_unlock_irq(&gcwq->lock);
2963db7bccf4STejun Heo 	return 0;
2964db7bccf4STejun Heo }
2965db7bccf4STejun Heo 
2966db7bccf4STejun Heo /**
2967db7bccf4STejun Heo  * wait_trustee_state - wait for trustee to enter the specified state
2968db7bccf4STejun Heo  * @gcwq: gcwq the trustee of interest belongs to
2969db7bccf4STejun Heo  * @state: target state to wait for
2970db7bccf4STejun Heo  *
2971db7bccf4STejun Heo  * Wait for the trustee to reach @state.  DONE is already matched.
2972db7bccf4STejun Heo  *
2973db7bccf4STejun Heo  * CONTEXT:
2974db7bccf4STejun Heo  * spin_lock_irq(gcwq->lock) which may be released and regrabbed
2975db7bccf4STejun Heo  * multiple times.  To be used by cpu_callback.
2976db7bccf4STejun Heo  */
2977db7bccf4STejun Heo static void __cpuinit wait_trustee_state(struct global_cwq *gcwq, int state)
2978db7bccf4STejun Heo {
2979db7bccf4STejun Heo 	if (!(gcwq->trustee_state == state ||
2980db7bccf4STejun Heo 	      gcwq->trustee_state == TRUSTEE_DONE)) {
2981db7bccf4STejun Heo 		spin_unlock_irq(&gcwq->lock);
2982db7bccf4STejun Heo 		__wait_event(gcwq->trustee_wait,
2983db7bccf4STejun Heo 			     gcwq->trustee_state == state ||
2984db7bccf4STejun Heo 			     gcwq->trustee_state == TRUSTEE_DONE);
2985db7bccf4STejun Heo 		spin_lock_irq(&gcwq->lock);
2986db7bccf4STejun Heo 	}
2987db7bccf4STejun Heo }
2988db7bccf4STejun Heo 
29899c7b216dSChandra Seetharaman static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
29901da177e4SLinus Torvalds 						unsigned long action,
29911da177e4SLinus Torvalds 						void *hcpu)
29921da177e4SLinus Torvalds {
29933af24433SOleg Nesterov 	unsigned int cpu = (unsigned long)hcpu;
2994db7bccf4STejun Heo 	struct global_cwq *gcwq = get_gcwq(cpu);
2995db7bccf4STejun Heo 	struct task_struct *new_trustee = NULL;
2996*e22bee78STejun Heo 	struct worker *uninitialized_var(new_worker);
2997db7bccf4STejun Heo 	unsigned long flags;
29981da177e4SLinus Torvalds 
29998bb78442SRafael J. Wysocki 	action &= ~CPU_TASKS_FROZEN;
30008bb78442SRafael J. Wysocki 
3001db7bccf4STejun Heo 	switch (action) {
3002db7bccf4STejun Heo 	case CPU_DOWN_PREPARE:
3003db7bccf4STejun Heo 		new_trustee = kthread_create(trustee_thread, gcwq,
3004db7bccf4STejun Heo 					     "workqueue_trustee/%d\n", cpu);
3005db7bccf4STejun Heo 		if (IS_ERR(new_trustee))
3006db7bccf4STejun Heo 			return notifier_from_errno(PTR_ERR(new_trustee));
3007db7bccf4STejun Heo 		kthread_bind(new_trustee, cpu);
3008*e22bee78STejun Heo 		/* fall through */
3009*e22bee78STejun Heo 	case CPU_UP_PREPARE:
3010*e22bee78STejun Heo 		BUG_ON(gcwq->first_idle);
3011*e22bee78STejun Heo 		new_worker = create_worker(gcwq, false);
3012*e22bee78STejun Heo 		if (!new_worker) {
3013*e22bee78STejun Heo 			if (new_trustee)
3014*e22bee78STejun Heo 				kthread_stop(new_trustee);
3015*e22bee78STejun Heo 			return NOTIFY_BAD;
3016*e22bee78STejun Heo 		}
3017db7bccf4STejun Heo 	}
30181537663fSTejun Heo 
3019db7bccf4STejun Heo 	/* some are called w/ irq disabled, don't disturb irq status */
3020db7bccf4STejun Heo 	spin_lock_irqsave(&gcwq->lock, flags);
30213af24433SOleg Nesterov 
30223af24433SOleg Nesterov 	switch (action) {
3023db7bccf4STejun Heo 	case CPU_DOWN_PREPARE:
3024db7bccf4STejun Heo 		/* initialize trustee and tell it to acquire the gcwq */
3025db7bccf4STejun Heo 		BUG_ON(gcwq->trustee || gcwq->trustee_state != TRUSTEE_DONE);
3026db7bccf4STejun Heo 		gcwq->trustee = new_trustee;
3027db7bccf4STejun Heo 		gcwq->trustee_state = TRUSTEE_START;
3028db7bccf4STejun Heo 		wake_up_process(gcwq->trustee);
3029db7bccf4STejun Heo 		wait_trustee_state(gcwq, TRUSTEE_IN_CHARGE);
3030*e22bee78STejun Heo 		/* fall through */
3031*e22bee78STejun Heo 	case CPU_UP_PREPARE:
3032*e22bee78STejun Heo 		BUG_ON(gcwq->first_idle);
3033*e22bee78STejun Heo 		gcwq->first_idle = new_worker;
3034*e22bee78STejun Heo 		break;
3035*e22bee78STejun Heo 
3036*e22bee78STejun Heo 	case CPU_DYING:
3037*e22bee78STejun Heo 		/*
3038*e22bee78STejun Heo 		 * Before this, the trustee and all workers except for
3039*e22bee78STejun Heo 		 * the ones which are still executing works from
3040*e22bee78STejun Heo 		 * before the last CPU down must be on the cpu.  After
3041*e22bee78STejun Heo 		 * this, they'll all be diasporas.
3042*e22bee78STejun Heo 		 */
3043*e22bee78STejun Heo 		gcwq->flags |= GCWQ_DISASSOCIATED;
3044db7bccf4STejun Heo 		break;
3045db7bccf4STejun Heo 
30463da1c84cSOleg Nesterov 	case CPU_POST_DEAD:
3047db7bccf4STejun Heo 		gcwq->trustee_state = TRUSTEE_BUTCHER;
3048*e22bee78STejun Heo 		/* fall through */
3049*e22bee78STejun Heo 	case CPU_UP_CANCELED:
3050*e22bee78STejun Heo 		destroy_worker(gcwq->first_idle);
3051*e22bee78STejun Heo 		gcwq->first_idle = NULL;
3052db7bccf4STejun Heo 		break;
3053db7bccf4STejun Heo 
3054db7bccf4STejun Heo 	case CPU_DOWN_FAILED:
3055db7bccf4STejun Heo 	case CPU_ONLINE:
3056*e22bee78STejun Heo 		gcwq->flags &= ~GCWQ_DISASSOCIATED;
3057db7bccf4STejun Heo 		if (gcwq->trustee_state != TRUSTEE_DONE) {
3058db7bccf4STejun Heo 			gcwq->trustee_state = TRUSTEE_RELEASE;
3059db7bccf4STejun Heo 			wake_up_process(gcwq->trustee);
3060db7bccf4STejun Heo 			wait_trustee_state(gcwq, TRUSTEE_DONE);
3061db7bccf4STejun Heo 		}
3062db7bccf4STejun Heo 
3063*e22bee78STejun Heo 		/*
3064*e22bee78STejun Heo 		 * Trustee is done and there might be no worker left.
3065*e22bee78STejun Heo 		 * Put the first_idle in and request a real manager to
3066*e22bee78STejun Heo 		 * take a look.
3067*e22bee78STejun Heo 		 */
3068*e22bee78STejun Heo 		spin_unlock_irq(&gcwq->lock);
3069*e22bee78STejun Heo 		kthread_bind(gcwq->first_idle->task, cpu);
3070*e22bee78STejun Heo 		spin_lock_irq(&gcwq->lock);
3071*e22bee78STejun Heo 		gcwq->flags |= GCWQ_MANAGE_WORKERS;
3072*e22bee78STejun Heo 		start_worker(gcwq->first_idle);
3073*e22bee78STejun Heo 		gcwq->first_idle = NULL;
30741da177e4SLinus Torvalds 		break;
30751da177e4SLinus Torvalds 	}
3076db7bccf4STejun Heo 
3077db7bccf4STejun Heo 	spin_unlock_irqrestore(&gcwq->lock, flags);
30781da177e4SLinus Torvalds 
30791537663fSTejun Heo 	return notifier_from_errno(0);
30801da177e4SLinus Torvalds }
30811da177e4SLinus Torvalds 
30822d3854a3SRusty Russell #ifdef CONFIG_SMP
30838ccad40dSRusty Russell 
30842d3854a3SRusty Russell struct work_for_cpu {
30856b44003eSAndrew Morton 	struct completion completion;
30862d3854a3SRusty Russell 	long (*fn)(void *);
30872d3854a3SRusty Russell 	void *arg;
30882d3854a3SRusty Russell 	long ret;
30892d3854a3SRusty Russell };
30902d3854a3SRusty Russell 
30916b44003eSAndrew Morton static int do_work_for_cpu(void *_wfc)
30922d3854a3SRusty Russell {
30936b44003eSAndrew Morton 	struct work_for_cpu *wfc = _wfc;
30942d3854a3SRusty Russell 	wfc->ret = wfc->fn(wfc->arg);
30956b44003eSAndrew Morton 	complete(&wfc->completion);
30966b44003eSAndrew Morton 	return 0;
30972d3854a3SRusty Russell }
30982d3854a3SRusty Russell 
30992d3854a3SRusty Russell /**
31002d3854a3SRusty Russell  * work_on_cpu - run a function in user context on a particular cpu
31012d3854a3SRusty Russell  * @cpu: the cpu to run on
31022d3854a3SRusty Russell  * @fn: the function to run
31032d3854a3SRusty Russell  * @arg: the function arg
31042d3854a3SRusty Russell  *
310531ad9081SRusty Russell  * This will return the value @fn returns.
310631ad9081SRusty Russell  * It is up to the caller to ensure that the cpu doesn't go offline.
31076b44003eSAndrew Morton  * The caller must not hold any locks which would prevent @fn from completing.
31082d3854a3SRusty Russell  */
31092d3854a3SRusty Russell long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
31102d3854a3SRusty Russell {
31116b44003eSAndrew Morton 	struct task_struct *sub_thread;
31126b44003eSAndrew Morton 	struct work_for_cpu wfc = {
31136b44003eSAndrew Morton 		.completion = COMPLETION_INITIALIZER_ONSTACK(wfc.completion),
31146b44003eSAndrew Morton 		.fn = fn,
31156b44003eSAndrew Morton 		.arg = arg,
31166b44003eSAndrew Morton 	};
31172d3854a3SRusty Russell 
31186b44003eSAndrew Morton 	sub_thread = kthread_create(do_work_for_cpu, &wfc, "work_for_cpu");
31196b44003eSAndrew Morton 	if (IS_ERR(sub_thread))
31206b44003eSAndrew Morton 		return PTR_ERR(sub_thread);
31216b44003eSAndrew Morton 	kthread_bind(sub_thread, cpu);
31226b44003eSAndrew Morton 	wake_up_process(sub_thread);
31236b44003eSAndrew Morton 	wait_for_completion(&wfc.completion);
31242d3854a3SRusty Russell 	return wfc.ret;
31252d3854a3SRusty Russell }
31262d3854a3SRusty Russell EXPORT_SYMBOL_GPL(work_on_cpu);
31272d3854a3SRusty Russell #endif /* CONFIG_SMP */
31282d3854a3SRusty Russell 
3129a0a1a5fdSTejun Heo #ifdef CONFIG_FREEZER
3130a0a1a5fdSTejun Heo 
3131a0a1a5fdSTejun Heo /**
3132a0a1a5fdSTejun Heo  * freeze_workqueues_begin - begin freezing workqueues
3133a0a1a5fdSTejun Heo  *
3134a0a1a5fdSTejun Heo  * Start freezing workqueues.  After this function returns, all
3135a0a1a5fdSTejun Heo  * freezeable workqueues will queue new works to their frozen_works
31367e11629dSTejun Heo  * list instead of gcwq->worklist.
3137a0a1a5fdSTejun Heo  *
3138a0a1a5fdSTejun Heo  * CONTEXT:
31398b03ae3cSTejun Heo  * Grabs and releases workqueue_lock and gcwq->lock's.
3140a0a1a5fdSTejun Heo  */
3141a0a1a5fdSTejun Heo void freeze_workqueues_begin(void)
3142a0a1a5fdSTejun Heo {
3143a0a1a5fdSTejun Heo 	struct workqueue_struct *wq;
3144a0a1a5fdSTejun Heo 	unsigned int cpu;
3145a0a1a5fdSTejun Heo 
3146a0a1a5fdSTejun Heo 	spin_lock(&workqueue_lock);
3147a0a1a5fdSTejun Heo 
3148a0a1a5fdSTejun Heo 	BUG_ON(workqueue_freezing);
3149a0a1a5fdSTejun Heo 	workqueue_freezing = true;
3150a0a1a5fdSTejun Heo 
3151a0a1a5fdSTejun Heo 	for_each_possible_cpu(cpu) {
31528b03ae3cSTejun Heo 		struct global_cwq *gcwq = get_gcwq(cpu);
31538b03ae3cSTejun Heo 
31548b03ae3cSTejun Heo 		spin_lock_irq(&gcwq->lock);
31558b03ae3cSTejun Heo 
3156db7bccf4STejun Heo 		BUG_ON(gcwq->flags & GCWQ_FREEZING);
3157db7bccf4STejun Heo 		gcwq->flags |= GCWQ_FREEZING;
3158db7bccf4STejun Heo 
3159a0a1a5fdSTejun Heo 		list_for_each_entry(wq, &workqueues, list) {
3160a0a1a5fdSTejun Heo 			struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
3161a0a1a5fdSTejun Heo 
3162a0a1a5fdSTejun Heo 			if (wq->flags & WQ_FREEZEABLE)
3163a0a1a5fdSTejun Heo 				cwq->max_active = 0;
3164a0a1a5fdSTejun Heo 		}
31658b03ae3cSTejun Heo 
31668b03ae3cSTejun Heo 		spin_unlock_irq(&gcwq->lock);
3167a0a1a5fdSTejun Heo 	}
3168a0a1a5fdSTejun Heo 
3169a0a1a5fdSTejun Heo 	spin_unlock(&workqueue_lock);
3170a0a1a5fdSTejun Heo }
3171a0a1a5fdSTejun Heo 
3172a0a1a5fdSTejun Heo /**
3173a0a1a5fdSTejun Heo  * freeze_workqueues_busy - are freezeable workqueues still busy?
3174a0a1a5fdSTejun Heo  *
3175a0a1a5fdSTejun Heo  * Check whether freezing is complete.  This function must be called
3176a0a1a5fdSTejun Heo  * between freeze_workqueues_begin() and thaw_workqueues().
3177a0a1a5fdSTejun Heo  *
3178a0a1a5fdSTejun Heo  * CONTEXT:
3179a0a1a5fdSTejun Heo  * Grabs and releases workqueue_lock.
3180a0a1a5fdSTejun Heo  *
3181a0a1a5fdSTejun Heo  * RETURNS:
3182a0a1a5fdSTejun Heo  * %true if some freezeable workqueues are still busy.  %false if
3183a0a1a5fdSTejun Heo  * freezing is complete.
3184a0a1a5fdSTejun Heo  */
3185a0a1a5fdSTejun Heo bool freeze_workqueues_busy(void)
3186a0a1a5fdSTejun Heo {
3187a0a1a5fdSTejun Heo 	struct workqueue_struct *wq;
3188a0a1a5fdSTejun Heo 	unsigned int cpu;
3189a0a1a5fdSTejun Heo 	bool busy = false;
3190a0a1a5fdSTejun Heo 
3191a0a1a5fdSTejun Heo 	spin_lock(&workqueue_lock);
3192a0a1a5fdSTejun Heo 
3193a0a1a5fdSTejun Heo 	BUG_ON(!workqueue_freezing);
3194a0a1a5fdSTejun Heo 
3195a0a1a5fdSTejun Heo 	for_each_possible_cpu(cpu) {
3196a0a1a5fdSTejun Heo 		/*
3197a0a1a5fdSTejun Heo 		 * nr_active is monotonically decreasing.  It's safe
3198a0a1a5fdSTejun Heo 		 * to peek without lock.
3199a0a1a5fdSTejun Heo 		 */
3200a0a1a5fdSTejun Heo 		list_for_each_entry(wq, &workqueues, list) {
3201a0a1a5fdSTejun Heo 			struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
3202a0a1a5fdSTejun Heo 
3203a0a1a5fdSTejun Heo 			if (!(wq->flags & WQ_FREEZEABLE))
3204a0a1a5fdSTejun Heo 				continue;
3205a0a1a5fdSTejun Heo 
3206a0a1a5fdSTejun Heo 			BUG_ON(cwq->nr_active < 0);
3207a0a1a5fdSTejun Heo 			if (cwq->nr_active) {
3208a0a1a5fdSTejun Heo 				busy = true;
3209a0a1a5fdSTejun Heo 				goto out_unlock;
3210a0a1a5fdSTejun Heo 			}
3211a0a1a5fdSTejun Heo 		}
3212a0a1a5fdSTejun Heo 	}
3213a0a1a5fdSTejun Heo out_unlock:
3214a0a1a5fdSTejun Heo 	spin_unlock(&workqueue_lock);
3215a0a1a5fdSTejun Heo 	return busy;
3216a0a1a5fdSTejun Heo }
3217a0a1a5fdSTejun Heo 
3218a0a1a5fdSTejun Heo /**
3219a0a1a5fdSTejun Heo  * thaw_workqueues - thaw workqueues
3220a0a1a5fdSTejun Heo  *
3221a0a1a5fdSTejun Heo  * Thaw workqueues.  Normal queueing is restored and all collected
32227e11629dSTejun Heo  * frozen works are transferred to their respective gcwq worklists.
3223a0a1a5fdSTejun Heo  *
3224a0a1a5fdSTejun Heo  * CONTEXT:
32258b03ae3cSTejun Heo  * Grabs and releases workqueue_lock and gcwq->lock's.
3226a0a1a5fdSTejun Heo  */
3227a0a1a5fdSTejun Heo void thaw_workqueues(void)
3228a0a1a5fdSTejun Heo {
3229a0a1a5fdSTejun Heo 	struct workqueue_struct *wq;
3230a0a1a5fdSTejun Heo 	unsigned int cpu;
3231a0a1a5fdSTejun Heo 
3232a0a1a5fdSTejun Heo 	spin_lock(&workqueue_lock);
3233a0a1a5fdSTejun Heo 
3234a0a1a5fdSTejun Heo 	if (!workqueue_freezing)
3235a0a1a5fdSTejun Heo 		goto out_unlock;
3236a0a1a5fdSTejun Heo 
3237a0a1a5fdSTejun Heo 	for_each_possible_cpu(cpu) {
32388b03ae3cSTejun Heo 		struct global_cwq *gcwq = get_gcwq(cpu);
32398b03ae3cSTejun Heo 
32408b03ae3cSTejun Heo 		spin_lock_irq(&gcwq->lock);
32418b03ae3cSTejun Heo 
3242db7bccf4STejun Heo 		BUG_ON(!(gcwq->flags & GCWQ_FREEZING));
3243db7bccf4STejun Heo 		gcwq->flags &= ~GCWQ_FREEZING;
3244db7bccf4STejun Heo 
3245a0a1a5fdSTejun Heo 		list_for_each_entry(wq, &workqueues, list) {
3246a0a1a5fdSTejun Heo 			struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
3247a0a1a5fdSTejun Heo 
3248a0a1a5fdSTejun Heo 			if (!(wq->flags & WQ_FREEZEABLE))
3249a0a1a5fdSTejun Heo 				continue;
3250a0a1a5fdSTejun Heo 
3251a0a1a5fdSTejun Heo 			/* restore max_active and repopulate worklist */
3252a0a1a5fdSTejun Heo 			cwq->max_active = wq->saved_max_active;
3253a0a1a5fdSTejun Heo 
3254a0a1a5fdSTejun Heo 			while (!list_empty(&cwq->delayed_works) &&
3255a0a1a5fdSTejun Heo 			       cwq->nr_active < cwq->max_active)
3256a0a1a5fdSTejun Heo 				cwq_activate_first_delayed(cwq);
3257a0a1a5fdSTejun Heo 
3258502ca9d8STejun Heo 			/* perform delayed unbind from single cpu if empty */
3259502ca9d8STejun Heo 			if (wq->single_cpu == gcwq->cpu &&
3260502ca9d8STejun Heo 			    !cwq->nr_active && list_empty(&cwq->delayed_works))
3261502ca9d8STejun Heo 				cwq_unbind_single_cpu(cwq);
3262a0a1a5fdSTejun Heo 		}
32638b03ae3cSTejun Heo 
3264*e22bee78STejun Heo 		wake_up_worker(gcwq);
3265*e22bee78STejun Heo 
32668b03ae3cSTejun Heo 		spin_unlock_irq(&gcwq->lock);
3267a0a1a5fdSTejun Heo 	}
3268a0a1a5fdSTejun Heo 
3269a0a1a5fdSTejun Heo 	workqueue_freezing = false;
3270a0a1a5fdSTejun Heo out_unlock:
3271a0a1a5fdSTejun Heo 	spin_unlock(&workqueue_lock);
3272a0a1a5fdSTejun Heo }
3273a0a1a5fdSTejun Heo #endif /* CONFIG_FREEZER */
3274a0a1a5fdSTejun Heo 
3275c12920d1SOleg Nesterov void __init init_workqueues(void)
32761da177e4SLinus Torvalds {
3277c34056a3STejun Heo 	unsigned int cpu;
3278c8e55f36STejun Heo 	int i;
3279c34056a3STejun Heo 
32807a22ad75STejun Heo 	/*
32817a22ad75STejun Heo 	 * The pointer part of work->data is either pointing to the
32827a22ad75STejun Heo 	 * cwq or contains the cpu number the work ran last on.  Make
32837a22ad75STejun Heo 	 * sure cpu number won't overflow into kernel pointer area so
32847a22ad75STejun Heo 	 * that they can be distinguished.
32857a22ad75STejun Heo 	 */
32867a22ad75STejun Heo 	BUILD_BUG_ON(NR_CPUS << WORK_STRUCT_FLAG_BITS >= PAGE_OFFSET);
32877a22ad75STejun Heo 
3288db7bccf4STejun Heo 	hotcpu_notifier(workqueue_cpu_callback, CPU_PRI_WORKQUEUE);
32898b03ae3cSTejun Heo 
32908b03ae3cSTejun Heo 	/* initialize gcwqs */
32918b03ae3cSTejun Heo 	for_each_possible_cpu(cpu) {
32928b03ae3cSTejun Heo 		struct global_cwq *gcwq = get_gcwq(cpu);
32938b03ae3cSTejun Heo 
32948b03ae3cSTejun Heo 		spin_lock_init(&gcwq->lock);
32957e11629dSTejun Heo 		INIT_LIST_HEAD(&gcwq->worklist);
32968b03ae3cSTejun Heo 		gcwq->cpu = cpu;
32978b03ae3cSTejun Heo 
3298c8e55f36STejun Heo 		INIT_LIST_HEAD(&gcwq->idle_list);
3299c8e55f36STejun Heo 		for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++)
3300c8e55f36STejun Heo 			INIT_HLIST_HEAD(&gcwq->busy_hash[i]);
3301c8e55f36STejun Heo 
3302*e22bee78STejun Heo 		init_timer_deferrable(&gcwq->idle_timer);
3303*e22bee78STejun Heo 		gcwq->idle_timer.function = idle_worker_timeout;
3304*e22bee78STejun Heo 		gcwq->idle_timer.data = (unsigned long)gcwq;
3305*e22bee78STejun Heo 
3306*e22bee78STejun Heo 		setup_timer(&gcwq->mayday_timer, gcwq_mayday_timeout,
3307*e22bee78STejun Heo 			    (unsigned long)gcwq);
3308*e22bee78STejun Heo 
33098b03ae3cSTejun Heo 		ida_init(&gcwq->worker_ida);
3310db7bccf4STejun Heo 
3311db7bccf4STejun Heo 		gcwq->trustee_state = TRUSTEE_DONE;
3312db7bccf4STejun Heo 		init_waitqueue_head(&gcwq->trustee_wait);
33138b03ae3cSTejun Heo 	}
33148b03ae3cSTejun Heo 
3315*e22bee78STejun Heo 	/* create the initial worker */
3316*e22bee78STejun Heo 	for_each_online_cpu(cpu) {
3317*e22bee78STejun Heo 		struct global_cwq *gcwq = get_gcwq(cpu);
3318*e22bee78STejun Heo 		struct worker *worker;
3319*e22bee78STejun Heo 
3320*e22bee78STejun Heo 		worker = create_worker(gcwq, true);
3321*e22bee78STejun Heo 		BUG_ON(!worker);
3322*e22bee78STejun Heo 		spin_lock_irq(&gcwq->lock);
3323*e22bee78STejun Heo 		start_worker(worker);
3324*e22bee78STejun Heo 		spin_unlock_irq(&gcwq->lock);
3325*e22bee78STejun Heo 	}
3326*e22bee78STejun Heo 
33271da177e4SLinus Torvalds 	keventd_wq = create_workqueue("events");
33281da177e4SLinus Torvalds 	BUG_ON(!keventd_wq);
33291da177e4SLinus Torvalds }
3330