/*
 * linux/kernel/workqueue.c
 *
 * Generic mechanism for defining kernel helper threads for running
 * arbitrary tasks in process context.
 *
 * Started by Ingo Molnar, Copyright (C) 2002
 *
 * Derived from the taskqueue/keventd code by:
 *
 *   David Woodhouse <[email protected]>
 *   Andrew Morton
 *   Kai Petzke <[email protected]>
 *   Theodore Ts'o <[email protected]>
 *
 * Made to use alloc_percpu by Christoph Lameter.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/mempolicy.h>
#include <linux/freezer.h>
#include <linux/kallsyms.h>
#include <linux/debug_locks.h>
#include <linux/lockdep.h>
#include <linux/idr.h>

#include "workqueue_sched.h"

enum {
	/* global_cwq flags */
	GCWQ_MANAGE_WORKERS	= 1 << 0,	/* need to manage workers */
	GCWQ_MANAGING_WORKERS	= 1 << 1,	/* managing workers */
	GCWQ_DISASSOCIATED	= 1 << 2,	/* cpu can't serve workers */
	GCWQ_FREEZING		= 1 << 3,	/* freeze in progress */
	GCWQ_HIGHPRI_PENDING	= 1 << 4,	/* highpri works on queue */

	/* worker flags */
	WORKER_STARTED		= 1 << 0,	/* started */
	WORKER_DIE		= 1 << 1,	/* die die die */
	WORKER_IDLE		= 1 << 2,	/* is idle */
	WORKER_PREP		= 1 << 3,	/* preparing to run works */
	WORKER_ROGUE		= 1 << 4,	/* not bound to any cpu */
	WORKER_REBIND		= 1 << 5,	/* mom is home, come back */
	WORKER_CPU_INTENSIVE	= 1 << 6,	/* cpu intensive */
	WORKER_UNBOUND		= 1 << 7,	/* worker is unbound */

	WORKER_NOT_RUNNING	= WORKER_PREP | WORKER_ROGUE | WORKER_REBIND |
				  WORKER_CPU_INTENSIVE | WORKER_UNBOUND,

	/* gcwq->trustee_state */
	TRUSTEE_START		= 0,		/* start */
	TRUSTEE_IN_CHARGE	= 1,		/* trustee in charge of gcwq */
	TRUSTEE_BUTCHER		= 2,		/* butcher workers */
	TRUSTEE_RELEASE		= 3,		/* release workers */
	TRUSTEE_DONE		= 4,		/* trustee is done */

	BUSY_WORKER_HASH_ORDER	= 6,		/* 64 pointers */
	BUSY_WORKER_HASH_SIZE	= 1 << BUSY_WORKER_HASH_ORDER,
	BUSY_WORKER_HASH_MASK	= BUSY_WORKER_HASH_SIZE - 1,

	MAX_IDLE_WORKERS_RATIO	= 4,		/* 1/4 of busy can be idle */
	IDLE_WORKER_TIMEOUT	= 300 * HZ,	/* keep idle ones for 5 mins */

	MAYDAY_INITIAL_TIMEOUT	= HZ / 100,	/* call for help after 10ms */
	MAYDAY_INTERVAL		= HZ / 10,	/* and then every 100ms */
	CREATE_COOLDOWN		= HZ,		/* time to breathe after fail */
	TRUSTEE_COOLDOWN	= HZ / 10,	/* for trustee draining */

	/*
	 * Rescue workers are used only in emergencies and are shared by
	 * all cpus.  Give -20.
	 */
	RESCUER_NICE_LEVEL	= -20,
};
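
/*
 * A worked example of the flag layout above (the values follow directly
 * from the shifts): the worker flag bits are 0x01, 0x02, 0x04, 0x08,
 * 0x10, 0x20, 0x40 and 0x80, so WORKER_NOT_RUNNING is
 * 0x08 | 0x10 | 0x20 | 0x40 | 0x80 == 0xf8.  A worker contributes to
 * the gcwq's nr_running count iff (worker->flags & 0xf8) == 0.
 */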

/*
 * Structure fields follow one of the following exclusion rules.
 *
 * I: Set during initialization and read-only afterwards.
 *
 * P: Preemption protected.  Disabling preemption is enough; fields
 *    should only be modified and accessed from the local cpu.
 *
 * L: gcwq->lock protected.  Access with gcwq->lock held.
 *
 * X: During normal operation, modification requires gcwq->lock and
 *    should be done only from local cpu.  Either disabling preemption
 *    on local cpu or grabbing gcwq->lock is enough for read access.
 *    If GCWQ_DISASSOCIATED is set, it's identical to L.
 *
 * F: wq->flush_mutex protected.
 *
 * W: workqueue_lock protected.
 */
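
/*
 * For example, a field annotated "L:" below, such as gcwq->nr_workers,
 * may only be touched under the gcwq lock.  A minimal (hypothetical)
 * reader would look like:
 *
 *	spin_lock_irq(&gcwq->lock);
 *	n = gcwq->nr_workers;
 *	spin_unlock_irq(&gcwq->lock);
 *
 * whereas "I:" fields such as worker->task may be read locklessly once
 * initialization has finished.
 */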

struct global_cwq;

/*
 * The poor guys doing the actual heavy lifting.  All on-duty workers
 * are either serving the manager role, on idle list or on busy hash.
 */
struct worker {
	/* on idle list while idle, on busy hash table while busy */
	union {
		struct list_head	entry;	/* L: while idle */
		struct hlist_node	hentry;	/* L: while busy */
	};

	struct work_struct	*current_work;	/* L: work being processed */
	struct cpu_workqueue_struct *current_cwq; /* L: current_work's cwq */
	struct list_head	scheduled;	/* L: scheduled works */
	struct task_struct	*task;		/* I: worker task */
	struct global_cwq	*gcwq;		/* I: the associated gcwq */
	/* 64 bytes boundary on 64bit, 32 on 32bit */
	unsigned long		last_active;	/* L: last active timestamp */
	unsigned int		flags;		/* X: flags */
	int			id;		/* I: worker id */
	struct work_struct	rebind_work;	/* L: rebind worker to cpu */
};

/*
 * Global per-cpu workqueue.  There's one and only one for each cpu
 * and all works are queued and processed here regardless of their
 * target workqueues.
 */
struct global_cwq {
	spinlock_t		lock;		/* the gcwq lock */
	struct list_head	worklist;	/* L: list of pending works */
	unsigned int		cpu;		/* I: the associated cpu */
	unsigned int		flags;		/* L: GCWQ_* flags */

	int			nr_workers;	/* L: total number of workers */
	int			nr_idle;	/* L: currently idle ones */

	/* workers are chained either in the idle_list or busy_hash */
	struct list_head	idle_list;	/* X: list of idle workers */
	struct hlist_head	busy_hash[BUSY_WORKER_HASH_SIZE];
						/* L: hash of busy workers */

	struct timer_list	idle_timer;	/* L: worker idle timeout */
	struct timer_list	mayday_timer;	/* L: SOS timer for workers */

	struct ida		worker_ida;	/* L: for worker IDs */

	struct task_struct	*trustee;	/* L: for gcwq shutdown */
	unsigned int		trustee_state;	/* L: trustee state */
	wait_queue_head_t	trustee_wait;	/* trustee wait */
	struct worker		*first_idle;	/* L: first idle worker */
} ____cacheline_aligned_in_smp;

/*
 * The per-CPU workqueue.  The lower WORK_STRUCT_FLAG_BITS of
 * work_struct->data are used for flags and thus cwqs need to be
 * aligned at two's power of the number of flag bits.
 */
struct cpu_workqueue_struct {
	struct global_cwq	*gcwq;		/* I: the associated gcwq */
	struct workqueue_struct *wq;		/* I: the owning workqueue */
	int			work_color;	/* L: current color */
	int			flush_color;	/* L: flushing color */
	int			nr_in_flight[WORK_NR_COLORS];
						/* L: nr of in_flight works */
	int			nr_active;	/* L: nr of active works */
	int			max_active;	/* L: max active works */
	struct list_head	delayed_works;	/* L: delayed works */
};

/*
 * Structure used to wait for workqueue flush.
 */
struct wq_flusher {
	struct list_head	list;		/* F: list of flushers */
	int			flush_color;	/* F: flush color waiting for */
	struct completion	done;		/* flush completion */
};

/*
 * All cpumasks are assumed to be always set on UP and thus can't be
 * used to determine whether there's something to be done.
 */
#ifdef CONFIG_SMP
typedef cpumask_var_t mayday_mask_t;
#define mayday_test_and_set_cpu(cpu, mask)	\
	cpumask_test_and_set_cpu((cpu), (mask))
#define mayday_clear_cpu(cpu, mask)		cpumask_clear_cpu((cpu), (mask))
#define for_each_mayday_cpu(cpu, mask)		for_each_cpu((cpu), (mask))
#define alloc_mayday_mask(maskp, gfp)		alloc_cpumask_var((maskp), (gfp))
#define free_mayday_mask(mask)			free_cpumask_var((mask))
#else
typedef unsigned long mayday_mask_t;
#define mayday_test_and_set_cpu(cpu, mask)	test_and_set_bit(0, &(mask))
#define mayday_clear_cpu(cpu, mask)		clear_bit(0, &(mask))
#define for_each_mayday_cpu(cpu, mask)		if ((cpu) = 0, (mask))
#define alloc_mayday_mask(maskp, gfp)		true
#define free_mayday_mask(mask)			do { } while (0)
#endif
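
/*
 * Sketch of the UP fallback above: the whole mask collapses to bit 0 of
 * an unsigned long and the cpu argument is ignored.  Note that
 * for_each_mayday_cpu() is an if statement using the comma operator; it
 * assigns 0 to @cpu, then tests the mask, so the body runs at most once:
 *
 *	mayday_mask_t mask = 0;
 *	mayday_test_and_set_cpu(3, mask);	(sets bit 0, returns false)
 *	for_each_mayday_cpu(cpu, mask)
 *		handle(cpu);			(runs once with cpu == 0)
 *
 * handle() is a made-up function, used only for illustration.
 */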

/*
 * The externally visible workqueue abstraction is an array of
 * per-CPU workqueues:
 */
struct workqueue_struct {
	unsigned int		flags;		/* I: WQ_* flags */
	union {
		struct cpu_workqueue_struct __percpu	*pcpu;
		struct cpu_workqueue_struct		*single;
		unsigned long				v;
	} cpu_wq;				/* I: cwq's */
	struct list_head	list;		/* W: list of all workqueues */

	struct mutex		flush_mutex;	/* protects wq flushing */
	int			work_color;	/* F: current work color */
	int			flush_color;	/* F: current flush color */
	atomic_t		nr_cwqs_to_flush; /* flush in progress */
	struct wq_flusher	*first_flusher;	/* F: first flusher */
	struct list_head	flusher_queue;	/* F: flush waiters */
	struct list_head	flusher_overflow; /* F: flush overflow list */

	mayday_mask_t		mayday_mask;	/* cpus requesting rescue */
	struct worker		*rescuer;	/* I: rescue worker */

	int			saved_max_active; /* W: saved cwq max_active */
	const char		*name;		/* I: workqueue name */
#ifdef CONFIG_LOCKDEP
	struct lockdep_map	lockdep_map;
#endif
};

struct workqueue_struct *system_wq __read_mostly;
struct workqueue_struct *system_long_wq __read_mostly;
struct workqueue_struct *system_nrt_wq __read_mostly;
struct workqueue_struct *system_unbound_wq __read_mostly;
EXPORT_SYMBOL_GPL(system_wq);
EXPORT_SYMBOL_GPL(system_long_wq);
EXPORT_SYMBOL_GPL(system_nrt_wq);
EXPORT_SYMBOL_GPL(system_unbound_wq);

#define for_each_busy_worker(worker, i, pos, gcwq)			\
	for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++)			\
		hlist_for_each_entry(worker, pos, &gcwq->busy_hash[i], hentry)

static inline int __next_gcwq_cpu(int cpu, const struct cpumask *mask,
				  unsigned int sw)
{
	if (cpu < nr_cpu_ids) {
		if (sw & 1) {
			cpu = cpumask_next(cpu, mask);
			if (cpu < nr_cpu_ids)
				return cpu;
		}
		if (sw & 2)
			return WORK_CPU_UNBOUND;
	}
	return WORK_CPU_NONE;
}

static inline int __next_wq_cpu(int cpu, const struct cpumask *mask,
				struct workqueue_struct *wq)
{
	return __next_gcwq_cpu(cpu, mask, !(wq->flags & WQ_UNBOUND) ? 1 : 2);
}

/*
 * CPU iterators
 *
 * An extra gcwq is defined for an invalid cpu number
 * (WORK_CPU_UNBOUND) to host workqueues which are not bound to any
 * specific CPU.  The following iterators are similar to the
 * for_each_*_cpu() iterators but also consider the unbound gcwq.
 *
 * for_each_gcwq_cpu()		: possible CPUs + WORK_CPU_UNBOUND
 * for_each_online_gcwq_cpu()	: online CPUs + WORK_CPU_UNBOUND
 * for_each_cwq_cpu()		: possible CPUs for bound workqueues,
 *				  WORK_CPU_UNBOUND for unbound workqueues
 */
#define for_each_gcwq_cpu(cpu)						\
	for ((cpu) = __next_gcwq_cpu(-1, cpu_possible_mask, 3);		\
	     (cpu) < WORK_CPU_NONE;					\
	     (cpu) = __next_gcwq_cpu((cpu), cpu_possible_mask, 3))

#define for_each_online_gcwq_cpu(cpu)					\
	for ((cpu) = __next_gcwq_cpu(-1, cpu_online_mask, 3);		\
	     (cpu) < WORK_CPU_NONE;					\
	     (cpu) = __next_gcwq_cpu((cpu), cpu_online_mask, 3))

#define for_each_cwq_cpu(cpu, wq)					\
	for ((cpu) = __next_wq_cpu(-1, cpu_possible_mask, (wq));	\
	     (cpu) < WORK_CPU_NONE;					\
	     (cpu) = __next_wq_cpu((cpu), cpu_possible_mask, (wq)))
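
/*
 * Hypothetical usage of the iterators above, visiting every gcwq
 * including the unbound one and operating on each under its lock:
 *
 *	unsigned int cpu;
 *
 *	for_each_gcwq_cpu(cpu) {
 *		struct global_cwq *gcwq = get_gcwq(cpu);
 *
 *		spin_lock_irq(&gcwq->lock);
 *		...
 *		spin_unlock_irq(&gcwq->lock);
 *	}
 *
 * For a bound workqueue, for_each_cwq_cpu() walks the possible CPUs;
 * for an unbound one, it yields only WORK_CPU_UNBOUND.
 */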

#ifdef CONFIG_LOCKDEP
/**
 * in_workqueue_context() - in context of specified workqueue?
 * @wq: the workqueue of interest
 *
 * Checks lockdep state to see if the current task is executing from
 * within a workqueue item.  This function exists only if lockdep is
 * enabled.
 */
int in_workqueue_context(struct workqueue_struct *wq)
{
	return lock_is_held(&wq->lockdep_map);
}
#endif

#ifdef CONFIG_DEBUG_OBJECTS_WORK

static struct debug_obj_descr work_debug_descr;

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static int work_fixup_init(void *addr, enum debug_obj_state state)
{
	struct work_struct *work = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		cancel_work_sync(work);
		debug_object_init(work, &work_debug_descr);
		return 1;
	default:
		return 0;
	}
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown object is activated (might be a statically initialized object)
 */
static int work_fixup_activate(void *addr, enum debug_obj_state state)
{
	struct work_struct *work = addr;

	switch (state) {

	case ODEBUG_STATE_NOTAVAILABLE:
		/*
		 * This is not really a fixup. The work struct was
		 * statically initialized. We just make sure that it
		 * is tracked in the object tracker.
		 */
		if (test_bit(WORK_STRUCT_STATIC_BIT, work_data_bits(work))) {
			debug_object_init(work, &work_debug_descr);
			debug_object_activate(work, &work_debug_descr);
			return 0;
		}
		WARN_ON_ONCE(1);
		return 0;

	case ODEBUG_STATE_ACTIVE:
		WARN_ON(1);

	default:
		return 0;
	}
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static int work_fixup_free(void *addr, enum debug_obj_state state)
{
	struct work_struct *work = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		cancel_work_sync(work);
		debug_object_free(work, &work_debug_descr);
		return 1;
	default:
		return 0;
	}
}

static struct debug_obj_descr work_debug_descr = {
	.name		= "work_struct",
	.fixup_init	= work_fixup_init,
	.fixup_activate	= work_fixup_activate,
	.fixup_free	= work_fixup_free,
};

static inline void debug_work_activate(struct work_struct *work)
{
	debug_object_activate(work, &work_debug_descr);
}

static inline void debug_work_deactivate(struct work_struct *work)
{
	debug_object_deactivate(work, &work_debug_descr);
}

void __init_work(struct work_struct *work, int onstack)
{
	if (onstack)
		debug_object_init_on_stack(work, &work_debug_descr);
	else
		debug_object_init(work, &work_debug_descr);
}
EXPORT_SYMBOL_GPL(__init_work);

void destroy_work_on_stack(struct work_struct *work)
{
	debug_object_free(work, &work_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_work_on_stack);

#else
static inline void debug_work_activate(struct work_struct *work) { }
static inline void debug_work_deactivate(struct work_struct *work) { }
#endif

/* Serializes the accesses to the list of workqueues. */
static DEFINE_SPINLOCK(workqueue_lock);
static LIST_HEAD(workqueues);
static bool workqueue_freezing;		/* W: have wqs started freezing? */

/*
 * The almighty global cpu workqueues.  nr_running is the only field
 * which is expected to be used frequently by other cpus via
 * try_to_wake_up().  Put it in a separate cacheline.
 */
static DEFINE_PER_CPU(struct global_cwq, global_cwq);
static DEFINE_PER_CPU_SHARED_ALIGNED(atomic_t, gcwq_nr_running);

/*
 * Global cpu workqueue and nr_running counter for unbound gcwq.  The
 * gcwq is always online, has GCWQ_DISASSOCIATED set, and all its
 * workers have WORKER_UNBOUND set.
 */
static struct global_cwq unbound_global_cwq;
static atomic_t unbound_gcwq_nr_running = ATOMIC_INIT(0);	/* always 0 */

static int worker_thread(void *__worker);

static struct global_cwq *get_gcwq(unsigned int cpu)
{
	if (cpu != WORK_CPU_UNBOUND)
		return &per_cpu(global_cwq, cpu);
	else
		return &unbound_global_cwq;
}

static atomic_t *get_gcwq_nr_running(unsigned int cpu)
{
	if (cpu != WORK_CPU_UNBOUND)
		return &per_cpu(gcwq_nr_running, cpu);
	else
		return &unbound_gcwq_nr_running;
}

static struct cpu_workqueue_struct *get_cwq(unsigned int cpu,
					    struct workqueue_struct *wq)
{
	if (!(wq->flags & WQ_UNBOUND)) {
		if (likely(cpu < nr_cpu_ids)) {
#ifdef CONFIG_SMP
			return per_cpu_ptr(wq->cpu_wq.pcpu, cpu);
#else
			return wq->cpu_wq.single;
#endif
		}
	} else if (likely(cpu == WORK_CPU_UNBOUND))
		return wq->cpu_wq.single;
	return NULL;
}

static unsigned int work_color_to_flags(int color)
{
	return color << WORK_STRUCT_COLOR_SHIFT;
}

static int get_work_color(struct work_struct *work)
{
	return (*work_data_bits(work) >> WORK_STRUCT_COLOR_SHIFT) &
		((1 << WORK_STRUCT_COLOR_BITS) - 1);
}

static int work_next_color(int color)
{
	return (color + 1) % WORK_NR_COLORS;
}
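
/*
 * Worked example of the color arithmetic above, assuming the
 * workqueue.h of this era where WORK_STRUCT_COLOR_BITS is 4 and
 * WORK_NR_COLORS is 15 (the sixteenth value is reserved as
 * WORK_NO_COLOR): colors advance 0, 1, ..., 14 and work_next_color(14)
 * wraps back to 0, so a cwq can track at most 15 flush generations in
 * nr_in_flight[].
 */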

/*
 * A work's data points to the cwq with WORK_STRUCT_CWQ set while the
 * work is on queue.  Once execution starts, WORK_STRUCT_CWQ is
 * cleared and the work data contains the cpu number it was last on.
 *
 * set_work_{cwq|cpu}() and clear_work_data() can be used to set the
 * cwq, cpu or clear work->data.  These functions should only be
 * called while the work is owned - ie. while the PENDING bit is set.
 *
 * get_work_[g]cwq() can be used to obtain the gcwq or cwq
 * corresponding to a work.  gcwq is available once the work has been
 * queued anywhere after initialization.  cwq is available only from
 * queueing until execution starts.
 */
static inline void set_work_data(struct work_struct *work, unsigned long data,
				 unsigned long flags)
{
	BUG_ON(!work_pending(work));
	atomic_long_set(&work->data, data | flags | work_static(work));
}

static void set_work_cwq(struct work_struct *work,
			 struct cpu_workqueue_struct *cwq,
			 unsigned long extra_flags)
{
	set_work_data(work, (unsigned long)cwq,
		      WORK_STRUCT_PENDING | WORK_STRUCT_CWQ | extra_flags);
}

static void set_work_cpu(struct work_struct *work, unsigned int cpu)
{
	set_work_data(work, cpu << WORK_STRUCT_FLAG_BITS, WORK_STRUCT_PENDING);
}

static void clear_work_data(struct work_struct *work)
{
	set_work_data(work, WORK_STRUCT_NO_CPU, 0);
}

static struct cpu_workqueue_struct *get_work_cwq(struct work_struct *work)
{
	unsigned long data = atomic_long_read(&work->data);

	if (data & WORK_STRUCT_CWQ)
		return (void *)(data & WORK_STRUCT_WQ_DATA_MASK);
	else
		return NULL;
}

static struct global_cwq *get_work_gcwq(struct work_struct *work)
{
	unsigned long data = atomic_long_read(&work->data);
	unsigned int cpu;

	if (data & WORK_STRUCT_CWQ)
		return ((struct cpu_workqueue_struct *)
			(data & WORK_STRUCT_WQ_DATA_MASK))->gcwq;

	cpu = data >> WORK_STRUCT_FLAG_BITS;
	if (cpu == WORK_CPU_NONE)
		return NULL;

	BUG_ON(cpu >= nr_cpu_ids && cpu != WORK_CPU_UNBOUND);
	return get_gcwq(cpu);
}
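
/*
 * Illustrative view of the work->data encoding handled above, with the
 * low WORK_STRUCT_FLAG_BITS holding the flags:
 *
 *	while queued:	(cwq pointer)        | flags, WORK_STRUCT_CWQ set
 *	after start:	(cpu << FLAG_BITS)   | flags, WORK_STRUCT_CWQ clear
 *
 * So get_work_gcwq() on a work which last ran on cpu 2 extracts 2 from
 * the high bits and returns get_gcwq(2); on a still-queued work it
 * instead chases the cwq pointer to cwq->gcwq.
 */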

/*
 * Policy functions.  These define the policies on how the global
 * worker pool is managed.  Unless noted otherwise, these functions
 * assume that they're being called with gcwq->lock held.
 */

static bool __need_more_worker(struct global_cwq *gcwq)
{
	return !atomic_read(get_gcwq_nr_running(gcwq->cpu)) ||
		gcwq->flags & GCWQ_HIGHPRI_PENDING;
}

/*
 * Need to wake up a worker?  Called from anything but currently
 * running workers.
 */
static bool need_more_worker(struct global_cwq *gcwq)
{
	return !list_empty(&gcwq->worklist) && __need_more_worker(gcwq);
}

/* Can I start working?  Called from busy but !running workers. */
static bool may_start_working(struct global_cwq *gcwq)
{
	return gcwq->nr_idle;
}

/* Do I need to keep working?  Called from currently running workers. */
static bool keep_working(struct global_cwq *gcwq)
{
	atomic_t *nr_running = get_gcwq_nr_running(gcwq->cpu);

	return !list_empty(&gcwq->worklist) && atomic_read(nr_running) <= 1;
}

/* Do we need a new worker?  Called from manager. */
static bool need_to_create_worker(struct global_cwq *gcwq)
{
	return need_more_worker(gcwq) && !may_start_working(gcwq);
}

/* Do I need to be the manager? */
static bool need_to_manage_workers(struct global_cwq *gcwq)
{
	return need_to_create_worker(gcwq) || gcwq->flags & GCWQ_MANAGE_WORKERS;
}

/* Do we have too many workers and should some go away? */
static bool too_many_workers(struct global_cwq *gcwq)
{
	bool managing = gcwq->flags & GCWQ_MANAGING_WORKERS;
	int nr_idle = gcwq->nr_idle + managing; /* manager is considered idle */
	int nr_busy = gcwq->nr_workers - nr_idle;

	return nr_idle > 2 && (nr_idle - 2) * MAX_IDLE_WORKERS_RATIO >= nr_busy;
}
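
/*
 * Worked example for too_many_workers(): with MAX_IDLE_WORKERS_RATIO of
 * 4 and 10 busy workers, up to 4 idle workers are tolerated since
 * (4 - 2) * 4 == 8 < 10; a fifth idle worker makes (5 - 2) * 4 == 12
 * >= 10 and the function returns true, allowing the idle timer to
 * start trimming the pool.
 */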

/*
 * Wake up functions.
 */

/* Return the first worker.  Safe with preemption disabled */
static struct worker *first_worker(struct global_cwq *gcwq)
{
	if (unlikely(list_empty(&gcwq->idle_list)))
		return NULL;

	return list_first_entry(&gcwq->idle_list, struct worker, entry);
}

/**
 * wake_up_worker - wake up an idle worker
 * @gcwq: gcwq to wake worker for
 *
 * Wake up the first idle worker of @gcwq.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 */
static void wake_up_worker(struct global_cwq *gcwq)
{
	struct worker *worker = first_worker(gcwq);

	if (likely(worker))
		wake_up_process(worker->task);
}

/**
 * wq_worker_waking_up - a worker is waking up
 * @task: task waking up
 * @cpu: CPU @task is waking up to
 *
 * This function is called during try_to_wake_up() when a worker is
 * being awoken.
 *
 * CONTEXT:
 * spin_lock_irq(rq->lock)
 */
void wq_worker_waking_up(struct task_struct *task, unsigned int cpu)
{
	struct worker *worker = kthread_data(task);

	if (likely(!(worker->flags & WORKER_NOT_RUNNING)))
		atomic_inc(get_gcwq_nr_running(cpu));
}

/**
 * wq_worker_sleeping - a worker is going to sleep
 * @task: task going to sleep
 * @cpu: CPU in question, must be the current CPU number
 *
 * This function is called during schedule() when a busy worker is
 * going to sleep.  A worker on the same cpu can be woken up by
 * returning a pointer to its task.
 *
 * CONTEXT:
 * spin_lock_irq(rq->lock)
 *
 * RETURNS:
 * Worker task on @cpu to wake up, %NULL if none.
 */
struct task_struct *wq_worker_sleeping(struct task_struct *task,
				       unsigned int cpu)
{
	struct worker *worker = kthread_data(task), *to_wakeup = NULL;
	struct global_cwq *gcwq = get_gcwq(cpu);
	atomic_t *nr_running = get_gcwq_nr_running(cpu);

	if (unlikely(worker->flags & WORKER_NOT_RUNNING))
		return NULL;

	/* this can only happen on the local cpu */
	BUG_ON(cpu != raw_smp_processor_id());

	/*
	 * The counterpart of the following dec_and_test, implied mb,
	 * worklist not empty test sequence is in insert_work().
	 * Please read comment there.
	 *
	 * NOT_RUNNING is clear.  This means that trustee is not in
	 * charge and we're running on the local cpu w/ rq lock held
	 * and preemption disabled, which in turn means that no one
	 * else could be manipulating idle_list, so dereferencing
	 * idle_list without gcwq lock is safe.
	 */
	if (atomic_dec_and_test(nr_running) && !list_empty(&gcwq->worklist))
		to_wakeup = first_worker(gcwq);
	return to_wakeup ? to_wakeup->task : NULL;
}

/**
 * worker_set_flags - set worker flags and adjust nr_running accordingly
 * @worker: self
 * @flags: flags to set
 * @wakeup: wakeup an idle worker if necessary
 *
 * Set @flags in @worker->flags and adjust nr_running accordingly.  If
 * nr_running becomes zero and @wakeup is %true, an idle worker is
 * woken up.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock)
 */
static inline void worker_set_flags(struct worker *worker, unsigned int flags,
				    bool wakeup)
{
	struct global_cwq *gcwq = worker->gcwq;

	WARN_ON_ONCE(worker->task != current);

	/*
	 * If transitioning into NOT_RUNNING, adjust nr_running and
	 * wake up an idle worker as necessary if requested by
	 * @wakeup.
	 */
	if ((flags & WORKER_NOT_RUNNING) &&
	    !(worker->flags & WORKER_NOT_RUNNING)) {
		atomic_t *nr_running = get_gcwq_nr_running(gcwq->cpu);

		if (wakeup) {
			if (atomic_dec_and_test(nr_running) &&
			    !list_empty(&gcwq->worklist))
				wake_up_worker(gcwq);
		} else
			atomic_dec(nr_running);
	}

	worker->flags |= flags;
}

/**
 * worker_clr_flags - clear worker flags and adjust nr_running accordingly
 * @worker: self
 * @flags: flags to clear
 *
 * Clear @flags in @worker->flags and adjust nr_running accordingly.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock)
 */
static inline void worker_clr_flags(struct worker *worker, unsigned int flags)
{
	struct global_cwq *gcwq = worker->gcwq;
	unsigned int oflags = worker->flags;

	WARN_ON_ONCE(worker->task != current);

	worker->flags &= ~flags;

	/* if transitioning out of NOT_RUNNING, increment nr_running */
	if ((flags & WORKER_NOT_RUNNING) && (oflags & WORKER_NOT_RUNNING))
		if (!(worker->flags & WORKER_NOT_RUNNING))
			atomic_inc(get_gcwq_nr_running(gcwq->cpu));
}

/**
 * busy_worker_head - return the busy hash head for a work
 * @gcwq: gcwq of interest
 * @work: work to be hashed
 *
 * Return hash head of @gcwq for @work.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 *
 * RETURNS:
 * Pointer to the hash head.
 */
static struct hlist_head *busy_worker_head(struct global_cwq *gcwq,
					   struct work_struct *work)
{
	const int base_shift = ilog2(sizeof(struct work_struct));
	unsigned long v = (unsigned long)work;

	/* simple shift and fold hash, do we need something better? */
	v >>= base_shift;
	v += v >> BUSY_WORKER_HASH_ORDER;
	v &= BUSY_WORKER_HASH_MASK;

	return &gcwq->busy_hash[v];
}
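
/*
 * Worked example of the shift-and-fold hash above: the ilog2 shift
 * drops the address bits which are always zero due to alignment, the
 * fold mixes the next BUSY_WORKER_HASH_ORDER bits into the low ones
 * and the mask picks one of the 64 buckets.  E.g. for a (made-up)
 * shifted value v == 0x1243: v >> 6 == 0x49, v += 0x49 gives 0x128c,
 * and 0x128c & 0x3f selects bucket 0x0c.
 */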

/**
 * __find_worker_executing_work - find worker which is executing a work
 * @gcwq: gcwq of interest
 * @bwh: hash head as returned by busy_worker_head()
 * @work: work to find worker for
 *
 * Find a worker which is executing @work on @gcwq.  @bwh should be
 * the hash head obtained by calling busy_worker_head() with the same
 * work.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 *
 * RETURNS:
 * Pointer to worker which is executing @work if found, NULL
 * otherwise.
 */
static struct worker *__find_worker_executing_work(struct global_cwq *gcwq,
						   struct hlist_head *bwh,
						   struct work_struct *work)
{
	struct worker *worker;
	struct hlist_node *tmp;

	hlist_for_each_entry(worker, tmp, bwh, hentry)
		if (worker->current_work == work)
			return worker;
	return NULL;
}

/**
 * find_worker_executing_work - find worker which is executing a work
 * @gcwq: gcwq of interest
 * @work: work to find worker for
 *
 * Find a worker which is executing @work on @gcwq.  This function is
 * identical to __find_worker_executing_work() except that this
 * function calculates @bwh itself.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 *
 * RETURNS:
 * Pointer to worker which is executing @work if found, NULL
 * otherwise.
 */
static struct worker *find_worker_executing_work(struct global_cwq *gcwq,
						 struct work_struct *work)
{
	return __find_worker_executing_work(gcwq, busy_worker_head(gcwq, work),
					    work);
}

/**
 * gcwq_determine_ins_pos - find insertion position
 * @gcwq: gcwq of interest
 * @cwq: cwq a work is being queued for
 *
 * A work for @cwq is about to be queued on @gcwq, determine insertion
 * position for the work.  If @cwq is for HIGHPRI wq, the work is
 * queued at the head of the queue but in FIFO order with respect to
 * other HIGHPRI works; otherwise, at the end of the queue.  This
 * function also sets GCWQ_HIGHPRI_PENDING flag to hint @gcwq that
 * there are HIGHPRI works pending.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 *
 * RETURNS:
 * Pointer to insertion position.
 */
static inline struct list_head *gcwq_determine_ins_pos(struct global_cwq *gcwq,
					       struct cpu_workqueue_struct *cwq)
{
	struct work_struct *twork;

	if (likely(!(cwq->wq->flags & WQ_HIGHPRI)))
		return &gcwq->worklist;

	list_for_each_entry(twork, &gcwq->worklist, entry) {
		struct cpu_workqueue_struct *tcwq = get_work_cwq(twork);

		if (!(tcwq->wq->flags & WQ_HIGHPRI))
			break;
	}

	gcwq->flags |= GCWQ_HIGHPRI_PENDING;
	return &twork->entry;
}
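
/*
 * Example of the resulting ordering (H* denoting works from HIGHPRI
 * workqueues, N* from normal ones): queueing H3 onto a worklist
 * holding H1 H2 N1 N2 yields H1 H2 H3 N1 N2.  HIGHPRI works stay FIFO
 * among themselves but always sit ahead of normal works.
 */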

/**
 * insert_work - insert a work into gcwq
 * @cwq: cwq @work belongs to
 * @work: work to insert
 * @head: insertion point
 * @extra_flags: extra WORK_STRUCT_* flags to set
 *
 * Insert @work which belongs to @cwq into @gcwq after @head.
 * @extra_flags is or'd to work_struct flags.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 */
static void insert_work(struct cpu_workqueue_struct *cwq,
			struct work_struct *work, struct list_head *head,
			unsigned int extra_flags)
{
	struct global_cwq *gcwq = cwq->gcwq;

	/* we own @work, set data and link */
	set_work_cwq(work, cwq, extra_flags);

	/*
	 * Ensure that we get the right work->data if we see the
	 * result of list_add() below, see try_to_grab_pending().
	 */
	smp_wmb();

	list_add_tail(&work->entry, head);

	/*
	 * Ensure either wq_worker_sleeping() sees the above
	 * list_add_tail() or we see zero nr_running to avoid workers
	 * lying around lazily while there are works to be processed.
	 */
	smp_mb();

	if (__need_more_worker(gcwq))
		wake_up_worker(gcwq);
}

static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
			 struct work_struct *work)
{
	struct global_cwq *gcwq;
	struct cpu_workqueue_struct *cwq;
	struct list_head *worklist;
	unsigned long flags;

	debug_work_activate(work);

	/* determine gcwq to use */
	if (!(wq->flags & WQ_UNBOUND)) {
		struct global_cwq *last_gcwq;

		if (unlikely(cpu == WORK_CPU_UNBOUND))
			cpu = raw_smp_processor_id();

		/*
		 * It's multi cpu.  If @wq is non-reentrant and @work
		 * was previously on a different cpu, it might still
		 * be running there, in which case the work needs to
		 * be queued on that cpu to guarantee non-reentrance.
		 */
		gcwq = get_gcwq(cpu);
		if (wq->flags & WQ_NON_REENTRANT &&
		    (last_gcwq = get_work_gcwq(work)) && last_gcwq != gcwq) {
			struct worker *worker;

			spin_lock_irqsave(&last_gcwq->lock, flags);

			worker = find_worker_executing_work(last_gcwq, work);

			if (worker && worker->current_cwq->wq == wq)
				gcwq = last_gcwq;
			else {
				/* meh... not running there, queue here */
				spin_unlock_irqrestore(&last_gcwq->lock, flags);
				spin_lock_irqsave(&gcwq->lock, flags);
			}
		} else
			spin_lock_irqsave(&gcwq->lock, flags);
	} else {
		gcwq = get_gcwq(WORK_CPU_UNBOUND);
		spin_lock_irqsave(&gcwq->lock, flags);
	}

	/* gcwq determined, get cwq and queue */
	cwq = get_cwq(gcwq->cpu, wq);

	BUG_ON(!list_empty(&work->entry));

	cwq->nr_in_flight[cwq->work_color]++;

	if (likely(cwq->nr_active < cwq->max_active)) {
		cwq->nr_active++;
		worklist = gcwq_determine_ins_pos(gcwq, cwq);
	} else
		worklist = &cwq->delayed_works;

	insert_work(cwq, work, worklist, work_color_to_flags(cwq->work_color));

	spin_unlock_irqrestore(&gcwq->lock, flags);
}

/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to the CPU on which it was submitted, but if the CPU dies
 * it can be processed by another CPU.
 */
int queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
	int ret;

	ret = queue_work_on(get_cpu(), wq, work);
	put_cpu();

	return ret;
}
EXPORT_SYMBOL_GPL(queue_work);

/**
 * queue_work_on - queue work on specific cpu
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to a specific CPU, the caller must ensure it
 * can't go away.
 */
int
queue_work_on(int cpu, struct workqueue_struct *wq, struct work_struct *work)
{
	int ret = 0;

	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
		__queue_work(cpu, wq, work);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_work_on);

static void delayed_work_timer_fn(unsigned long __data)
{
	struct delayed_work *dwork = (struct delayed_work *)__data;
	struct cpu_workqueue_struct *cwq = get_work_cwq(&dwork->work);

	__queue_work(smp_processor_id(), cwq->wq, &dwork->work);
}

/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 */
int queue_delayed_work(struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	if (delay == 0)
		return queue_work(wq, &dwork->work);

	return queue_delayed_work_on(-1, wq, dwork, delay);
}
EXPORT_SYMBOL_GPL(queue_delayed_work);

/**
 * queue_delayed_work_on - queue work on specific CPU after delay
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 */
int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	int ret = 0;
	struct timer_list *timer = &dwork->timer;
	struct work_struct *work = &dwork->work;

	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
		unsigned int lcpu;

		BUG_ON(timer_pending(timer));
		BUG_ON(!list_empty(&work->entry));

		timer_stats_timer_set_start_info(&dwork->timer);

		/*
		 * This stores cwq for the moment, for the timer_fn.
		 * Note that the work's gcwq is preserved to allow
		 * reentrance detection for delayed works.
		 */
		if (!(wq->flags & WQ_UNBOUND)) {
			struct global_cwq *gcwq = get_work_gcwq(work);

			if (gcwq && gcwq->cpu != WORK_CPU_UNBOUND)
				lcpu = gcwq->cpu;
			else
				lcpu = raw_smp_processor_id();
		} else
			lcpu = WORK_CPU_UNBOUND;

		set_work_cwq(work, get_cwq(lcpu, wq), 0);

		timer->expires = jiffies + delay;
		timer->data = (unsigned long)dwork;
		timer->function = delayed_work_timer_fn;

		if (unlikely(cpu >= 0))
			add_timer_on(timer, cpu);
		else
			add_timer(timer);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_delayed_work_on);
11261da177e4SLinus Torvalds 
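/*
 * Example (editor's sketch, not part of the original source):
 * queueing delayed work.  my_wq and my_dwork_fn are hypothetical.
 *
 *	static void my_dwork_fn(struct work_struct *work)
 *	{
 *		struct delayed_work *dwork = to_delayed_work(work);
 *		// ... do the deferred work ...
 *	}
 *
 *	static DECLARE_DELAYED_WORK(my_dwork, my_dwork_fn);
 *
 *	// run roughly 100ms from now
 *	queue_delayed_work(my_wq, &my_dwork, msecs_to_jiffies(100));
 *	// or pin the timer (and thus the queueing) to cpu 2
 *	queue_delayed_work_on(2, my_wq, &my_dwork, msecs_to_jiffies(100));
 *
 * As the code above shows, a zero @delay degenerates into a plain
 * queue_work().
 */
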
1127c8e55f36STejun Heo /**
1128c8e55f36STejun Heo  * worker_enter_idle - enter idle state
1129c8e55f36STejun Heo  * @worker: worker which is entering idle state
1130c8e55f36STejun Heo  *
1131c8e55f36STejun Heo  * @worker is entering idle state.  Update stats and idle timer if
1132c8e55f36STejun Heo  * necessary.
1133c8e55f36STejun Heo  *
1134c8e55f36STejun Heo  * LOCKING:
1135c8e55f36STejun Heo  * spin_lock_irq(gcwq->lock).
1136c8e55f36STejun Heo  */
1137c8e55f36STejun Heo static void worker_enter_idle(struct worker *worker)
11381da177e4SLinus Torvalds {
1139c8e55f36STejun Heo 	struct global_cwq *gcwq = worker->gcwq;
1140c8e55f36STejun Heo 
1141c8e55f36STejun Heo 	BUG_ON(worker->flags & WORKER_IDLE);
1142c8e55f36STejun Heo 	BUG_ON(!list_empty(&worker->entry) &&
1143c8e55f36STejun Heo 	       (worker->hentry.next || worker->hentry.pprev));
1144c8e55f36STejun Heo 
1145cb444766STejun Heo 	/* can't use worker_set_flags(), also called from start_worker() */
1146cb444766STejun Heo 	worker->flags |= WORKER_IDLE;
1147c8e55f36STejun Heo 	gcwq->nr_idle++;
1148e22bee78STejun Heo 	worker->last_active = jiffies;
1149c8e55f36STejun Heo 
1150c8e55f36STejun Heo 	/* idle_list is LIFO */
1151c8e55f36STejun Heo 	list_add(&worker->entry, &gcwq->idle_list);
1152db7bccf4STejun Heo 
1153e22bee78STejun Heo 	if (likely(!(worker->flags & WORKER_ROGUE))) {
1154e22bee78STejun Heo 		if (too_many_workers(gcwq) && !timer_pending(&gcwq->idle_timer))
1155e22bee78STejun Heo 			mod_timer(&gcwq->idle_timer,
1156e22bee78STejun Heo 				  jiffies + IDLE_WORKER_TIMEOUT);
1157e22bee78STejun Heo 	} else
1158db7bccf4STejun Heo 		wake_up_all(&gcwq->trustee_wait);
1159cb444766STejun Heo 
1160cb444766STejun Heo 	/* sanity check nr_running */
1161cb444766STejun Heo 	WARN_ON_ONCE(gcwq->nr_workers == gcwq->nr_idle &&
1162cb444766STejun Heo 		     atomic_read(get_gcwq_nr_running(gcwq->cpu)));
1163c8e55f36STejun Heo }
1164c8e55f36STejun Heo 
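/*
 * Editor's note on the too_many_workers() test used above: assuming
 * the heuristic defined earlier in this file, a small number of idle
 * workers is always tolerated and one extra idle worker is allowed
 * per MAX_IDLE_WORKERS_RATIO busy ones.  A worked example, assuming
 * a ratio of 4 and a baseline allowance of 2 idle workers:
 *
 *	nr_idle = 5, nr_busy = 12:  (5 - 2) * 4 = 12 >= 12, too many,
 *	so the idle timer is armed for IDLE_WORKER_TIMEOUT;
 *	nr_idle = 3, nr_busy = 16:  (3 - 2) * 4 =  4 <  16, fine.
 */
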
1165c8e55f36STejun Heo /**
1166c8e55f36STejun Heo  * worker_leave_idle - leave idle state
1167c8e55f36STejun Heo  * @worker: worker which is leaving idle state
1168c8e55f36STejun Heo  *
1169c8e55f36STejun Heo  * @worker is leaving idle state.  Update stats.
1170c8e55f36STejun Heo  *
1171c8e55f36STejun Heo  * LOCKING:
1172c8e55f36STejun Heo  * spin_lock_irq(gcwq->lock).
1173c8e55f36STejun Heo  */
1174c8e55f36STejun Heo static void worker_leave_idle(struct worker *worker)
1175c8e55f36STejun Heo {
1176c8e55f36STejun Heo 	struct global_cwq *gcwq = worker->gcwq;
1177c8e55f36STejun Heo 
1178c8e55f36STejun Heo 	BUG_ON(!(worker->flags & WORKER_IDLE));
1179d302f017STejun Heo 	worker_clr_flags(worker, WORKER_IDLE);
1180c8e55f36STejun Heo 	gcwq->nr_idle--;
1181c8e55f36STejun Heo 	list_del_init(&worker->entry);
1182c8e55f36STejun Heo }
1183c8e55f36STejun Heo 
1184e22bee78STejun Heo /**
1185e22bee78STejun Heo  * worker_maybe_bind_and_lock - bind worker to its cpu if possible and lock gcwq
1186e22bee78STejun Heo  * @worker: self
1187e22bee78STejun Heo  *
1188e22bee78STejun Heo  * Works which are scheduled while the cpu is online must at least be
1189e22bee78STejun Heo  * scheduled to a worker which is bound to the cpu so that if they are
1190e22bee78STejun Heo  * flushed from cpu callbacks while cpu is going down, they are
1191e22bee78STejun Heo  * guaranteed to execute on the cpu.
1192e22bee78STejun Heo  *
1193e22bee78STejun Heo  * This function is to be used by rogue workers and rescuers to bind
1194e22bee78STejun Heo  * themselves to the target cpu and may race with cpu going down or
1195e22bee78STejun Heo  * coming online.  kthread_bind() can't be used because it may put the
1196e22bee78STejun Heo  * worker on an already dead cpu and set_cpus_allowed_ptr() can't be
1197e22bee78STejun Heo  * used verbatim as it's best effort, may block, and the gcwq may be
1198e22bee78STejun Heo  * [dis]associated in the meantime.
1199e22bee78STejun Heo  *
1200e22bee78STejun Heo  * This function tries set_cpus_allowed(), locks the gcwq and verifies
1201e22bee78STejun Heo  * the binding against GCWQ_DISASSOCIATED which is set during
1202e22bee78STejun Heo  * CPU_DYING and cleared during CPU_ONLINE, so if the worker enters
1203e22bee78STejun Heo  * idle state or fetches works without dropping lock, it can guarantee
1204e22bee78STejun Heo  * the scheduling requirement described in the first paragraph.
1205e22bee78STejun Heo  *
1206e22bee78STejun Heo  * CONTEXT:
1207e22bee78STejun Heo  * Might sleep.  Called without any lock but returns with gcwq->lock
1208e22bee78STejun Heo  * held.
1209e22bee78STejun Heo  *
1210e22bee78STejun Heo  * RETURNS:
1211e22bee78STejun Heo  * %true if the associated gcwq is online (@worker is successfully
1212e22bee78STejun Heo  * bound), %false if offline.
1213e22bee78STejun Heo  */
1214e22bee78STejun Heo static bool worker_maybe_bind_and_lock(struct worker *worker)
1215e22bee78STejun Heo {
1216e22bee78STejun Heo 	struct global_cwq *gcwq = worker->gcwq;
1217e22bee78STejun Heo 	struct task_struct *task = worker->task;
1218e22bee78STejun Heo 
1219e22bee78STejun Heo 	while (true) {
1220e22bee78STejun Heo 		/*
1221e22bee78STejun Heo 		 * The following call may fail, succeed, or succeed
1222e22bee78STejun Heo 		 * without actually migrating the task to the cpu if
1223e22bee78STejun Heo 		 * it races with a cpu hot-unplug operation.  Verify
1224e22bee78STejun Heo 		 * against GCWQ_DISASSOCIATED.
1225e22bee78STejun Heo 		 */
1226f3421797STejun Heo 		if (!(gcwq->flags & GCWQ_DISASSOCIATED))
1227e22bee78STejun Heo 			set_cpus_allowed_ptr(task, get_cpu_mask(gcwq->cpu));
1228e22bee78STejun Heo 
1229e22bee78STejun Heo 		spin_lock_irq(&gcwq->lock);
1230e22bee78STejun Heo 		if (gcwq->flags & GCWQ_DISASSOCIATED)
1231e22bee78STejun Heo 			return false;
1232e22bee78STejun Heo 		if (task_cpu(task) == gcwq->cpu &&
1233e22bee78STejun Heo 		    cpumask_equal(&current->cpus_allowed,
1234e22bee78STejun Heo 				  get_cpu_mask(gcwq->cpu)))
1235e22bee78STejun Heo 			return true;
1236e22bee78STejun Heo 		spin_unlock_irq(&gcwq->lock);
1237e22bee78STejun Heo 
1238e22bee78STejun Heo 	/* CPU has come up in between, retry migration */
1239e22bee78STejun Heo 		cpu_relax();
1240e22bee78STejun Heo 	}
1241e22bee78STejun Heo }
1242e22bee78STejun Heo 
1243e22bee78STejun Heo /*
1244e22bee78STejun Heo  * Function for worker->rebind_work used to rebind rogue busy workers
1245e22bee78STejun Heo  * to the associated cpu which is coming back online.  This is
1246e22bee78STejun Heo  * scheduled by cpu up but can race with other cpu hotplug operations
1247e22bee78STejun Heo  * and may be executed twice without intervening cpu down.
1248e22bee78STejun Heo  */
1249e22bee78STejun Heo static void worker_rebind_fn(struct work_struct *work)
1250e22bee78STejun Heo {
1251e22bee78STejun Heo 	struct worker *worker = container_of(work, struct worker, rebind_work);
1252e22bee78STejun Heo 	struct global_cwq *gcwq = worker->gcwq;
1253e22bee78STejun Heo 
1254e22bee78STejun Heo 	if (worker_maybe_bind_and_lock(worker))
1255e22bee78STejun Heo 		worker_clr_flags(worker, WORKER_REBIND);
1256e22bee78STejun Heo 
1257e22bee78STejun Heo 	spin_unlock_irq(&gcwq->lock);
1258e22bee78STejun Heo }
1259e22bee78STejun Heo 
1260c34056a3STejun Heo static struct worker *alloc_worker(void)
1261c34056a3STejun Heo {
1262c34056a3STejun Heo 	struct worker *worker;
1263c34056a3STejun Heo 
1264c34056a3STejun Heo 	worker = kzalloc(sizeof(*worker), GFP_KERNEL);
1265c8e55f36STejun Heo 	if (worker) {
1266c8e55f36STejun Heo 		INIT_LIST_HEAD(&worker->entry);
1267affee4b2STejun Heo 		INIT_LIST_HEAD(&worker->scheduled);
1268e22bee78STejun Heo 		INIT_WORK(&worker->rebind_work, worker_rebind_fn);
1269e22bee78STejun Heo 		/* on creation a worker is in !idle && prep state */
1270e22bee78STejun Heo 		worker->flags = WORKER_PREP;
1271c8e55f36STejun Heo 	}
1272c34056a3STejun Heo 	return worker;
1273c34056a3STejun Heo }
1274c34056a3STejun Heo 
1275c34056a3STejun Heo /**
1276c34056a3STejun Heo  * create_worker - create a new workqueue worker
12777e11629dSTejun Heo  * @gcwq: gcwq the new worker will belong to
1278c34056a3STejun Heo  * @bind: whether to bind the worker to the gcwq's cpu or not
1279c34056a3STejun Heo  *
12807e11629dSTejun Heo  * Create a new worker which is bound to @gcwq.  The returned worker
1281c34056a3STejun Heo  * can be started by calling start_worker() or destroyed using
1282c34056a3STejun Heo  * destroy_worker().
1283c34056a3STejun Heo  *
1284c34056a3STejun Heo  * CONTEXT:
1285c34056a3STejun Heo  * Might sleep.  Does GFP_KERNEL allocations.
1286c34056a3STejun Heo  *
1287c34056a3STejun Heo  * RETURNS:
1288c34056a3STejun Heo  * Pointer to the newly created worker.
1289c34056a3STejun Heo  */
12907e11629dSTejun Heo static struct worker *create_worker(struct global_cwq *gcwq, bool bind)
1291c34056a3STejun Heo {
1292f3421797STejun Heo 	bool on_unbound_cpu = gcwq->cpu == WORK_CPU_UNBOUND;
1293c34056a3STejun Heo 	struct worker *worker = NULL;
1294f3421797STejun Heo 	int id = -1;
1295c34056a3STejun Heo 
12968b03ae3cSTejun Heo 	spin_lock_irq(&gcwq->lock);
12978b03ae3cSTejun Heo 	while (ida_get_new(&gcwq->worker_ida, &id)) {
12988b03ae3cSTejun Heo 		spin_unlock_irq(&gcwq->lock);
12998b03ae3cSTejun Heo 		if (!ida_pre_get(&gcwq->worker_ida, GFP_KERNEL))
1300c34056a3STejun Heo 			goto fail;
13018b03ae3cSTejun Heo 		spin_lock_irq(&gcwq->lock);
1302c34056a3STejun Heo 	}
13038b03ae3cSTejun Heo 	spin_unlock_irq(&gcwq->lock);
1304c34056a3STejun Heo 
1305c34056a3STejun Heo 	worker = alloc_worker();
1306c34056a3STejun Heo 	if (!worker)
1307c34056a3STejun Heo 		goto fail;
1308c34056a3STejun Heo 
13098b03ae3cSTejun Heo 	worker->gcwq = gcwq;
1310c34056a3STejun Heo 	worker->id = id;
1311c34056a3STejun Heo 
1312f3421797STejun Heo 	if (!on_unbound_cpu)
1313f3421797STejun Heo 		worker->task = kthread_create(worker_thread, worker,
1314f3421797STejun Heo 					      "kworker/%u:%d", gcwq->cpu, id);
1315f3421797STejun Heo 	else
1316f3421797STejun Heo 		worker->task = kthread_create(worker_thread, worker,
1317f3421797STejun Heo 					      "kworker/u:%d", id);
1318c34056a3STejun Heo 	if (IS_ERR(worker->task))
1319c34056a3STejun Heo 		goto fail;
1320c34056a3STejun Heo 
1321db7bccf4STejun Heo 	/*
1322db7bccf4STejun Heo 	 * A rogue worker will become a regular one if CPU comes
1323db7bccf4STejun Heo 	 * online later on.  Make sure every worker has
1324db7bccf4STejun Heo 	 * PF_THREAD_BOUND set.
1325db7bccf4STejun Heo 	 */
1326f3421797STejun Heo 	if (bind && !on_unbound_cpu)
13278b03ae3cSTejun Heo 		kthread_bind(worker->task, gcwq->cpu);
1328f3421797STejun Heo 	else {
1329db7bccf4STejun Heo 		worker->task->flags |= PF_THREAD_BOUND;
1330f3421797STejun Heo 		if (on_unbound_cpu)
1331f3421797STejun Heo 			worker->flags |= WORKER_UNBOUND;
1332f3421797STejun Heo 	}
1333c34056a3STejun Heo 
1334c34056a3STejun Heo 	return worker;
1335c34056a3STejun Heo fail:
1336c34056a3STejun Heo 	if (id >= 0) {
13378b03ae3cSTejun Heo 		spin_lock_irq(&gcwq->lock);
13388b03ae3cSTejun Heo 		ida_remove(&gcwq->worker_ida, id);
13398b03ae3cSTejun Heo 		spin_unlock_irq(&gcwq->lock);
1340c34056a3STejun Heo 	}
1341c34056a3STejun Heo 	kfree(worker);
1342c34056a3STejun Heo 	return NULL;
1343c34056a3STejun Heo }
1344c34056a3STejun Heo 
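/*
 * Editor's note: per the format strings above, bound workers appear
 * in tools like ps as "kworker/<cpu>:<id>" (e.g. kworker/0:1, an
 * illustrative name) while unbound workers appear as
 * "kworker/u:<id>"; the ids come from the per-gcwq worker_ida.
 */
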
1345c34056a3STejun Heo /**
1346c34056a3STejun Heo  * start_worker - start a newly created worker
1347c34056a3STejun Heo  * @worker: worker to start
1348c34056a3STejun Heo  *
1349c8e55f36STejun Heo  * Make the gcwq aware of @worker and start it.
1350c34056a3STejun Heo  *
1351c34056a3STejun Heo  * CONTEXT:
13528b03ae3cSTejun Heo  * spin_lock_irq(gcwq->lock).
1353c34056a3STejun Heo  */
1354c34056a3STejun Heo static void start_worker(struct worker *worker)
1355c34056a3STejun Heo {
1356cb444766STejun Heo 	worker->flags |= WORKER_STARTED;
1357c8e55f36STejun Heo 	worker->gcwq->nr_workers++;
1358c8e55f36STejun Heo 	worker_enter_idle(worker);
1359c34056a3STejun Heo 	wake_up_process(worker->task);
1360c34056a3STejun Heo }
1361c34056a3STejun Heo 
1362c34056a3STejun Heo /**
1363c34056a3STejun Heo  * destroy_worker - destroy a workqueue worker
1364c34056a3STejun Heo  * @worker: worker to be destroyed
1365c34056a3STejun Heo  *
1366c8e55f36STejun Heo  * Destroy @worker and adjust the gcwq's stats accordingly.
1367c8e55f36STejun Heo  *
1368c8e55f36STejun Heo  * CONTEXT:
1369c8e55f36STejun Heo  * spin_lock_irq(gcwq->lock) which is released and regrabbed.
1370c34056a3STejun Heo  */
1371c34056a3STejun Heo static void destroy_worker(struct worker *worker)
1372c34056a3STejun Heo {
13738b03ae3cSTejun Heo 	struct global_cwq *gcwq = worker->gcwq;
1374c34056a3STejun Heo 	int id = worker->id;
1375c34056a3STejun Heo 
1376c34056a3STejun Heo 	/* sanity check frenzy */
1377c34056a3STejun Heo 	BUG_ON(worker->current_work);
1378affee4b2STejun Heo 	BUG_ON(!list_empty(&worker->scheduled));
1379c34056a3STejun Heo 
1380c8e55f36STejun Heo 	if (worker->flags & WORKER_STARTED)
1381c8e55f36STejun Heo 		gcwq->nr_workers--;
1382c8e55f36STejun Heo 	if (worker->flags & WORKER_IDLE)
1383c8e55f36STejun Heo 		gcwq->nr_idle--;
1384c8e55f36STejun Heo 
1385c8e55f36STejun Heo 	list_del_init(&worker->entry);
1386cb444766STejun Heo 	worker->flags |= WORKER_DIE;
1387c8e55f36STejun Heo 
1388c8e55f36STejun Heo 	spin_unlock_irq(&gcwq->lock);
1389c8e55f36STejun Heo 
1390c34056a3STejun Heo 	kthread_stop(worker->task);
1391c34056a3STejun Heo 	kfree(worker);
1392c34056a3STejun Heo 
13938b03ae3cSTejun Heo 	spin_lock_irq(&gcwq->lock);
13948b03ae3cSTejun Heo 	ida_remove(&gcwq->worker_ida, id);
1395c34056a3STejun Heo }
1396c34056a3STejun Heo 
1397e22bee78STejun Heo static void idle_worker_timeout(unsigned long __gcwq)
1398e22bee78STejun Heo {
1399e22bee78STejun Heo 	struct global_cwq *gcwq = (void *)__gcwq;
1400e22bee78STejun Heo 
1401e22bee78STejun Heo 	spin_lock_irq(&gcwq->lock);
1402e22bee78STejun Heo 
1403e22bee78STejun Heo 	if (too_many_workers(gcwq)) {
1404e22bee78STejun Heo 		struct worker *worker;
1405e22bee78STejun Heo 		unsigned long expires;
1406e22bee78STejun Heo 
1407e22bee78STejun Heo 		/* idle_list is kept in LIFO order, check the last one */
1408e22bee78STejun Heo 		worker = list_entry(gcwq->idle_list.prev, struct worker, entry);
1409e22bee78STejun Heo 		expires = worker->last_active + IDLE_WORKER_TIMEOUT;
1410e22bee78STejun Heo 
1411e22bee78STejun Heo 		if (time_before(jiffies, expires))
1412e22bee78STejun Heo 			mod_timer(&gcwq->idle_timer, expires);
1413e22bee78STejun Heo 		else {
1414e22bee78STejun Heo 			/* it's been idle for too long, wake up manager */
1415e22bee78STejun Heo 			gcwq->flags |= GCWQ_MANAGE_WORKERS;
1416e22bee78STejun Heo 			wake_up_worker(gcwq);
1417e22bee78STejun Heo 		}
1418e22bee78STejun Heo 	}
1419e22bee78STejun Heo 
1420e22bee78STejun Heo 	spin_unlock_irq(&gcwq->lock);
1421e22bee78STejun Heo }
1422e22bee78STejun Heo 
1423e22bee78STejun Heo static bool send_mayday(struct work_struct *work)
1424e22bee78STejun Heo {
1425e22bee78STejun Heo 	struct cpu_workqueue_struct *cwq = get_work_cwq(work);
1426e22bee78STejun Heo 	struct workqueue_struct *wq = cwq->wq;
1427f3421797STejun Heo 	unsigned int cpu;
1428e22bee78STejun Heo 
1429e22bee78STejun Heo 	if (!(wq->flags & WQ_RESCUER))
1430e22bee78STejun Heo 		return false;
1431e22bee78STejun Heo 
1432e22bee78STejun Heo 	/* mayday mayday mayday */
1433f3421797STejun Heo 	cpu = cwq->gcwq->cpu;
1434f3421797STejun Heo 	/* WORK_CPU_UNBOUND can't be set in cpumask, use cpu 0 instead */
1435f3421797STejun Heo 	if (cpu == WORK_CPU_UNBOUND)
1436f3421797STejun Heo 		cpu = 0;
1437f2e005aaSTejun Heo 	if (!mayday_test_and_set_cpu(cpu, wq->mayday_mask))
1438e22bee78STejun Heo 		wake_up_process(wq->rescuer->task);
1439e22bee78STejun Heo 	return true;
1440e22bee78STejun Heo }
1441e22bee78STejun Heo 
1442e22bee78STejun Heo static void gcwq_mayday_timeout(unsigned long __gcwq)
1443e22bee78STejun Heo {
1444e22bee78STejun Heo 	struct global_cwq *gcwq = (void *)__gcwq;
1445e22bee78STejun Heo 	struct work_struct *work;
1446e22bee78STejun Heo 
1447e22bee78STejun Heo 	spin_lock_irq(&gcwq->lock);
1448e22bee78STejun Heo 
1449e22bee78STejun Heo 	if (need_to_create_worker(gcwq)) {
1450e22bee78STejun Heo 		/*
1451e22bee78STejun Heo 		 * We've been trying to create a new worker but
1452e22bee78STejun Heo 		 * haven't been successful.  We might be hitting an
1453e22bee78STejun Heo 		 * allocation deadlock.  Send distress signals to
1454e22bee78STejun Heo 		 * rescuers.
1455e22bee78STejun Heo 		 */
1456e22bee78STejun Heo 		list_for_each_entry(work, &gcwq->worklist, entry)
1457e22bee78STejun Heo 			send_mayday(work);
1458e22bee78STejun Heo 	}
1459e22bee78STejun Heo 
1460e22bee78STejun Heo 	spin_unlock_irq(&gcwq->lock);
1461e22bee78STejun Heo 
1462e22bee78STejun Heo 	mod_timer(&gcwq->mayday_timer, jiffies + MAYDAY_INTERVAL);
1463e22bee78STejun Heo }
1464e22bee78STejun Heo 
1465e22bee78STejun Heo /**
1466e22bee78STejun Heo  * maybe_create_worker - create a new worker if necessary
1467e22bee78STejun Heo  * @gcwq: gcwq to create a new worker for
1468e22bee78STejun Heo  *
1469e22bee78STejun Heo  * Create a new worker for @gcwq if necessary.  @gcwq is guaranteed to
1470e22bee78STejun Heo  * have at least one idle worker on return from this function.  If
1471e22bee78STejun Heo  * creating a new worker takes longer than MAYDAY_INTERVAL, mayday is
1472e22bee78STejun Heo  * sent to all rescuers with works scheduled on @gcwq to resolve
1473e22bee78STejun Heo  * possible allocation deadlock.
1474e22bee78STejun Heo  *
1475e22bee78STejun Heo  * On return, need_to_create_worker() is guaranteed to be false and
1476e22bee78STejun Heo  * may_start_working() true.
1477e22bee78STejun Heo  *
1478e22bee78STejun Heo  * LOCKING:
1479e22bee78STejun Heo  * spin_lock_irq(gcwq->lock) which may be released and regrabbed
1480e22bee78STejun Heo  * multiple times.  Does GFP_KERNEL allocations.  Called only from
1481e22bee78STejun Heo  * manager.
1482e22bee78STejun Heo  *
1483e22bee78STejun Heo  * RETURNS:
1484e22bee78STejun Heo  * false if no action was taken and gcwq->lock stayed locked, true
1485e22bee78STejun Heo  * otherwise.
1486e22bee78STejun Heo  */
1487e22bee78STejun Heo static bool maybe_create_worker(struct global_cwq *gcwq)
1488*06bd6ebfSNamhyung Kim __releases(&gcwq->lock)
1489*06bd6ebfSNamhyung Kim __acquires(&gcwq->lock)
1490e22bee78STejun Heo {
1491e22bee78STejun Heo 	if (!need_to_create_worker(gcwq))
1492e22bee78STejun Heo 		return false;
1493e22bee78STejun Heo restart:
14949f9c2364STejun Heo 	spin_unlock_irq(&gcwq->lock);
14959f9c2364STejun Heo 
1496e22bee78STejun Heo 	/* if we don't make progress in MAYDAY_INITIAL_TIMEOUT, call for help */
1497e22bee78STejun Heo 	mod_timer(&gcwq->mayday_timer, jiffies + MAYDAY_INITIAL_TIMEOUT);
1498e22bee78STejun Heo 
1499e22bee78STejun Heo 	while (true) {
1500e22bee78STejun Heo 		struct worker *worker;
1501e22bee78STejun Heo 
1502e22bee78STejun Heo 		worker = create_worker(gcwq, true);
1503e22bee78STejun Heo 		if (worker) {
1504e22bee78STejun Heo 			del_timer_sync(&gcwq->mayday_timer);
1505e22bee78STejun Heo 			spin_lock_irq(&gcwq->lock);
1506e22bee78STejun Heo 			start_worker(worker);
1507e22bee78STejun Heo 			BUG_ON(need_to_create_worker(gcwq));
1508e22bee78STejun Heo 			return true;
1509e22bee78STejun Heo 		}
1510e22bee78STejun Heo 
1511e22bee78STejun Heo 		if (!need_to_create_worker(gcwq))
1512e22bee78STejun Heo 			break;
1513e22bee78STejun Heo 
1514e22bee78STejun Heo 		__set_current_state(TASK_INTERRUPTIBLE);
1515e22bee78STejun Heo 		schedule_timeout(CREATE_COOLDOWN);
15169f9c2364STejun Heo 
1517e22bee78STejun Heo 		if (!need_to_create_worker(gcwq))
1518e22bee78STejun Heo 			break;
1519e22bee78STejun Heo 	}
1520e22bee78STejun Heo 
1521e22bee78STejun Heo 	del_timer_sync(&gcwq->mayday_timer);
1522e22bee78STejun Heo 	spin_lock_irq(&gcwq->lock);
1523e22bee78STejun Heo 	if (need_to_create_worker(gcwq))
1524e22bee78STejun Heo 		goto restart;
1525e22bee78STejun Heo 	return true;
1526e22bee78STejun Heo }
1527e22bee78STejun Heo 
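/*
 * Editor's sketch of the creation timeline above (constants are
 * named in the code; their values are defined earlier in this file):
 *
 *	t = 0                        arm mayday timer, try create_worker()
 *	t = MAYDAY_INITIAL_TIMEOUT   still no worker: gcwq_mayday_timeout()
 *	                             fires and send_mayday() wakes rescuers
 *	every MAYDAY_INTERVAL        mayday repeats while creation keeps
 *	                             failing, with CREATE_COOLDOWN pauses
 *	                             between create_worker() attempts
 */
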
1528e22bee78STejun Heo /**
1529e22bee78STejun Heo  * maybe_destroy_workers - destroy workers which have been idle for a while
1530e22bee78STejun Heo  * @gcwq: gcwq to destroy workers for
1531e22bee78STejun Heo  *
1532e22bee78STejun Heo  * Destroy @gcwq workers which have been idle for longer than
1533e22bee78STejun Heo  * IDLE_WORKER_TIMEOUT.
1534e22bee78STejun Heo  *
1535e22bee78STejun Heo  * LOCKING:
1536e22bee78STejun Heo  * spin_lock_irq(gcwq->lock) which may be released and regrabbed
1537e22bee78STejun Heo  * multiple times.  Called only from manager.
1538e22bee78STejun Heo  *
1539e22bee78STejun Heo  * RETURNS:
1540e22bee78STejun Heo  * false if no action was taken and gcwq->lock stayed locked, true
1541e22bee78STejun Heo  * otherwise.
1542e22bee78STejun Heo  */
1543e22bee78STejun Heo static bool maybe_destroy_workers(struct global_cwq *gcwq)
1544e22bee78STejun Heo {
1545e22bee78STejun Heo 	bool ret = false;
1546e22bee78STejun Heo 
1547e22bee78STejun Heo 	while (too_many_workers(gcwq)) {
1548e22bee78STejun Heo 		struct worker *worker;
1549e22bee78STejun Heo 		unsigned long expires;
1550e22bee78STejun Heo 
1551e22bee78STejun Heo 		worker = list_entry(gcwq->idle_list.prev, struct worker, entry);
1552e22bee78STejun Heo 		expires = worker->last_active + IDLE_WORKER_TIMEOUT;
1553e22bee78STejun Heo 
1554e22bee78STejun Heo 		if (time_before(jiffies, expires)) {
1555e22bee78STejun Heo 			mod_timer(&gcwq->idle_timer, expires);
1556e22bee78STejun Heo 			break;
1557e22bee78STejun Heo 		}
1558e22bee78STejun Heo 
1559e22bee78STejun Heo 		destroy_worker(worker);
1560e22bee78STejun Heo 		ret = true;
1561e22bee78STejun Heo 	}
1562e22bee78STejun Heo 
1563e22bee78STejun Heo 	return ret;
1564e22bee78STejun Heo }
1565e22bee78STejun Heo 
1566e22bee78STejun Heo /**
1567e22bee78STejun Heo  * manage_workers - manage worker pool
1568e22bee78STejun Heo  * @worker: self
1569e22bee78STejun Heo  *
1570e22bee78STejun Heo  * Assume the manager role and manage gcwq worker pool @worker belongs
1571e22bee78STejun Heo  * to.  At any given time, there can be at most one manager per
1572e22bee78STejun Heo  * gcwq.  The exclusion is handled automatically by this function.
1573e22bee78STejun Heo  *
1574e22bee78STejun Heo  * The caller can safely start processing works on false return.  On
1575e22bee78STejun Heo  * true return, it's guaranteed that need_to_create_worker() is false
1576e22bee78STejun Heo  * and may_start_working() is true.
1577e22bee78STejun Heo  *
1578e22bee78STejun Heo  * CONTEXT:
1579e22bee78STejun Heo  * spin_lock_irq(gcwq->lock) which may be released and regrabbed
1580e22bee78STejun Heo  * multiple times.  Does GFP_KERNEL allocations.
1581e22bee78STejun Heo  *
1582e22bee78STejun Heo  * RETURNS:
1583e22bee78STejun Heo  * false if no action was taken and gcwq->lock stayed locked, true if
1584e22bee78STejun Heo  * some action was taken.
1585e22bee78STejun Heo  */
1586e22bee78STejun Heo static bool manage_workers(struct worker *worker)
1587e22bee78STejun Heo {
1588e22bee78STejun Heo 	struct global_cwq *gcwq = worker->gcwq;
1589e22bee78STejun Heo 	bool ret = false;
1590e22bee78STejun Heo 
1591e22bee78STejun Heo 	if (gcwq->flags & GCWQ_MANAGING_WORKERS)
1592e22bee78STejun Heo 		return ret;
1593e22bee78STejun Heo 
1594e22bee78STejun Heo 	gcwq->flags &= ~GCWQ_MANAGE_WORKERS;
1595e22bee78STejun Heo 	gcwq->flags |= GCWQ_MANAGING_WORKERS;
1596e22bee78STejun Heo 
1597e22bee78STejun Heo 	/*
1598e22bee78STejun Heo 	 * Destroy and then create so that may_start_working() is true
1599e22bee78STejun Heo 	 * on return.
1600e22bee78STejun Heo 	 */
1601e22bee78STejun Heo 	ret |= maybe_destroy_workers(gcwq);
1602e22bee78STejun Heo 	ret |= maybe_create_worker(gcwq);
1603e22bee78STejun Heo 
1604e22bee78STejun Heo 	gcwq->flags &= ~GCWQ_MANAGING_WORKERS;
1605e22bee78STejun Heo 
1606e22bee78STejun Heo 	/*
1607e22bee78STejun Heo 	 * The trustee might be waiting to take over the manager
1608e22bee78STejun Heo 	 * position, tell it we're done.
1609e22bee78STejun Heo 	 */
1610e22bee78STejun Heo 	if (unlikely(gcwq->trustee))
1611e22bee78STejun Heo 		wake_up_all(&gcwq->trustee_wait);
1612e22bee78STejun Heo 
1613e22bee78STejun Heo 	return ret;
1614e22bee78STejun Heo }
1615e22bee78STejun Heo 
1616a62428c0STejun Heo /**
1617affee4b2STejun Heo  * move_linked_works - move linked works to a list
1618affee4b2STejun Heo  * @work: start of series of works to be scheduled
1619affee4b2STejun Heo  * @head: target list to append @work to
1620affee4b2STejun Heo  * @nextp: out parameter for nested worklist walking
1621affee4b2STejun Heo  *
1622affee4b2STejun Heo  * Schedule linked works starting from @work to @head.  Work series to
1623affee4b2STejun Heo  * be scheduled starts at @work and includes any consecutive work with
1624affee4b2STejun Heo  * WORK_STRUCT_LINKED set in its predecessor.
1625affee4b2STejun Heo  *
1626affee4b2STejun Heo  * If @nextp is not NULL, it's updated to point to the next work of
1627affee4b2STejun Heo  * the last scheduled work.  This allows move_linked_works() to be
1628affee4b2STejun Heo  * nested inside outer list_for_each_entry_safe().
1629affee4b2STejun Heo  *
1630affee4b2STejun Heo  * CONTEXT:
16318b03ae3cSTejun Heo  * spin_lock_irq(gcwq->lock).
1632affee4b2STejun Heo  */
1633affee4b2STejun Heo static void move_linked_works(struct work_struct *work, struct list_head *head,
1634affee4b2STejun Heo 			      struct work_struct **nextp)
1635affee4b2STejun Heo {
1636affee4b2STejun Heo 	struct work_struct *n;
1637affee4b2STejun Heo 
1638affee4b2STejun Heo 	/*
1639affee4b2STejun Heo 	 * A linked worklist always ends before the end of the list;
1640affee4b2STejun Heo 	 * use NULL for the list head.
1641affee4b2STejun Heo 	 */
1642affee4b2STejun Heo 	list_for_each_entry_safe_from(work, n, NULL, entry) {
1643affee4b2STejun Heo 		list_move_tail(&work->entry, head);
1644affee4b2STejun Heo 		if (!(*work_data_bits(work) & WORK_STRUCT_LINKED))
1645affee4b2STejun Heo 			break;
1646affee4b2STejun Heo 	}
1647affee4b2STejun Heo 
1648affee4b2STejun Heo 	/*
1649affee4b2STejun Heo 	 * If we're already inside safe list traversal and have moved
1650affee4b2STejun Heo 	 * multiple works to the scheduled queue, the next position
1651affee4b2STejun Heo 	 * needs to be updated.
1652affee4b2STejun Heo 	 */
1653affee4b2STejun Heo 	if (nextp)
1654affee4b2STejun Heo 		*nextp = n;
1655affee4b2STejun Heo }
1656affee4b2STejun Heo 
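/*
 * Editor's illustration of the WORK_STRUCT_LINKED chains walked by
 * move_linked_works() above.  LINKED on a work means the next entry
 * belongs to the same series, so given a worklist
 *
 *	A(LINKED) -> B(LINKED) -> C -> D
 *
 * move_linked_works(A, head, &n) moves A, B and C (C ends the series
 * since its LINKED bit is clear) and leaves *n pointing at D.
 */
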
16571e19ffc6STejun Heo static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq)
16581e19ffc6STejun Heo {
16591e19ffc6STejun Heo 	struct work_struct *work = list_first_entry(&cwq->delayed_works,
16601da177e4SLinus Torvalds 						    struct work_struct, entry);
1661649027d7STejun Heo 	struct list_head *pos = gcwq_determine_ins_pos(cwq->gcwq, cwq);
16621e19ffc6STejun Heo 
1663649027d7STejun Heo 	move_linked_works(work, pos, NULL);
16641e19ffc6STejun Heo 	cwq->nr_active++;
16651e19ffc6STejun Heo }
16661e19ffc6STejun Heo 
1667affee4b2STejun Heo /**
166873f53c4aSTejun Heo  * cwq_dec_nr_in_flight - decrement cwq's nr_in_flight
166973f53c4aSTejun Heo  * @cwq: cwq of interest
167073f53c4aSTejun Heo  * @color: color of work which left the queue
167173f53c4aSTejun Heo  *
167273f53c4aSTejun Heo  * A work either has completed or is removed from pending queue,
167373f53c4aSTejun Heo  * decrement nr_in_flight of its cwq and handle workqueue flushing.
167473f53c4aSTejun Heo  *
167573f53c4aSTejun Heo  * CONTEXT:
16768b03ae3cSTejun Heo  * spin_lock_irq(gcwq->lock).
167773f53c4aSTejun Heo  */
167873f53c4aSTejun Heo static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color)
167973f53c4aSTejun Heo {
168073f53c4aSTejun Heo 	/* ignore uncolored works */
168173f53c4aSTejun Heo 	if (color == WORK_NO_COLOR)
168273f53c4aSTejun Heo 		return;
168373f53c4aSTejun Heo 
168473f53c4aSTejun Heo 	cwq->nr_in_flight[color]--;
16851e19ffc6STejun Heo 	cwq->nr_active--;
16861e19ffc6STejun Heo 
1687502ca9d8STejun Heo 	if (!list_empty(&cwq->delayed_works)) {
16881e19ffc6STejun Heo 		/* one down, submit a delayed one */
1689502ca9d8STejun Heo 		if (cwq->nr_active < cwq->max_active)
16901e19ffc6STejun Heo 			cwq_activate_first_delayed(cwq);
1691502ca9d8STejun Heo 	}
169273f53c4aSTejun Heo 
169373f53c4aSTejun Heo 	/* is flush in progress and are we at the flushing tip? */
169473f53c4aSTejun Heo 	if (likely(cwq->flush_color != color))
169573f53c4aSTejun Heo 		return;
169673f53c4aSTejun Heo 
169773f53c4aSTejun Heo 	/* are there still in-flight works? */
169873f53c4aSTejun Heo 	if (cwq->nr_in_flight[color])
169973f53c4aSTejun Heo 		return;
170073f53c4aSTejun Heo 
170173f53c4aSTejun Heo 	/* this cwq is done, clear flush_color */
170273f53c4aSTejun Heo 	cwq->flush_color = -1;
170373f53c4aSTejun Heo 
170473f53c4aSTejun Heo 	/*
170573f53c4aSTejun Heo 	 * If this was the last cwq, wake up the first flusher.  It
170673f53c4aSTejun Heo 	 * will handle the rest.
170773f53c4aSTejun Heo 	 */
170873f53c4aSTejun Heo 	if (atomic_dec_and_test(&cwq->wq->nr_cwqs_to_flush))
170973f53c4aSTejun Heo 		complete(&cwq->wq->first_flusher->done);
171073f53c4aSTejun Heo }
171173f53c4aSTejun Heo 
171273f53c4aSTejun Heo /**
1713a62428c0STejun Heo  * process_one_work - process single work
1714c34056a3STejun Heo  * @worker: self
1715a62428c0STejun Heo  * @work: work to process
1716a62428c0STejun Heo  *
1717a62428c0STejun Heo  * Process @work.  This function contains all the logic necessary to
1718a62428c0STejun Heo  * process a single work including synchronization against and
1719a62428c0STejun Heo  * interaction with other workers on the same cpu, queueing and
1720a62428c0STejun Heo  * flushing.  As long as context requirement is met, any worker can
1721a62428c0STejun Heo  * call this function to process a work.
1722a62428c0STejun Heo  *
1723a62428c0STejun Heo  * CONTEXT:
17248b03ae3cSTejun Heo  * spin_lock_irq(gcwq->lock) which is released and regrabbed.
1725a62428c0STejun Heo  */
1726c34056a3STejun Heo static void process_one_work(struct worker *worker, struct work_struct *work)
1727*06bd6ebfSNamhyung Kim __releases(&gcwq->lock)
1728*06bd6ebfSNamhyung Kim __acquires(&gcwq->lock)
17291da177e4SLinus Torvalds {
17307e11629dSTejun Heo 	struct cpu_workqueue_struct *cwq = get_work_cwq(work);
17318b03ae3cSTejun Heo 	struct global_cwq *gcwq = cwq->gcwq;
1732c8e55f36STejun Heo 	struct hlist_head *bwh = busy_worker_head(gcwq, work);
1733fb0e7bebSTejun Heo 	bool cpu_intensive = cwq->wq->flags & WQ_CPU_INTENSIVE;
17346bb49e59SDavid Howells 	work_func_t f = work->func;
173573f53c4aSTejun Heo 	int work_color;
17367e11629dSTejun Heo 	struct worker *collision;
17374e6045f1SJohannes Berg #ifdef CONFIG_LOCKDEP
17384e6045f1SJohannes Berg 	/*
1739a62428c0STejun Heo 	 * It is permissible to free the struct work_struct from
1740a62428c0STejun Heo 	 * inside the function that is called from it, this we need to
1741a62428c0STejun Heo 	 * take into account for lockdep too.  To avoid bogus "held
1742a62428c0STejun Heo 	 * lock freed" warnings as well as problems when looking into
1743a62428c0STejun Heo 	 * work->lockdep_map, make a copy and use that here.
17444e6045f1SJohannes Berg 	 */
17454e6045f1SJohannes Berg 	struct lockdep_map lockdep_map = work->lockdep_map;
17464e6045f1SJohannes Berg #endif
17477e11629dSTejun Heo 	/*
17487e11629dSTejun Heo 	 * A single work shouldn't be executed concurrently by
17497e11629dSTejun Heo 	 * multiple workers on a single cpu.  Check whether anyone is
17507e11629dSTejun Heo 	 * already processing the work.  If so, defer the work to the
17517e11629dSTejun Heo 	 * currently executing one.
17527e11629dSTejun Heo 	 */
17537e11629dSTejun Heo 	collision = __find_worker_executing_work(gcwq, bwh, work);
17547e11629dSTejun Heo 	if (unlikely(collision)) {
17557e11629dSTejun Heo 		move_linked_works(work, &collision->scheduled, NULL);
17567e11629dSTejun Heo 		return;
17577e11629dSTejun Heo 	}
17581da177e4SLinus Torvalds 
1759a62428c0STejun Heo 	/* claim and process */
17601da177e4SLinus Torvalds 	debug_work_deactivate(work);
1761c8e55f36STejun Heo 	hlist_add_head(&worker->hentry, bwh);
1762c34056a3STejun Heo 	worker->current_work = work;
17638cca0eeaSTejun Heo 	worker->current_cwq = cwq;
176473f53c4aSTejun Heo 	work_color = get_work_color(work);
17657a22ad75STejun Heo 
17667a22ad75STejun Heo 	/* record the current cpu number in the work data and dequeue */
17677a22ad75STejun Heo 	set_work_cpu(work, gcwq->cpu);
1768a62428c0STejun Heo 	list_del_init(&work->entry);
1769a62428c0STejun Heo 
1770649027d7STejun Heo 	/*
1771649027d7STejun Heo 	 * If HIGHPRI_PENDING, check the next work, and, if HIGHPRI,
1772649027d7STejun Heo 	 * wake up another worker; otherwise, clear HIGHPRI_PENDING.
1773649027d7STejun Heo 	 */
1774649027d7STejun Heo 	if (unlikely(gcwq->flags & GCWQ_HIGHPRI_PENDING)) {
1775649027d7STejun Heo 		struct work_struct *nwork = list_first_entry(&gcwq->worklist,
1776649027d7STejun Heo 						struct work_struct, entry);
1777649027d7STejun Heo 
1778649027d7STejun Heo 		if (!list_empty(&gcwq->worklist) &&
1779649027d7STejun Heo 		    get_work_cwq(nwork)->wq->flags & WQ_HIGHPRI)
1780649027d7STejun Heo 			wake_up_worker(gcwq);
1781649027d7STejun Heo 		else
1782649027d7STejun Heo 			gcwq->flags &= ~GCWQ_HIGHPRI_PENDING;
1783649027d7STejun Heo 	}
1784649027d7STejun Heo 
1785fb0e7bebSTejun Heo 	/*
1786fb0e7bebSTejun Heo 	 * CPU intensive works don't participate in concurrency
1787fb0e7bebSTejun Heo 	 * management.  They're the scheduler's responsibility.
1788fb0e7bebSTejun Heo 	 */
1789fb0e7bebSTejun Heo 	if (unlikely(cpu_intensive))
1790fb0e7bebSTejun Heo 		worker_set_flags(worker, WORKER_CPU_INTENSIVE, true);
1791fb0e7bebSTejun Heo 
17928b03ae3cSTejun Heo 	spin_unlock_irq(&gcwq->lock);
17931da177e4SLinus Torvalds 
179423b2e599SOleg Nesterov 	work_clear_pending(work);
17953295f0efSIngo Molnar 	lock_map_acquire(&cwq->wq->lockdep_map);
17963295f0efSIngo Molnar 	lock_map_acquire(&lockdep_map);
179765f27f38SDavid Howells 	f(work);
17983295f0efSIngo Molnar 	lock_map_release(&lockdep_map);
17993295f0efSIngo Molnar 	lock_map_release(&cwq->wq->lockdep_map);
18001da177e4SLinus Torvalds 
1801d5abe669SPeter Zijlstra 	if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
1802d5abe669SPeter Zijlstra 		printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
1803d5abe669SPeter Zijlstra 		       "%s/0x%08x/%d\n",
1804a62428c0STejun Heo 		       current->comm, preempt_count(), task_pid_nr(current));
1805d5abe669SPeter Zijlstra 		printk(KERN_ERR "    last function: ");
1806d5abe669SPeter Zijlstra 		print_symbol("%s\n", (unsigned long)f);
1807d5abe669SPeter Zijlstra 		debug_show_held_locks(current);
1808d5abe669SPeter Zijlstra 		dump_stack();
1809d5abe669SPeter Zijlstra 	}
1810d5abe669SPeter Zijlstra 
18118b03ae3cSTejun Heo 	spin_lock_irq(&gcwq->lock);
1812a62428c0STejun Heo 
1813fb0e7bebSTejun Heo 	/* clear cpu intensive status */
1814fb0e7bebSTejun Heo 	if (unlikely(cpu_intensive))
1815fb0e7bebSTejun Heo 		worker_clr_flags(worker, WORKER_CPU_INTENSIVE);
1816fb0e7bebSTejun Heo 
1817a62428c0STejun Heo 	/* we're done with it, release */
1818c8e55f36STejun Heo 	hlist_del_init(&worker->hentry);
1819c34056a3STejun Heo 	worker->current_work = NULL;
18208cca0eeaSTejun Heo 	worker->current_cwq = NULL;
182173f53c4aSTejun Heo 	cwq_dec_nr_in_flight(cwq, work_color);
18221da177e4SLinus Torvalds }
18231da177e4SLinus Torvalds 
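/*
 * Example (editor's sketch) of the bug caught by the in_atomic() /
 * lockdep_depth() check above: a work function that returns with a
 * lock held, e.g.
 *
 *	static void leaky_work_fn(struct work_struct *work)
 *	{
 *		spin_lock(&some_lock);
 *		return;			// forgot spin_unlock()
 *	}
 *
 * leaves lockdep_depth(current) > 0 after f(work) returns, so the
 * "BUG: workqueue leaked lock or atomic" report above fires and
 * prints the offending function via print_symbol().
 */
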
1824affee4b2STejun Heo /**
1825affee4b2STejun Heo  * process_scheduled_works - process scheduled works
1826affee4b2STejun Heo  * @worker: self
1827affee4b2STejun Heo  *
1828affee4b2STejun Heo  * Process all scheduled works.  Please note that the scheduled list
1829affee4b2STejun Heo  * may change while processing a work, so this function repeatedly
1830affee4b2STejun Heo  * fetches a work from the top and executes it.
1831affee4b2STejun Heo  *
1832affee4b2STejun Heo  * CONTEXT:
18338b03ae3cSTejun Heo  * spin_lock_irq(gcwq->lock) which may be released and regrabbed
1834affee4b2STejun Heo  * multiple times.
1835affee4b2STejun Heo  */
1836affee4b2STejun Heo static void process_scheduled_works(struct worker *worker)
18371da177e4SLinus Torvalds {
1838affee4b2STejun Heo 	while (!list_empty(&worker->scheduled)) {
1839affee4b2STejun Heo 		struct work_struct *work = list_first_entry(&worker->scheduled,
1840a62428c0STejun Heo 						struct work_struct, entry);
1841c34056a3STejun Heo 		process_one_work(worker, work);
1842a62428c0STejun Heo 	}
18431da177e4SLinus Torvalds }
18441da177e4SLinus Torvalds 
18454690c4abSTejun Heo /**
18464690c4abSTejun Heo  * worker_thread - the worker thread function
1847c34056a3STejun Heo  * @__worker: self
18484690c4abSTejun Heo  *
1849e22bee78STejun Heo  * The gcwq worker thread function.  There's a single dynamic pool of
1850e22bee78STejun Heo  * these per each cpu.  These workers process all works regardless of
1851e22bee78STejun Heo  * their specific target workqueue.  The only exception is works which
1852e22bee78STejun Heo  * belong to workqueues with a rescuer which will be explained in
1853e22bee78STejun Heo  * rescuer_thread().
18544690c4abSTejun Heo  */
1855c34056a3STejun Heo static int worker_thread(void *__worker)
18561da177e4SLinus Torvalds {
1857c34056a3STejun Heo 	struct worker *worker = __worker;
18588b03ae3cSTejun Heo 	struct global_cwq *gcwq = worker->gcwq;
18591da177e4SLinus Torvalds 
1860e22bee78STejun Heo 	/* tell the scheduler that this is a workqueue worker */
1861e22bee78STejun Heo 	worker->task->flags |= PF_WQ_WORKER;
1862c8e55f36STejun Heo woke_up:
18638b03ae3cSTejun Heo 	spin_lock_irq(&gcwq->lock);
1864affee4b2STejun Heo 
1865c8e55f36STejun Heo 	/* DIE can be set only while we're idle, checking here is enough */
1866c8e55f36STejun Heo 	if (worker->flags & WORKER_DIE) {
1867c8e55f36STejun Heo 		spin_unlock_irq(&gcwq->lock);
1868e22bee78STejun Heo 		worker->task->flags &= ~PF_WQ_WORKER;
1869c8e55f36STejun Heo 		return 0;
1870c8e55f36STejun Heo 	}
1871c8e55f36STejun Heo 
1872c8e55f36STejun Heo 	worker_leave_idle(worker);
1873db7bccf4STejun Heo recheck:
1874e22bee78STejun Heo 	/* no more worker necessary? */
1875e22bee78STejun Heo 	if (!need_more_worker(gcwq))
1876e22bee78STejun Heo 		goto sleep;
1877e22bee78STejun Heo 
1878e22bee78STejun Heo 	/* do we need to manage? */
1879e22bee78STejun Heo 	if (unlikely(!may_start_working(gcwq)) && manage_workers(worker))
1880e22bee78STejun Heo 		goto recheck;
1881e22bee78STejun Heo 
1882c8e55f36STejun Heo 	/*
1883c8e55f36STejun Heo 	 * ->scheduled list can only be filled while a worker is
1884c8e55f36STejun Heo 	 * preparing to process a work or actually processing it.
1885c8e55f36STejun Heo 	 * Make sure nobody diddled with it while I was sleeping.
1886c8e55f36STejun Heo 	 */
1887c8e55f36STejun Heo 	BUG_ON(!list_empty(&worker->scheduled));
1888c8e55f36STejun Heo 
1889e22bee78STejun Heo 	/*
1890e22bee78STejun Heo 	 * When control reaches this point, we're guaranteed to have
1891e22bee78STejun Heo 	 * at least one idle worker or that someone else has already
1892e22bee78STejun Heo 	 * assumed the manager role.
1893e22bee78STejun Heo 	 */
1894e22bee78STejun Heo 	worker_clr_flags(worker, WORKER_PREP);
1895e22bee78STejun Heo 
1896e22bee78STejun Heo 	do {
1897affee4b2STejun Heo 		struct work_struct *work =
18987e11629dSTejun Heo 			list_first_entry(&gcwq->worklist,
1899affee4b2STejun Heo 					 struct work_struct, entry);
1900affee4b2STejun Heo 
1901c8e55f36STejun Heo 		if (likely(!(*work_data_bits(work) & WORK_STRUCT_LINKED))) {
1902affee4b2STejun Heo 			/* optimization path, not strictly necessary */
1903affee4b2STejun Heo 			process_one_work(worker, work);
1904affee4b2STejun Heo 			if (unlikely(!list_empty(&worker->scheduled)))
1905affee4b2STejun Heo 				process_scheduled_works(worker);
1906affee4b2STejun Heo 		} else {
1907c8e55f36STejun Heo 			move_linked_works(work, &worker->scheduled, NULL);
1908affee4b2STejun Heo 			process_scheduled_works(worker);
1909affee4b2STejun Heo 		}
1910e22bee78STejun Heo 	} while (keep_working(gcwq));
1911affee4b2STejun Heo 
1912e22bee78STejun Heo 	worker_set_flags(worker, WORKER_PREP, false);
1913d313dd85STejun Heo sleep:
1914e22bee78STejun Heo 	if (unlikely(need_to_manage_workers(gcwq)) && manage_workers(worker))
1915e22bee78STejun Heo 		goto recheck;
1916d313dd85STejun Heo 
1917c8e55f36STejun Heo 	/*
1918e22bee78STejun Heo 	 * gcwq->lock is held and there's no work to process and no
1919e22bee78STejun Heo 	 * need to manage, sleep.  Workers are woken up only while
1920e22bee78STejun Heo 	 * holding gcwq->lock or from local cpu, so setting the
1921e22bee78STejun Heo 	 * current state before releasing gcwq->lock is enough to
1922e22bee78STejun Heo 	 * prevent losing any event.
1923c8e55f36STejun Heo 	 */
1924c8e55f36STejun Heo 	worker_enter_idle(worker);
1925c8e55f36STejun Heo 	__set_current_state(TASK_INTERRUPTIBLE);
19268b03ae3cSTejun Heo 	spin_unlock_irq(&gcwq->lock);
19271da177e4SLinus Torvalds 	schedule();
1928c8e55f36STejun Heo 	goto woke_up;
19291da177e4SLinus Torvalds }
19301da177e4SLinus Torvalds 
1931e22bee78STejun Heo /**
1932e22bee78STejun Heo  * rescuer_thread - the rescuer thread function
1933e22bee78STejun Heo  * @__wq: the associated workqueue
1934e22bee78STejun Heo  *
1935e22bee78STejun Heo  * Workqueue rescuer thread function.  There's one rescuer for each
1936e22bee78STejun Heo  * workqueue which has WQ_RESCUER set.
1937e22bee78STejun Heo  *
1938e22bee78STejun Heo  * Regular work processing on a gcwq may block trying to create a new
1939e22bee78STejun Heo  * worker, which uses a GFP_KERNEL allocation that has a slight
1940e22bee78STejun Heo  * chance of developing into a deadlock if some works currently on
1941e22bee78STejun Heo  * the same queue need to be processed to satisfy that allocation.
1942e22bee78STejun Heo  * This is the problem the rescuer solves.
1943e22bee78STejun Heo  *
1944e22bee78STejun Heo  * When such condition is possible, the gcwq summons rescuers of all
1945e22bee78STejun Heo  * workqueues which have works queued on the gcwq and let them process
1946e22bee78STejun Heo  * those works so that forward progress can be guaranteed.
1947e22bee78STejun Heo  *
1948e22bee78STejun Heo  * This should happen rarely.
1949e22bee78STejun Heo  */
1950e22bee78STejun Heo static int rescuer_thread(void *__wq)
1951e22bee78STejun Heo {
1952e22bee78STejun Heo 	struct workqueue_struct *wq = __wq;
1953e22bee78STejun Heo 	struct worker *rescuer = wq->rescuer;
1954e22bee78STejun Heo 	struct list_head *scheduled = &rescuer->scheduled;
1955f3421797STejun Heo 	bool is_unbound = wq->flags & WQ_UNBOUND;
1956e22bee78STejun Heo 	unsigned int cpu;
1957e22bee78STejun Heo 
1958e22bee78STejun Heo 	set_user_nice(current, RESCUER_NICE_LEVEL);
1959e22bee78STejun Heo repeat:
1960e22bee78STejun Heo 	set_current_state(TASK_INTERRUPTIBLE);
19611da177e4SLinus Torvalds 
19621da177e4SLinus Torvalds 	if (kthread_should_stop())
1963e22bee78STejun Heo 		return 0;
19641da177e4SLinus Torvalds 
1965f3421797STejun Heo 	/*
1966f3421797STejun Heo 	 * See whether any cpu is asking for help.  Unbound
1967f3421797STejun Heo 	 * workqueues use cpu 0 in mayday_mask for WORK_CPU_UNBOUND.
1968f3421797STejun Heo 	 */
1969f2e005aaSTejun Heo 	for_each_mayday_cpu(cpu, wq->mayday_mask) {
1970f3421797STejun Heo 		unsigned int tcpu = is_unbound ? WORK_CPU_UNBOUND : cpu;
1971f3421797STejun Heo 		struct cpu_workqueue_struct *cwq = get_cwq(tcpu, wq);
1972e22bee78STejun Heo 		struct global_cwq *gcwq = cwq->gcwq;
1973e22bee78STejun Heo 		struct work_struct *work, *n;
1974e22bee78STejun Heo 
1975e22bee78STejun Heo 		__set_current_state(TASK_RUNNING);
1976f2e005aaSTejun Heo 		mayday_clear_cpu(cpu, wq->mayday_mask);
1977e22bee78STejun Heo 
1978e22bee78STejun Heo 		/* migrate to the target cpu if possible */
1979e22bee78STejun Heo 		rescuer->gcwq = gcwq;
1980e22bee78STejun Heo 		worker_maybe_bind_and_lock(rescuer);
1981e22bee78STejun Heo 
1982e22bee78STejun Heo 		/*
1983e22bee78STejun Heo 		 * Slurp in all works issued via this workqueue and
1984e22bee78STejun Heo 		 * process'em.
1985e22bee78STejun Heo 		 */
1986e22bee78STejun Heo 		BUG_ON(!list_empty(&rescuer->scheduled));
1987e22bee78STejun Heo 		list_for_each_entry_safe(work, n, &gcwq->worklist, entry)
1988e22bee78STejun Heo 			if (get_work_cwq(work) == cwq)
1989e22bee78STejun Heo 				move_linked_works(work, scheduled, &n);
1990e22bee78STejun Heo 
1991e22bee78STejun Heo 		process_scheduled_works(rescuer);
1992e22bee78STejun Heo 		spin_unlock_irq(&gcwq->lock);
19931da177e4SLinus Torvalds 	}
19941da177e4SLinus Torvalds 
1995e22bee78STejun Heo 	schedule();
1996e22bee78STejun Heo 	goto repeat;
19971da177e4SLinus Torvalds }
19981da177e4SLinus Torvalds 
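/*
 * Example (editor's sketch, API details hedged): a workqueue gets a
 * rescuer by passing WQ_RESCUER at creation time, e.g.
 *
 *	struct workqueue_struct *wq;
 *
 *	wq = alloc_workqueue("mydrv", WQ_RESCUER, 1);
 *	if (!wq)
 *		return -ENOMEM;
 *
 * "mydrv" is a hypothetical name.  Works queued on such a workqueue
 * are guaranteed forward progress even when worker creation stalls
 * under memory pressure, per the comment above.
 */
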
1999fc2e4d70SOleg Nesterov struct wq_barrier {
2000fc2e4d70SOleg Nesterov 	struct work_struct	work;
2001fc2e4d70SOleg Nesterov 	struct completion	done;
2002fc2e4d70SOleg Nesterov };
2003fc2e4d70SOleg Nesterov 
2004fc2e4d70SOleg Nesterov static void wq_barrier_func(struct work_struct *work)
2005fc2e4d70SOleg Nesterov {
2006fc2e4d70SOleg Nesterov 	struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
2007fc2e4d70SOleg Nesterov 	complete(&barr->done);
2008fc2e4d70SOleg Nesterov }
2009fc2e4d70SOleg Nesterov 
20104690c4abSTejun Heo /**
20114690c4abSTejun Heo  * insert_wq_barrier - insert a barrier work
20124690c4abSTejun Heo  * @cwq: cwq to insert barrier into
20134690c4abSTejun Heo  * @barr: wq_barrier to insert
2014affee4b2STejun Heo  * @target: target work to attach @barr to
2015affee4b2STejun Heo  * @worker: worker currently executing @target, NULL if @target is not executing
20164690c4abSTejun Heo  *
2017affee4b2STejun Heo  * @barr is linked to @target such that @barr is completed only after
2018affee4b2STejun Heo  * @target finishes execution.  Please note that the ordering
2019affee4b2STejun Heo  * guarantee is observed only with respect to @target and on the local
2020affee4b2STejun Heo  * cpu.
2021affee4b2STejun Heo  *
2022affee4b2STejun Heo  * Currently, a queued barrier can't be canceled.  This is because
2023affee4b2STejun Heo  * try_to_grab_pending() can't determine whether the work to be
2024affee4b2STejun Heo  * grabbed is at the head of the queue and thus can't clear LINKED
2025affee4b2STejun Heo  * flag of the previous work while there must be a valid next work
2026affee4b2STejun Heo  * after a work with LINKED flag set.
2027affee4b2STejun Heo  *
2028affee4b2STejun Heo  * Note that when @worker is non-NULL, @target may be modified
2029affee4b2STejun Heo  * underneath us, so we can't reliably determine cwq from @target.
20304690c4abSTejun Heo  *
20314690c4abSTejun Heo  * CONTEXT:
20328b03ae3cSTejun Heo  * spin_lock_irq(gcwq->lock).
20334690c4abSTejun Heo  */
203483c22520SOleg Nesterov static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
2035affee4b2STejun Heo 			      struct wq_barrier *barr,
2036affee4b2STejun Heo 			      struct work_struct *target, struct worker *worker)
2037fc2e4d70SOleg Nesterov {
2038affee4b2STejun Heo 	struct list_head *head;
2039affee4b2STejun Heo 	unsigned int linked = 0;
2040affee4b2STejun Heo 
2041dc186ad7SThomas Gleixner 	/*
20428b03ae3cSTejun Heo 	 * debugobject calls are safe here even with gcwq->lock locked
2043dc186ad7SThomas Gleixner 	 * as we know for sure that this will not trigger any of the
2044dc186ad7SThomas Gleixner 	 * checks and call back into the fixup functions where we
2045dc186ad7SThomas Gleixner 	 * might deadlock.
2046dc186ad7SThomas Gleixner 	 */
2047dc186ad7SThomas Gleixner 	INIT_WORK_ON_STACK(&barr->work, wq_barrier_func);
204822df02bbSTejun Heo 	__set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work));
2049fc2e4d70SOleg Nesterov 	init_completion(&barr->done);
205083c22520SOleg Nesterov 
2051affee4b2STejun Heo 	/*
2052affee4b2STejun Heo 	 * If @target is currently being executed, schedule the
2053affee4b2STejun Heo 	 * barrier to the worker; otherwise, put it after @target.
2054affee4b2STejun Heo 	 */
2055affee4b2STejun Heo 	if (worker)
2056affee4b2STejun Heo 		head = worker->scheduled.next;
2057affee4b2STejun Heo 	else {
2058affee4b2STejun Heo 		unsigned long *bits = work_data_bits(target);
2059affee4b2STejun Heo 
2060affee4b2STejun Heo 		head = target->entry.next;
2061affee4b2STejun Heo 		/* there can already be other linked works, inherit and set */
2062affee4b2STejun Heo 		linked = *bits & WORK_STRUCT_LINKED;
2063affee4b2STejun Heo 		__set_bit(WORK_STRUCT_LINKED_BIT, bits);
2064affee4b2STejun Heo 	}
2065affee4b2STejun Heo 
2066dc186ad7SThomas Gleixner 	debug_work_activate(&barr->work);
2067affee4b2STejun Heo 	insert_work(cwq, &barr->work, head,
2068affee4b2STejun Heo 		    work_color_to_flags(WORK_NO_COLOR) | linked);
2069fc2e4d70SOleg Nesterov }
2070fc2e4d70SOleg Nesterov 
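/*
 * Editor's sketch of a typical insert_wq_barrier() caller (this
 * mirrors the flush paths elsewhere in this file; details hedged):
 *
 *	struct wq_barrier barr;
 *
 *	spin_lock_irq(&gcwq->lock);
 *	// ... locate target work and, if executing, its worker ...
 *	insert_wq_barrier(cwq, &barr, target, worker);
 *	spin_unlock_irq(&gcwq->lock);
 *
 *	wait_for_completion(&barr.done);
 *	destroy_work_on_stack(&barr.work);
 */
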
207173f53c4aSTejun Heo /**
207273f53c4aSTejun Heo  * flush_workqueue_prep_cwqs - prepare cwqs for workqueue flushing
207373f53c4aSTejun Heo  * @wq: workqueue being flushed
207473f53c4aSTejun Heo  * @flush_color: new flush color, < 0 for no-op
207573f53c4aSTejun Heo  * @work_color: new work color, < 0 for no-op
207673f53c4aSTejun Heo  *
207773f53c4aSTejun Heo  * Prepare cwqs for workqueue flushing.
207873f53c4aSTejun Heo  *
207973f53c4aSTejun Heo  * If @flush_color is non-negative, flush_color on all cwqs should be
208073f53c4aSTejun Heo  * -1.  If no cwq has in-flight commands at the specified color, all
208173f53c4aSTejun Heo  * cwq->flush_color's stay at -1 and %false is returned.  If any cwq
208273f53c4aSTejun Heo  * has in-flight commands, its cwq->flush_color is set to
208373f53c4aSTejun Heo  * @flush_color, @wq->nr_cwqs_to_flush is updated accordingly, cwq
208473f53c4aSTejun Heo  * wakeup logic is armed and %true is returned.
208573f53c4aSTejun Heo  *
208673f53c4aSTejun Heo  * The caller should have initialized @wq->first_flusher prior to
208773f53c4aSTejun Heo  * calling this function with non-negative @flush_color.  If
208873f53c4aSTejun Heo  * @flush_color is negative, no flush color update is done and %false
208973f53c4aSTejun Heo  * is returned.
209073f53c4aSTejun Heo  *
209173f53c4aSTejun Heo  * If @work_color is non-negative, all cwqs should have the same
209273f53c4aSTejun Heo  * work_color which is previous to @work_color and all will be
209373f53c4aSTejun Heo  * advanced to @work_color.
209473f53c4aSTejun Heo  *
209573f53c4aSTejun Heo  * CONTEXT:
209673f53c4aSTejun Heo  * mutex_lock(wq->flush_mutex).
209773f53c4aSTejun Heo  *
209873f53c4aSTejun Heo  * RETURNS:
209973f53c4aSTejun Heo  * %true if @flush_color >= 0 and there's something to flush.  %false
210073f53c4aSTejun Heo  * otherwise.
210173f53c4aSTejun Heo  */
210273f53c4aSTejun Heo static bool flush_workqueue_prep_cwqs(struct workqueue_struct *wq,
210373f53c4aSTejun Heo 				      int flush_color, int work_color)
21041da177e4SLinus Torvalds {
210573f53c4aSTejun Heo 	bool wait = false;
210673f53c4aSTejun Heo 	unsigned int cpu;
21071da177e4SLinus Torvalds 
210873f53c4aSTejun Heo 	if (flush_color >= 0) {
210973f53c4aSTejun Heo 		BUG_ON(atomic_read(&wq->nr_cwqs_to_flush));
211073f53c4aSTejun Heo 		atomic_set(&wq->nr_cwqs_to_flush, 1);
2111dc186ad7SThomas Gleixner 	}
211214441960SOleg Nesterov 
2113f3421797STejun Heo 	for_each_cwq_cpu(cpu, wq) {
211473f53c4aSTejun Heo 		struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
21158b03ae3cSTejun Heo 		struct global_cwq *gcwq = cwq->gcwq;
21161da177e4SLinus Torvalds 
21178b03ae3cSTejun Heo 		spin_lock_irq(&gcwq->lock);
211873f53c4aSTejun Heo 
211973f53c4aSTejun Heo 		if (flush_color >= 0) {
212073f53c4aSTejun Heo 			BUG_ON(cwq->flush_color != -1);
212173f53c4aSTejun Heo 
212273f53c4aSTejun Heo 			if (cwq->nr_in_flight[flush_color]) {
212373f53c4aSTejun Heo 				cwq->flush_color = flush_color;
212473f53c4aSTejun Heo 				atomic_inc(&wq->nr_cwqs_to_flush);
212573f53c4aSTejun Heo 				wait = true;
21261da177e4SLinus Torvalds 			}
212773f53c4aSTejun Heo 		}
212873f53c4aSTejun Heo 
212973f53c4aSTejun Heo 		if (work_color >= 0) {
213073f53c4aSTejun Heo 			BUG_ON(work_color != work_next_color(cwq->work_color));
213173f53c4aSTejun Heo 			cwq->work_color = work_color;
213273f53c4aSTejun Heo 		}
213373f53c4aSTejun Heo 
21348b03ae3cSTejun Heo 		spin_unlock_irq(&gcwq->lock);
21351da177e4SLinus Torvalds 	}
21361da177e4SLinus Torvalds 
213773f53c4aSTejun Heo 	if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_cwqs_to_flush))
213873f53c4aSTejun Heo 		complete(&wq->first_flusher->done);
213973f53c4aSTejun Heo 
214073f53c4aSTejun Heo 	return wait;
214183c22520SOleg Nesterov }
21421da177e4SLinus Torvalds 
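/*
 * Editor's note on the flush colors used above and in
 * flush_workqueue() below: work_next_color() advances cyclically,
 * presumably (color + 1) % WORK_NR_COLORS with one value reserved
 * for WORK_NO_COLOR.  With a 15-color space (an assumption), a flush
 * starting at work_color == 14 wraps back to color 0, and a flusher
 * only lands on the overflow queue when every color is already in
 * flight.
 */
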
21430fcb78c2SRolf Eike Beer /**
21441da177e4SLinus Torvalds  * flush_workqueue - ensure that any scheduled work has run to completion.
21450fcb78c2SRolf Eike Beer  * @wq: workqueue to flush
21461da177e4SLinus Torvalds  *
21471da177e4SLinus Torvalds  * Forces execution of the workqueue and blocks until its completion.
21481da177e4SLinus Torvalds  * This is typically used in driver shutdown handlers.
21491da177e4SLinus Torvalds  *
2150fc2e4d70SOleg Nesterov  * We sleep until all works which were queued on entry have been handled,
2151fc2e4d70SOleg Nesterov  * but we are not livelocked by new incoming ones.
21521da177e4SLinus Torvalds  */
21537ad5b3a5SHarvey Harrison void flush_workqueue(struct workqueue_struct *wq)
21541da177e4SLinus Torvalds {
215573f53c4aSTejun Heo 	struct wq_flusher this_flusher = {
215673f53c4aSTejun Heo 		.list = LIST_HEAD_INIT(this_flusher.list),
215773f53c4aSTejun Heo 		.flush_color = -1,
215873f53c4aSTejun Heo 		.done = COMPLETION_INITIALIZER_ONSTACK(this_flusher.done),
215973f53c4aSTejun Heo 	};
216073f53c4aSTejun Heo 	int next_color;
2161b1f4ec17SOleg Nesterov 
21623295f0efSIngo Molnar 	lock_map_acquire(&wq->lockdep_map);
21633295f0efSIngo Molnar 	lock_map_release(&wq->lockdep_map);
216473f53c4aSTejun Heo 
216573f53c4aSTejun Heo 	mutex_lock(&wq->flush_mutex);
216673f53c4aSTejun Heo 
216773f53c4aSTejun Heo 	/*
216873f53c4aSTejun Heo 	 * Start-to-wait phase
216973f53c4aSTejun Heo 	 */
217073f53c4aSTejun Heo 	next_color = work_next_color(wq->work_color);
217173f53c4aSTejun Heo 
217273f53c4aSTejun Heo 	if (next_color != wq->flush_color) {
217373f53c4aSTejun Heo 		/*
217473f53c4aSTejun Heo 		 * Color space is not full.  The current work_color
217573f53c4aSTejun Heo 		 * becomes our flush_color and work_color is advanced
217673f53c4aSTejun Heo 		 * by one.
217773f53c4aSTejun Heo 		 */
217873f53c4aSTejun Heo 		BUG_ON(!list_empty(&wq->flusher_overflow));
217973f53c4aSTejun Heo 		this_flusher.flush_color = wq->work_color;
218073f53c4aSTejun Heo 		wq->work_color = next_color;
218173f53c4aSTejun Heo 
218273f53c4aSTejun Heo 		if (!wq->first_flusher) {
218373f53c4aSTejun Heo 			/* no flush in progress, become the first flusher */
218473f53c4aSTejun Heo 			BUG_ON(wq->flush_color != this_flusher.flush_color);
218573f53c4aSTejun Heo 
218673f53c4aSTejun Heo 			wq->first_flusher = &this_flusher;
218773f53c4aSTejun Heo 
218873f53c4aSTejun Heo 			if (!flush_workqueue_prep_cwqs(wq, wq->flush_color,
218973f53c4aSTejun Heo 						       wq->work_color)) {
219073f53c4aSTejun Heo 				/* nothing to flush, done */
219173f53c4aSTejun Heo 				wq->flush_color = next_color;
219273f53c4aSTejun Heo 				wq->first_flusher = NULL;
219373f53c4aSTejun Heo 				goto out_unlock;
219473f53c4aSTejun Heo 			}
219573f53c4aSTejun Heo 		} else {
219673f53c4aSTejun Heo 			/* wait in queue */
219773f53c4aSTejun Heo 			BUG_ON(wq->flush_color == this_flusher.flush_color);
219873f53c4aSTejun Heo 			list_add_tail(&this_flusher.list, &wq->flusher_queue);
219973f53c4aSTejun Heo 			flush_workqueue_prep_cwqs(wq, -1, wq->work_color);
220073f53c4aSTejun Heo 		}
220173f53c4aSTejun Heo 	} else {
220273f53c4aSTejun Heo 		/*
220373f53c4aSTejun Heo 		 * Oops, color space is full, wait on overflow queue.
220473f53c4aSTejun Heo 		 * The next flush completion will assign us
220573f53c4aSTejun Heo 		 * flush_color and transfer to flusher_queue.
220673f53c4aSTejun Heo 		 */
220773f53c4aSTejun Heo 		list_add_tail(&this_flusher.list, &wq->flusher_overflow);
220873f53c4aSTejun Heo 	}
220973f53c4aSTejun Heo 
221073f53c4aSTejun Heo 	mutex_unlock(&wq->flush_mutex);
221173f53c4aSTejun Heo 
221273f53c4aSTejun Heo 	wait_for_completion(&this_flusher.done);
221373f53c4aSTejun Heo 
221473f53c4aSTejun Heo 	/*
221573f53c4aSTejun Heo 	 * Wake-up-and-cascade phase
221673f53c4aSTejun Heo 	 *
221773f53c4aSTejun Heo 	 * First flushers are responsible for cascading flushes and
221873f53c4aSTejun Heo 	 * handling overflow.  Non-first flushers can simply return.
221973f53c4aSTejun Heo 	 */
222073f53c4aSTejun Heo 	if (wq->first_flusher != &this_flusher)
222173f53c4aSTejun Heo 		return;
222273f53c4aSTejun Heo 
222373f53c4aSTejun Heo 	mutex_lock(&wq->flush_mutex);
222473f53c4aSTejun Heo 
22254ce48b37STejun Heo 	/* we might have raced, check again with mutex held */
22264ce48b37STejun Heo 	if (wq->first_flusher != &this_flusher)
22274ce48b37STejun Heo 		goto out_unlock;
22284ce48b37STejun Heo 
222973f53c4aSTejun Heo 	wq->first_flusher = NULL;
223073f53c4aSTejun Heo 
223173f53c4aSTejun Heo 	BUG_ON(!list_empty(&this_flusher.list));
223273f53c4aSTejun Heo 	BUG_ON(wq->flush_color != this_flusher.flush_color);
223373f53c4aSTejun Heo 
223473f53c4aSTejun Heo 	while (true) {
223573f53c4aSTejun Heo 		struct wq_flusher *next, *tmp;
223673f53c4aSTejun Heo 
223773f53c4aSTejun Heo 		/* complete all the flushers sharing the current flush color */
223873f53c4aSTejun Heo 		list_for_each_entry_safe(next, tmp, &wq->flusher_queue, list) {
223973f53c4aSTejun Heo 			if (next->flush_color != wq->flush_color)
224073f53c4aSTejun Heo 				break;
224173f53c4aSTejun Heo 			list_del_init(&next->list);
224273f53c4aSTejun Heo 			complete(&next->done);
224373f53c4aSTejun Heo 		}
224473f53c4aSTejun Heo 
224573f53c4aSTejun Heo 		BUG_ON(!list_empty(&wq->flusher_overflow) &&
224673f53c4aSTejun Heo 		       wq->flush_color != work_next_color(wq->work_color));
224773f53c4aSTejun Heo 
224873f53c4aSTejun Heo 		/* this flush_color is finished, advance by one */
224973f53c4aSTejun Heo 		wq->flush_color = work_next_color(wq->flush_color);
225073f53c4aSTejun Heo 
225173f53c4aSTejun Heo 		/* one color has been freed, handle overflow queue */
225273f53c4aSTejun Heo 		if (!list_empty(&wq->flusher_overflow)) {
225373f53c4aSTejun Heo 			/*
225473f53c4aSTejun Heo 			 * Assign the same color to all overflowed
225573f53c4aSTejun Heo 			 * flushers, advance work_color and append to
225673f53c4aSTejun Heo 			 * flusher_queue.  This is the start-to-wait
225773f53c4aSTejun Heo 			 * phase for these overflowed flushers.
225873f53c4aSTejun Heo 			 */
225973f53c4aSTejun Heo 			list_for_each_entry(tmp, &wq->flusher_overflow, list)
226073f53c4aSTejun Heo 				tmp->flush_color = wq->work_color;
226173f53c4aSTejun Heo 
226273f53c4aSTejun Heo 			wq->work_color = work_next_color(wq->work_color);
226373f53c4aSTejun Heo 
226473f53c4aSTejun Heo 			list_splice_tail_init(&wq->flusher_overflow,
226573f53c4aSTejun Heo 					      &wq->flusher_queue);
226673f53c4aSTejun Heo 			flush_workqueue_prep_cwqs(wq, -1, wq->work_color);
226773f53c4aSTejun Heo 		}
226873f53c4aSTejun Heo 
226973f53c4aSTejun Heo 		if (list_empty(&wq->flusher_queue)) {
227073f53c4aSTejun Heo 			BUG_ON(wq->flush_color != wq->work_color);
227173f53c4aSTejun Heo 			break;
227273f53c4aSTejun Heo 		}
227373f53c4aSTejun Heo 
227473f53c4aSTejun Heo 		/*
227573f53c4aSTejun Heo 		 * Need to flush more colors.  Make the next flusher
227673f53c4aSTejun Heo 		 * the new first flusher and arm cwqs.
227773f53c4aSTejun Heo 		 */
227873f53c4aSTejun Heo 		BUG_ON(wq->flush_color == wq->work_color);
227973f53c4aSTejun Heo 		BUG_ON(wq->flush_color != next->flush_color);
228073f53c4aSTejun Heo 
228173f53c4aSTejun Heo 		list_del_init(&next->list);
228273f53c4aSTejun Heo 		wq->first_flusher = next;
228373f53c4aSTejun Heo 
228473f53c4aSTejun Heo 		if (flush_workqueue_prep_cwqs(wq, wq->flush_color, -1))
228573f53c4aSTejun Heo 			break;
228673f53c4aSTejun Heo 
228773f53c4aSTejun Heo 		/*
228873f53c4aSTejun Heo 		 * Meh... this color is already done, clear first
228973f53c4aSTejun Heo 		 * flusher and repeat cascading.
229073f53c4aSTejun Heo 		 */
229173f53c4aSTejun Heo 		wq->first_flusher = NULL;
229273f53c4aSTejun Heo 	}
229373f53c4aSTejun Heo 
229473f53c4aSTejun Heo out_unlock:
229573f53c4aSTejun Heo 	mutex_unlock(&wq->flush_mutex);
22961da177e4SLinus Torvalds }
2297ae90dd5dSDave Jones EXPORT_SYMBOL_GPL(flush_workqueue);
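
/*
 * Example (editor's illustrative sketch, not part of the original file):
 * a driver shutdown path.  New work submission is assumed to have been
 * stopped already (e.g. the device's interrupt disabled); the mydrv_*
 * names are hypothetical.
 *
 *	struct mydrv {
 *		struct workqueue_struct *wq;
 *	};
 *
 *	static void mydrv_shutdown(struct mydrv *drv)
 *	{
 *		flush_workqueue(drv->wq);
 *		destroy_workqueue(drv->wq);
 *	}
 *
 * destroy_workqueue() itself flushes once more, so the explicit flush
 * only matters when teardown steps sit between the two calls.
 */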
22981da177e4SLinus Torvalds 
2299db700897SOleg Nesterov /**
2300db700897SOleg Nesterov  * flush_work - block until a work_struct's callback has terminated
2301db700897SOleg Nesterov  * @work: the work which is to be flushed
2302db700897SOleg Nesterov  *
2303a67da70dSOleg Nesterov  * Returns false if @work has already terminated.
2304a67da70dSOleg Nesterov  *
2305db700897SOleg Nesterov  * It is expected that, prior to calling flush_work(), the caller has
2306db700897SOleg Nesterov  * arranged for the work not to be requeued; otherwise it doesn't make
2307db700897SOleg Nesterov  * sense to use this function.
2308db700897SOleg Nesterov  */
2309db700897SOleg Nesterov int flush_work(struct work_struct *work)
2310db700897SOleg Nesterov {
2311affee4b2STejun Heo 	struct worker *worker = NULL;
23128b03ae3cSTejun Heo 	struct global_cwq *gcwq;
2313db700897SOleg Nesterov 	struct cpu_workqueue_struct *cwq;
2314db700897SOleg Nesterov 	struct wq_barrier barr;
2315db700897SOleg Nesterov 
2316db700897SOleg Nesterov 	might_sleep();
23177a22ad75STejun Heo 	gcwq = get_work_gcwq(work);
23187a22ad75STejun Heo 	if (!gcwq)
2319db700897SOleg Nesterov 		return 0;
2320db700897SOleg Nesterov 
23218b03ae3cSTejun Heo 	spin_lock_irq(&gcwq->lock);
2322db700897SOleg Nesterov 	if (!list_empty(&work->entry)) {
2323db700897SOleg Nesterov 		/*
2324db700897SOleg Nesterov 		 * See the comment near try_to_grab_pending()->smp_rmb().
23257a22ad75STejun Heo 		 * If it was re-queued to a different gcwq under us, we
23267a22ad75STejun Heo 		 * are not going to wait.
2327db700897SOleg Nesterov 		 */
2328db700897SOleg Nesterov 		smp_rmb();
23297a22ad75STejun Heo 		cwq = get_work_cwq(work);
23307a22ad75STejun Heo 		if (unlikely(!cwq || gcwq != cwq->gcwq))
23314690c4abSTejun Heo 			goto already_gone;
2332db700897SOleg Nesterov 	} else {
23337a22ad75STejun Heo 		worker = find_worker_executing_work(gcwq, work);
2334affee4b2STejun Heo 		if (!worker)
23354690c4abSTejun Heo 			goto already_gone;
23367a22ad75STejun Heo 		cwq = worker->current_cwq;
2337db700897SOleg Nesterov 	}
2338db700897SOleg Nesterov 
2339affee4b2STejun Heo 	insert_wq_barrier(cwq, &barr, work, worker);
23408b03ae3cSTejun Heo 	spin_unlock_irq(&gcwq->lock);
23417a22ad75STejun Heo 
2342db700897SOleg Nesterov 	lock_map_acquire(&cwq->wq->lockdep_map);
2343db700897SOleg Nesterov 	lock_map_release(&cwq->wq->lockdep_map);
2344db700897SOleg Nesterov 
2345db700897SOleg Nesterov 	wait_for_completion(&barr.done);
2346dc186ad7SThomas Gleixner 	destroy_work_on_stack(&barr.work);
2347db700897SOleg Nesterov 	return 1;
23484690c4abSTejun Heo already_gone:
23498b03ae3cSTejun Heo 	spin_unlock_irq(&gcwq->lock);
23504690c4abSTejun Heo 	return 0;
2351db700897SOleg Nesterov }
2352db700897SOleg Nesterov EXPORT_SYMBOL_GPL(flush_work);
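
/*
 * Example (editor's sketch, names hypothetical): flushing one specific
 * work item after stopping its source of requeueing, as the comment
 * above requires.  flush_work() returns 0 when the work was already
 * idle.
 *
 *	static void mydrv_quiesce(struct mydrv *drv)
 *	{
 *		disable_irq(drv->irq);
 *		if (!flush_work(&drv->io_work))
 *			pr_debug("mydrv: io_work was already idle\n");
 *	}
 */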
2353db700897SOleg Nesterov 
23546e84d644SOleg Nesterov /*
23551f1f642eSOleg Nesterov  * Upon a successful return (>= 0), the caller "owns" the WORK_STRUCT_PENDING bit,
23566e84d644SOleg Nesterov  * so this work can't be re-armed in any way.
23576e84d644SOleg Nesterov  */
23586e84d644SOleg Nesterov static int try_to_grab_pending(struct work_struct *work)
23596e84d644SOleg Nesterov {
23608b03ae3cSTejun Heo 	struct global_cwq *gcwq;
23611f1f642eSOleg Nesterov 	int ret = -1;
23626e84d644SOleg Nesterov 
236322df02bbSTejun Heo 	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)))
23641f1f642eSOleg Nesterov 		return 0;
23656e84d644SOleg Nesterov 
23666e84d644SOleg Nesterov 	/*
23676e84d644SOleg Nesterov 	 * The queueing is in progress, or it is already queued. Try to
23686e84d644SOleg Nesterov 	 * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
23696e84d644SOleg Nesterov 	 */
23707a22ad75STejun Heo 	gcwq = get_work_gcwq(work);
23717a22ad75STejun Heo 	if (!gcwq)
23726e84d644SOleg Nesterov 		return ret;
23736e84d644SOleg Nesterov 
23748b03ae3cSTejun Heo 	spin_lock_irq(&gcwq->lock);
23756e84d644SOleg Nesterov 	if (!list_empty(&work->entry)) {
23766e84d644SOleg Nesterov 		/*
23777a22ad75STejun Heo 		 * This work is queued, but perhaps we locked the wrong gcwq.
23786e84d644SOleg Nesterov 		 * In that case we must see the new value after rmb(), see
23796e84d644SOleg Nesterov 		 * insert_work()->wmb().
23806e84d644SOleg Nesterov 		 */
23816e84d644SOleg Nesterov 		smp_rmb();
23827a22ad75STejun Heo 		if (gcwq == get_work_gcwq(work)) {
2383dc186ad7SThomas Gleixner 			debug_work_deactivate(work);
23846e84d644SOleg Nesterov 			list_del_init(&work->entry);
23857a22ad75STejun Heo 			cwq_dec_nr_in_flight(get_work_cwq(work),
23867a22ad75STejun Heo 					     get_work_color(work));
23876e84d644SOleg Nesterov 			ret = 1;
23886e84d644SOleg Nesterov 		}
23896e84d644SOleg Nesterov 	}
23908b03ae3cSTejun Heo 	spin_unlock_irq(&gcwq->lock);
23916e84d644SOleg Nesterov 
23926e84d644SOleg Nesterov 	return ret;
23936e84d644SOleg Nesterov }
23946e84d644SOleg Nesterov 
23957a22ad75STejun Heo static void wait_on_cpu_work(struct global_cwq *gcwq, struct work_struct *work)
2396b89deed3SOleg Nesterov {
2397b89deed3SOleg Nesterov 	struct wq_barrier barr;
2398affee4b2STejun Heo 	struct worker *worker;
2399b89deed3SOleg Nesterov 
24008b03ae3cSTejun Heo 	spin_lock_irq(&gcwq->lock);
2401b89deed3SOleg Nesterov 
24027a22ad75STejun Heo 	worker = find_worker_executing_work(gcwq, work);
24037a22ad75STejun Heo 	if (unlikely(worker))
24047a22ad75STejun Heo 		insert_wq_barrier(worker->current_cwq, &barr, work, worker);
2405affee4b2STejun Heo 
24068b03ae3cSTejun Heo 	spin_unlock_irq(&gcwq->lock);
2407b89deed3SOleg Nesterov 
2408affee4b2STejun Heo 	if (unlikely(worker)) {
2409b89deed3SOleg Nesterov 		wait_for_completion(&barr.done);
2410dc186ad7SThomas Gleixner 		destroy_work_on_stack(&barr.work);
2411dc186ad7SThomas Gleixner 	}
2412b89deed3SOleg Nesterov }
2413b89deed3SOleg Nesterov 
24146e84d644SOleg Nesterov static void wait_on_work(struct work_struct *work)
2415b89deed3SOleg Nesterov {
2416b1f4ec17SOleg Nesterov 	int cpu;
2417b89deed3SOleg Nesterov 
2418f293ea92SOleg Nesterov 	might_sleep();
2419f293ea92SOleg Nesterov 
24203295f0efSIngo Molnar 	lock_map_acquire(&work->lockdep_map);
24213295f0efSIngo Molnar 	lock_map_release(&work->lockdep_map);
24224e6045f1SJohannes Berg 
2423f3421797STejun Heo 	for_each_gcwq_cpu(cpu)
24247a22ad75STejun Heo 		wait_on_cpu_work(get_gcwq(cpu), work);
24256e84d644SOleg Nesterov }
24266e84d644SOleg Nesterov 
24271f1f642eSOleg Nesterov static int __cancel_work_timer(struct work_struct *work,
24281f1f642eSOleg Nesterov 				struct timer_list* timer)
24291f1f642eSOleg Nesterov {
24301f1f642eSOleg Nesterov 	int ret;
24311f1f642eSOleg Nesterov 
24321f1f642eSOleg Nesterov 	do {
24331f1f642eSOleg Nesterov 		ret = (timer && likely(del_timer(timer)));
24341f1f642eSOleg Nesterov 		if (!ret)
24351f1f642eSOleg Nesterov 			ret = try_to_grab_pending(work);
24361f1f642eSOleg Nesterov 		wait_on_work(work);
24371f1f642eSOleg Nesterov 	} while (unlikely(ret < 0));
24381f1f642eSOleg Nesterov 
24397a22ad75STejun Heo 	clear_work_data(work);
24401f1f642eSOleg Nesterov 	return ret;
24411f1f642eSOleg Nesterov }
24421f1f642eSOleg Nesterov 
24436e84d644SOleg Nesterov /**
24446e84d644SOleg Nesterov  * cancel_work_sync - block until a work_struct's callback has terminated
24456e84d644SOleg Nesterov  * @work: the work which is to be flushed
24466e84d644SOleg Nesterov  *
24471f1f642eSOleg Nesterov  * Returns true if @work was pending.
24481f1f642eSOleg Nesterov  *
24496e84d644SOleg Nesterov  * cancel_work_sync() will cancel the work if it is queued. If the work's
24506e84d644SOleg Nesterov  * callback appears to be running, cancel_work_sync() will block until it
24516e84d644SOleg Nesterov  * has completed.
24526e84d644SOleg Nesterov  *
24536e84d644SOleg Nesterov  * It is possible to use this function if the work re-queues itself. It can
24546e84d644SOleg Nesterov  * cancel the work even if it migrates to another workqueue; however, in that
24556e84d644SOleg Nesterov  * case it only guarantees that work->func() has completed on the last queued
24566e84d644SOleg Nesterov  * workqueue.
24576e84d644SOleg Nesterov  *
24586e84d644SOleg Nesterov  * cancel_work_sync(&delayed_work->work) should be used only if ->timer is not
24596e84d644SOleg Nesterov  * pending, otherwise it goes into a busy-wait loop until the timer expires.
24606e84d644SOleg Nesterov  *
24616e84d644SOleg Nesterov  * The caller must ensure that workqueue_struct on which this work was last
24626e84d644SOleg Nesterov  * queued can't be destroyed before this function returns.
24636e84d644SOleg Nesterov  */
24641f1f642eSOleg Nesterov int cancel_work_sync(struct work_struct *work)
24656e84d644SOleg Nesterov {
24661f1f642eSOleg Nesterov 	return __cancel_work_timer(work, NULL);
2467b89deed3SOleg Nesterov }
246828e53bddSOleg Nesterov EXPORT_SYMBOL_GPL(cancel_work_sync);
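
/*
 * Example (editor's sketch, names hypothetical): typical device removal.
 * Unlike flush_work(), no "prevent requeueing" preparation is needed
 * first, because cancel_work_sync() grabs the PENDING bit itself.
 *
 *	static void mydrv_remove(struct mydrv *drv)
 *	{
 *		cancel_work_sync(&drv->io_work);
 *		kfree(drv);
 *	}
 */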
2469b89deed3SOleg Nesterov 
24706e84d644SOleg Nesterov /**
2471f5a421a4SOleg Nesterov  * cancel_delayed_work_sync - reliably kill off a delayed work.
24726e84d644SOleg Nesterov  * @dwork: the delayed work struct
24736e84d644SOleg Nesterov  *
24741f1f642eSOleg Nesterov  * Returns true if @dwork was pending.
24751f1f642eSOleg Nesterov  *
24766e84d644SOleg Nesterov  * It is possible to use this function if @dwork rearms itself via queue_work()
24776e84d644SOleg Nesterov  * or queue_delayed_work(). See also the comment for cancel_work_sync().
24786e84d644SOleg Nesterov  */
24791f1f642eSOleg Nesterov int cancel_delayed_work_sync(struct delayed_work *dwork)
24806e84d644SOleg Nesterov {
24811f1f642eSOleg Nesterov 	return __cancel_work_timer(&dwork->work, &dwork->timer);
24826e84d644SOleg Nesterov }
2483f5a421a4SOleg Nesterov EXPORT_SYMBOL(cancel_delayed_work_sync);
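
/*
 * Example (editor's sketch, names hypothetical): killing a self-rearming
 * poller.  The work function keeps rescheduling itself until it is
 * stopped with cancel_delayed_work_sync().
 *
 *	static void mydrv_poll_fn(struct work_struct *work)
 *	{
 *		struct mydrv *drv = container_of(to_delayed_work(work),
 *						 struct mydrv, poll_work);
 *
 *		mydrv_poll_hw(drv);
 *		schedule_delayed_work(&drv->poll_work, HZ);
 *	}
 *
 *	static void mydrv_stop_polling(struct mydrv *drv)
 *	{
 *		cancel_delayed_work_sync(&drv->poll_work);
 *	}
 */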
24841da177e4SLinus Torvalds 
24850fcb78c2SRolf Eike Beer /**
24860fcb78c2SRolf Eike Beer  * schedule_work - put work task in global workqueue
24870fcb78c2SRolf Eike Beer  * @work: job to be done
24880fcb78c2SRolf Eike Beer  *
24895b0f437dSBart Van Assche  * Returns zero if @work was already on the kernel-global workqueue and
24905b0f437dSBart Van Assche  * non-zero otherwise.
24915b0f437dSBart Van Assche  *
24925b0f437dSBart Van Assche  * This puts a job in the kernel-global workqueue if it was not already
24935b0f437dSBart Van Assche  * queued and leaves it in the same position on the kernel-global
24945b0f437dSBart Van Assche  * workqueue otherwise.
24950fcb78c2SRolf Eike Beer  */
24967ad5b3a5SHarvey Harrison int schedule_work(struct work_struct *work)
24971da177e4SLinus Torvalds {
2498d320c038STejun Heo 	return queue_work(system_wq, work);
24991da177e4SLinus Torvalds }
2500ae90dd5dSDave Jones EXPORT_SYMBOL(schedule_work);
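
/*
 * Example (editor's sketch, names hypothetical): the classic pattern of
 * deferring sleepable processing from an interrupt handler to process
 * context via the kernel-global workqueue.  The work function may sleep;
 * the interrupt handler may not, but schedule_work() is safe there.
 *
 *	static void mydrv_rx_fn(struct work_struct *work)
 *	{
 *		struct mydrv *drv = container_of(work, struct mydrv,
 *						 rx_work);
 *
 *		mydrv_process_rx(drv);
 *	}
 *
 *	static irqreturn_t mydrv_irq(int irq, void *data)
 *	{
 *		struct mydrv *drv = data;
 *
 *		schedule_work(&drv->rx_work);
 *		return IRQ_HANDLED;
 *	}
 */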
25011da177e4SLinus Torvalds 
2502c1a220e7SZhang Rui /**
2503c1a220e7SZhang Rui  * schedule_work_on - put work task on a specific cpu
2504c1a220e7SZhang Rui  * @cpu: cpu to put the work task on
2505c1a220e7SZhang Rui  * @work: job to be done
2506c1a220e7SZhang Rui  *
2507c1a220e7SZhang Rui  * This puts a job on a specific cpu.
2508c1a220e7SZhang Rui  */
2509c1a220e7SZhang Rui int schedule_work_on(int cpu, struct work_struct *work)
2510c1a220e7SZhang Rui {
2511d320c038STejun Heo 	return queue_work_on(cpu, system_wq, work);
2512c1a220e7SZhang Rui }
2513c1a220e7SZhang Rui EXPORT_SYMBOL(schedule_work_on);
2514c1a220e7SZhang Rui 
25150fcb78c2SRolf Eike Beer /**
25160fcb78c2SRolf Eike Beer  * schedule_delayed_work - put work task in global workqueue after delay
251752bad64dSDavid Howells  * @dwork: job to be done
251852bad64dSDavid Howells  * @delay: number of jiffies to wait or 0 for immediate execution
25190fcb78c2SRolf Eike Beer  *
25200fcb78c2SRolf Eike Beer  * After waiting for a given time this puts a job in the kernel-global
25210fcb78c2SRolf Eike Beer  * workqueue.
25220fcb78c2SRolf Eike Beer  */
25237ad5b3a5SHarvey Harrison int schedule_delayed_work(struct delayed_work *dwork,
252482f67cd9SIngo Molnar 					unsigned long delay)
25251da177e4SLinus Torvalds {
2526d320c038STejun Heo 	return queue_delayed_work(system_wq, dwork, delay);
25271da177e4SLinus Torvalds }
2528ae90dd5dSDave Jones EXPORT_SYMBOL(schedule_delayed_work);
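
/*
 * Example (editor's sketch, names hypothetical): run mydrv_timeout_fn()
 * in process context roughly two seconds from now.
 *
 *	INIT_DELAYED_WORK(&drv->timeout_work, mydrv_timeout_fn);
 *	schedule_delayed_work(&drv->timeout_work, 2 * HZ);
 */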
25291da177e4SLinus Torvalds 
25300fcb78c2SRolf Eike Beer /**
25318c53e463SLinus Torvalds  * flush_delayed_work - block until a delayed_work's callback has terminated
25328c53e463SLinus Torvalds  * @dwork: the delayed work which is to be flushed
25338c53e463SLinus Torvalds  *
25348c53e463SLinus Torvalds  * Any timeout is cancelled, and any pending work is run immediately.
25358c53e463SLinus Torvalds  */
25368c53e463SLinus Torvalds void flush_delayed_work(struct delayed_work *dwork)
25378c53e463SLinus Torvalds {
25388c53e463SLinus Torvalds 	if (del_timer_sync(&dwork->timer)) {
25397a22ad75STejun Heo 		__queue_work(get_cpu(), get_work_cwq(&dwork->work)->wq,
25404690c4abSTejun Heo 			     &dwork->work);
25418c53e463SLinus Torvalds 		put_cpu();
25428c53e463SLinus Torvalds 	}
25438c53e463SLinus Torvalds 	flush_work(&dwork->work);
25448c53e463SLinus Torvalds }
25458c53e463SLinus Torvalds EXPORT_SYMBOL(flush_delayed_work);
25468c53e463SLinus Torvalds 
25478c53e463SLinus Torvalds /**
25480fcb78c2SRolf Eike Beer  * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
25490fcb78c2SRolf Eike Beer  * @cpu: cpu to use
255052bad64dSDavid Howells  * @dwork: job to be done
25510fcb78c2SRolf Eike Beer  * @delay: number of jiffies to wait
25520fcb78c2SRolf Eike Beer  *
25530fcb78c2SRolf Eike Beer  * After waiting for a given time this puts a job in the kernel-global
25540fcb78c2SRolf Eike Beer  * workqueue on the specified CPU.
25550fcb78c2SRolf Eike Beer  */
25561da177e4SLinus Torvalds int schedule_delayed_work_on(int cpu,
255752bad64dSDavid Howells 			struct delayed_work *dwork, unsigned long delay)
25581da177e4SLinus Torvalds {
2559d320c038STejun Heo 	return queue_delayed_work_on(cpu, system_wq, dwork, delay);
25601da177e4SLinus Torvalds }
2561ae90dd5dSDave Jones EXPORT_SYMBOL(schedule_delayed_work_on);
25621da177e4SLinus Torvalds 
2563b6136773SAndrew Morton /**
2564b6136773SAndrew Morton  * schedule_on_each_cpu - call a function on each online CPU from keventd
2565b6136773SAndrew Morton  * @func: the function to call
2566b6136773SAndrew Morton  *
2567b6136773SAndrew Morton  * Returns zero on success.
2568b6136773SAndrew Morton  * Returns a negative errno on failure.
2569b6136773SAndrew Morton  *
2570b6136773SAndrew Morton  * schedule_on_each_cpu() is very slow.
2571b6136773SAndrew Morton  */
257265f27f38SDavid Howells int schedule_on_each_cpu(work_func_t func)
257315316ba8SChristoph Lameter {
257415316ba8SChristoph Lameter 	int cpu;
257538f51568SNamhyung Kim 	struct work_struct __percpu *works;
257615316ba8SChristoph Lameter 
2577b6136773SAndrew Morton 	works = alloc_percpu(struct work_struct);
2578b6136773SAndrew Morton 	if (!works)
257915316ba8SChristoph Lameter 		return -ENOMEM;
2580b6136773SAndrew Morton 
258195402b38SGautham R Shenoy 	get_online_cpus();
258293981800STejun Heo 
258315316ba8SChristoph Lameter 	for_each_online_cpu(cpu) {
25849bfb1839SIngo Molnar 		struct work_struct *work = per_cpu_ptr(works, cpu);
25859bfb1839SIngo Molnar 
25869bfb1839SIngo Molnar 		INIT_WORK(work, func);
25878de6d308SOleg Nesterov 		schedule_work_on(cpu, work);
258815316ba8SChristoph Lameter 	}
258993981800STejun Heo 
259093981800STejun Heo 	for_each_online_cpu(cpu)
25918616a89aSOleg Nesterov 		flush_work(per_cpu_ptr(works, cpu));
259293981800STejun Heo 
259395402b38SGautham R Shenoy 	put_online_cpus();
2594b6136773SAndrew Morton 	free_percpu(works);
259515316ba8SChristoph Lameter 	return 0;
259615316ba8SChristoph Lameter }
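
/*
 * Example (editor's sketch, names hypothetical): draining a per-cpu
 * cache on every online CPU.  The work function runs in process context
 * on each CPU and may sleep.
 *
 *	static void mydrv_drain_fn(struct work_struct *unused)
 *	{
 *		mydrv_drain_local_cache();
 *	}
 *
 *	int ret = schedule_on_each_cpu(mydrv_drain_fn);
 */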
259715316ba8SChristoph Lameter 
2598eef6a7d5SAlan Stern /**
2599eef6a7d5SAlan Stern  * flush_scheduled_work - ensure that any scheduled work has run to completion.
2600eef6a7d5SAlan Stern  *
2601eef6a7d5SAlan Stern  * Forces execution of the kernel-global workqueue and blocks until its
2602eef6a7d5SAlan Stern  * completion.
2603eef6a7d5SAlan Stern  *
2604eef6a7d5SAlan Stern  * Think twice before calling this function!  It's very easy to get into
2605eef6a7d5SAlan Stern  * trouble if you don't take great care.  Either of the following situations
2606eef6a7d5SAlan Stern  * will lead to deadlock:
2607eef6a7d5SAlan Stern  *
2608eef6a7d5SAlan Stern  *	One of the work items currently on the workqueue needs to acquire
2609eef6a7d5SAlan Stern  *	a lock held by your code or its caller.
2610eef6a7d5SAlan Stern  *
2611eef6a7d5SAlan Stern  *	Your code is running in the context of a work routine.
2612eef6a7d5SAlan Stern  *
2613eef6a7d5SAlan Stern  * They will be detected by lockdep when they occur, but the first might not
2614eef6a7d5SAlan Stern  * occur very often.  It depends on what work items are on the workqueue and
2615eef6a7d5SAlan Stern  * what locks they need, which you have no control over.
2616eef6a7d5SAlan Stern  *
2617eef6a7d5SAlan Stern  * In most situations flushing the entire workqueue is overkill; you merely
2618eef6a7d5SAlan Stern  * need to know that a particular work item isn't queued and isn't running.
2619eef6a7d5SAlan Stern  * In such cases you should use cancel_delayed_work_sync() or
2620eef6a7d5SAlan Stern  * cancel_work_sync() instead.
2621eef6a7d5SAlan Stern  */
26221da177e4SLinus Torvalds void flush_scheduled_work(void)
26231da177e4SLinus Torvalds {
2624d320c038STejun Heo 	flush_workqueue(system_wq);
26251da177e4SLinus Torvalds }
2626ae90dd5dSDave Jones EXPORT_SYMBOL(flush_scheduled_work);
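
/*
 * Example (editor's sketch, names hypothetical) of the first deadlock
 * scenario above: a queued mydrv_work_fn() takes drv->lock, while the
 * flushing code holds that same lock:
 *
 *	mutex_lock(&drv->lock);
 *	flush_scheduled_work();
 *	mutex_unlock(&drv->lock);
 *
 * flush_scheduled_work() waits for mydrv_work_fn(), which in turn waits
 * for drv->lock.  Calling cancel_work_sync(&drv->work) with the lock
 * dropped avoids both the deadlock and the flush of unrelated works.
 */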
26271da177e4SLinus Torvalds 
26281da177e4SLinus Torvalds /**
26291fa44ecaSJames Bottomley  * execute_in_process_context - reliably execute the routine with user context
26301fa44ecaSJames Bottomley  * @fn:		the function to execute
26311fa44ecaSJames Bottomley  * @ew:		guaranteed storage for the execute work structure (must
26321fa44ecaSJames Bottomley  *		be available when the work executes)
26331fa44ecaSJames Bottomley  *
26341fa44ecaSJames Bottomley  * Executes the function immediately if process context is available,
26351fa44ecaSJames Bottomley  * otherwise schedules the function for delayed execution.
26361fa44ecaSJames Bottomley  *
26371fa44ecaSJames Bottomley  * Returns:	0 - function was executed
26381fa44ecaSJames Bottomley  *		1 - function was scheduled for execution
26391fa44ecaSJames Bottomley  */
264065f27f38SDavid Howells int execute_in_process_context(work_func_t fn, struct execute_work *ew)
26411fa44ecaSJames Bottomley {
26421fa44ecaSJames Bottomley 	if (!in_interrupt()) {
264365f27f38SDavid Howells 		fn(&ew->work);
26441fa44ecaSJames Bottomley 		return 0;
26451fa44ecaSJames Bottomley 	}
26461fa44ecaSJames Bottomley 
264765f27f38SDavid Howells 	INIT_WORK(&ew->work, fn);
26481fa44ecaSJames Bottomley 	schedule_work(&ew->work);
26491fa44ecaSJames Bottomley 
26501fa44ecaSJames Bottomley 	return 1;
26511fa44ecaSJames Bottomley }
26521fa44ecaSJames Bottomley EXPORT_SYMBOL_GPL(execute_in_process_context);
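
/*
 * Example (editor's sketch, names hypothetical): freeing an object that
 * needs process context from a path that may run in interrupt context.
 * @ew must stay valid until the work executes, so it is embedded in the
 * object itself.
 *
 *	struct mydrv {
 *		struct execute_work free_ew;
 *	};
 *
 *	static void mydrv_free_fn(struct work_struct *work)
 *	{
 *		kfree(container_of(work, struct mydrv, free_ew.work));
 *	}
 *
 *	execute_in_process_context(mydrv_free_fn, &drv->free_ew);
 */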
26531fa44ecaSJames Bottomley 
26541da177e4SLinus Torvalds int keventd_up(void)
26551da177e4SLinus Torvalds {
2656d320c038STejun Heo 	return system_wq != NULL;
26571da177e4SLinus Torvalds }
26581da177e4SLinus Torvalds 
2659bdbc5dd7STejun Heo static int alloc_cwqs(struct workqueue_struct *wq)
26601da177e4SLinus Torvalds {
26613af24433SOleg Nesterov 	/*
26620f900049STejun Heo 	 * cwqs are forcibly aligned to 1 << WORK_STRUCT_FLAG_BITS bytes.
26630f900049STejun Heo 	 * Make sure that the alignment isn't lower than that of
26640f900049STejun Heo 	 * unsigned long long.
26653af24433SOleg Nesterov 	 */
26660f900049STejun Heo 	const size_t size = sizeof(struct cpu_workqueue_struct);
26670f900049STejun Heo 	const size_t align = max_t(size_t, 1 << WORK_STRUCT_FLAG_BITS,
26680f900049STejun Heo 				   __alignof__(unsigned long long));
2669931ac77eSTejun Heo #ifdef CONFIG_SMP
2670931ac77eSTejun Heo 	bool percpu = !(wq->flags & WQ_UNBOUND);
2671931ac77eSTejun Heo #else
2672931ac77eSTejun Heo 	bool percpu = false;
2673931ac77eSTejun Heo #endif
26743af24433SOleg Nesterov 
2675931ac77eSTejun Heo 	if (percpu)
2676f3421797STejun Heo 		wq->cpu_wq.pcpu = __alloc_percpu(size, align);
2677931ac77eSTejun Heo 	else {
26780f900049STejun Heo 		void *ptr;
2679e1d8aa9fSFrederic Weisbecker 
26800f900049STejun Heo 		/*
2681f3421797STejun Heo 		 * Allocate enough room to align cwq and put an extra
2682f3421797STejun Heo 		 * pointer at the end pointing back to the originally
2683f3421797STejun Heo 		 * allocated pointer, which is later used for freeing.
26840f900049STejun Heo 		 */
2685bdbc5dd7STejun Heo 		ptr = kzalloc(size + align + sizeof(void *), GFP_KERNEL);
2686bdbc5dd7STejun Heo 		if (ptr) {
2687bdbc5dd7STejun Heo 			wq->cpu_wq.single = PTR_ALIGN(ptr, align);
2688bdbc5dd7STejun Heo 			*(void **)(wq->cpu_wq.single + 1) = ptr;
2689bdbc5dd7STejun Heo 		}
26903af24433SOleg Nesterov 	}
26913af24433SOleg Nesterov 
26920f900049STejun Heo 	/* just in case, make sure it's actually aligned */
2693bdbc5dd7STejun Heo 	BUG_ON(!IS_ALIGNED(wq->cpu_wq.v, align));
2694bdbc5dd7STejun Heo 	return wq->cpu_wq.v ? 0 : -ENOMEM;
26950f900049STejun Heo }
26960f900049STejun Heo 
2697bdbc5dd7STejun Heo static void free_cwqs(struct workqueue_struct *wq)
269806ba38a9SOleg Nesterov {
2699931ac77eSTejun Heo #ifdef CONFIG_SMP
2700931ac77eSTejun Heo 	bool percpu = !(wq->flags & WQ_UNBOUND);
2701931ac77eSTejun Heo #else
2702931ac77eSTejun Heo 	bool percpu = false;
2703931ac77eSTejun Heo #endif
270406ba38a9SOleg Nesterov 
2705931ac77eSTejun Heo 	if (percpu)
2706bdbc5dd7STejun Heo 		free_percpu(wq->cpu_wq.pcpu);
2707f3421797STejun Heo 	else if (wq->cpu_wq.single) {
2708f3421797STejun Heo 		/* the pointer to free is stored right after the cwq */
2709f3421797STejun Heo 		kfree(*(void **)(wq->cpu_wq.single + 1));
271006ba38a9SOleg Nesterov 	}
271106ba38a9SOleg Nesterov }
271206ba38a9SOleg Nesterov 
2713f3421797STejun Heo static int wq_clamp_max_active(int max_active, unsigned int flags,
2714f3421797STejun Heo 			       const char *name)
2715b71ab8c2STejun Heo {
2716f3421797STejun Heo 	int lim = flags & WQ_UNBOUND ? WQ_UNBOUND_MAX_ACTIVE : WQ_MAX_ACTIVE;
2717f3421797STejun Heo 
2718f3421797STejun Heo 	if (max_active < 1 || max_active > lim)
2719b71ab8c2STejun Heo 		printk(KERN_WARNING "workqueue: max_active %d requested for %s "
2720b71ab8c2STejun Heo 		       "is out of range, clamping between %d and %d\n",
2721f3421797STejun Heo 		       max_active, name, 1, lim);
2722b71ab8c2STejun Heo 
2723f3421797STejun Heo 	return clamp_val(max_active, 1, lim);
2724b71ab8c2STejun Heo }
2725b71ab8c2STejun Heo 
2726d320c038STejun Heo struct workqueue_struct *__alloc_workqueue_key(const char *name,
272797e37d7bSTejun Heo 					       unsigned int flags,
27281e19ffc6STejun Heo 					       int max_active,
2729eb13ba87SJohannes Berg 					       struct lock_class_key *key,
2730eb13ba87SJohannes Berg 					       const char *lock_name)
27313af24433SOleg Nesterov {
27323af24433SOleg Nesterov 	struct workqueue_struct *wq;
2733c34056a3STejun Heo 	unsigned int cpu;
27343af24433SOleg Nesterov 
2735f3421797STejun Heo 	/*
2736f3421797STejun Heo 	 * Unbound workqueues aren't concurrency managed and should be
2737f3421797STejun Heo 	 * dispatched to workers immediately.
2738f3421797STejun Heo 	 */
2739f3421797STejun Heo 	if (flags & WQ_UNBOUND)
2740f3421797STejun Heo 		flags |= WQ_HIGHPRI;
2741f3421797STejun Heo 
2742d320c038STejun Heo 	max_active = max_active ?: WQ_DFL_ACTIVE;
2743f3421797STejun Heo 	max_active = wq_clamp_max_active(max_active, flags, name);
27443af24433SOleg Nesterov 
27453af24433SOleg Nesterov 	wq = kzalloc(sizeof(*wq), GFP_KERNEL);
27463af24433SOleg Nesterov 	if (!wq)
27474690c4abSTejun Heo 		goto err;
27483af24433SOleg Nesterov 
274997e37d7bSTejun Heo 	wq->flags = flags;
2750a0a1a5fdSTejun Heo 	wq->saved_max_active = max_active;
275173f53c4aSTejun Heo 	mutex_init(&wq->flush_mutex);
275273f53c4aSTejun Heo 	atomic_set(&wq->nr_cwqs_to_flush, 0);
275373f53c4aSTejun Heo 	INIT_LIST_HEAD(&wq->flusher_queue);
275473f53c4aSTejun Heo 	INIT_LIST_HEAD(&wq->flusher_overflow);
27553af24433SOleg Nesterov 
27563af24433SOleg Nesterov 	wq->name = name;
2757eb13ba87SJohannes Berg 	lockdep_init_map(&wq->lockdep_map, lock_name, key, 0);
2758cce1a165SOleg Nesterov 	INIT_LIST_HEAD(&wq->list);
27593af24433SOleg Nesterov 
2760bdbc5dd7STejun Heo 	if (alloc_cwqs(wq) < 0)
2761bdbc5dd7STejun Heo 		goto err;
2762bdbc5dd7STejun Heo 
2763f3421797STejun Heo 	for_each_cwq_cpu(cpu, wq) {
27641537663fSTejun Heo 		struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
27658b03ae3cSTejun Heo 		struct global_cwq *gcwq = get_gcwq(cpu);
27661537663fSTejun Heo 
27670f900049STejun Heo 		BUG_ON((unsigned long)cwq & WORK_STRUCT_FLAG_MASK);
27688b03ae3cSTejun Heo 		cwq->gcwq = gcwq;
2769c34056a3STejun Heo 		cwq->wq = wq;
277073f53c4aSTejun Heo 		cwq->flush_color = -1;
27711e19ffc6STejun Heo 		cwq->max_active = max_active;
27721e19ffc6STejun Heo 		INIT_LIST_HEAD(&cwq->delayed_works);
2773e22bee78STejun Heo 	}
27741537663fSTejun Heo 
2775e22bee78STejun Heo 	if (flags & WQ_RESCUER) {
2776e22bee78STejun Heo 		struct worker *rescuer;
2777e22bee78STejun Heo 
2778f2e005aaSTejun Heo 		if (!alloc_mayday_mask(&wq->mayday_mask, GFP_KERNEL))
2779e22bee78STejun Heo 			goto err;
2780e22bee78STejun Heo 
2781e22bee78STejun Heo 		wq->rescuer = rescuer = alloc_worker();
2782e22bee78STejun Heo 		if (!rescuer)
2783e22bee78STejun Heo 			goto err;
2784e22bee78STejun Heo 
2785e22bee78STejun Heo 		rescuer->task = kthread_create(rescuer_thread, wq, "%s", name);
2786e22bee78STejun Heo 		if (IS_ERR(rescuer->task))
2787e22bee78STejun Heo 			goto err;
2788e22bee78STejun Heo 
2789e22bee78STejun Heo 		rescuer->task->flags |= PF_THREAD_BOUND;
2790e22bee78STejun Heo 		wake_up_process(rescuer->task);
27913af24433SOleg Nesterov 	}
27921537663fSTejun Heo 
27933af24433SOleg Nesterov 	/*
2794a0a1a5fdSTejun Heo 	 * workqueue_lock protects global freeze state and workqueues
2795a0a1a5fdSTejun Heo 	 * list.  Grab it, set max_active accordingly and add the new
2796a0a1a5fdSTejun Heo 	 * workqueue to workqueues list.
27973af24433SOleg Nesterov 	 */
27983af24433SOleg Nesterov 	spin_lock(&workqueue_lock);
2799a0a1a5fdSTejun Heo 
2800a0a1a5fdSTejun Heo 	if (workqueue_freezing && wq->flags & WQ_FREEZEABLE)
2801f3421797STejun Heo 		for_each_cwq_cpu(cpu, wq)
2802a0a1a5fdSTejun Heo 			get_cwq(cpu, wq)->max_active = 0;
2803a0a1a5fdSTejun Heo 
28043af24433SOleg Nesterov 	list_add(&wq->list, &workqueues);
2805a0a1a5fdSTejun Heo 
28063af24433SOleg Nesterov 	spin_unlock(&workqueue_lock);
28073af24433SOleg Nesterov 
28083af24433SOleg Nesterov 	return wq;
28094690c4abSTejun Heo err:
28104690c4abSTejun Heo 	if (wq) {
2811bdbc5dd7STejun Heo 		free_cwqs(wq);
2812f2e005aaSTejun Heo 		free_mayday_mask(wq->mayday_mask);
2813e22bee78STejun Heo 		kfree(wq->rescuer);
28144690c4abSTejun Heo 		kfree(wq);
28153af24433SOleg Nesterov 	}
28164690c4abSTejun Heo 	return NULL;
28171da177e4SLinus Torvalds }
2818d320c038STejun Heo EXPORT_SYMBOL_GPL(__alloc_workqueue_key);
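
/*
 * Example (editor's sketch): callers normally reach this through the
 * alloc_workqueue() macro, which supplies the lockdep key.  A driver
 * wanting its own unbound queue with a rescuer might do:
 *
 *	struct workqueue_struct *wq;
 *
 *	wq = alloc_workqueue("mydrv", WQ_UNBOUND | WQ_RESCUER, 1);
 *	if (!wq)
 *		return -ENOMEM;
 */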
28191da177e4SLinus Torvalds 
28203af24433SOleg Nesterov /**
28213af24433SOleg Nesterov  * destroy_workqueue - safely terminate a workqueue
28223af24433SOleg Nesterov  * @wq: target workqueue
28233af24433SOleg Nesterov  *
28243af24433SOleg Nesterov  * Safely destroy a workqueue. All work currently pending will be done first.
28253af24433SOleg Nesterov  */
28263af24433SOleg Nesterov void destroy_workqueue(struct workqueue_struct *wq)
28273af24433SOleg Nesterov {
2828c8e55f36STejun Heo 	unsigned int cpu;
28293af24433SOleg Nesterov 
2830a0a1a5fdSTejun Heo 	flush_workqueue(wq);
2831a0a1a5fdSTejun Heo 
2832a0a1a5fdSTejun Heo 	/*
2833a0a1a5fdSTejun Heo 	 * The wq list is used to freeze wqs; remove @wq from the list
2834a0a1a5fdSTejun Heo 	 * only after flushing is complete, in case a freeze races us.
2835a0a1a5fdSTejun Heo 	 */
283695402b38SGautham R Shenoy 	spin_lock(&workqueue_lock);
28373af24433SOleg Nesterov 	list_del(&wq->list);
283895402b38SGautham R Shenoy 	spin_unlock(&workqueue_lock);
28393af24433SOleg Nesterov 
2840e22bee78STejun Heo 	/* sanity check */
2841f3421797STejun Heo 	for_each_cwq_cpu(cpu, wq) {
284273f53c4aSTejun Heo 		struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
284373f53c4aSTejun Heo 		int i;
28443af24433SOleg Nesterov 
284573f53c4aSTejun Heo 		for (i = 0; i < WORK_NR_COLORS; i++)
284673f53c4aSTejun Heo 			BUG_ON(cwq->nr_in_flight[i]);
28471e19ffc6STejun Heo 		BUG_ON(cwq->nr_active);
28481e19ffc6STejun Heo 		BUG_ON(!list_empty(&cwq->delayed_works));
284973f53c4aSTejun Heo 	}
28501537663fSTejun Heo 
2851e22bee78STejun Heo 	if (wq->flags & WQ_RESCUER) {
2852e22bee78STejun Heo 		kthread_stop(wq->rescuer->task);
2853f2e005aaSTejun Heo 		free_mayday_mask(wq->mayday_mask);
28548d9df9f0SXiaotian Feng 		kfree(wq->rescuer);
2855e22bee78STejun Heo 	}
2856e22bee78STejun Heo 
2857bdbc5dd7STejun Heo 	free_cwqs(wq);
28583af24433SOleg Nesterov 	kfree(wq);
28593af24433SOleg Nesterov }
28603af24433SOleg Nesterov EXPORT_SYMBOL_GPL(destroy_workqueue);
28613af24433SOleg Nesterov 
2862dcd989cbSTejun Heo /**
2863dcd989cbSTejun Heo  * workqueue_set_max_active - adjust max_active of a workqueue
2864dcd989cbSTejun Heo  * @wq: target workqueue
2865dcd989cbSTejun Heo  * @max_active: new max_active value.
2866dcd989cbSTejun Heo  *
2867dcd989cbSTejun Heo  * Set max_active of @wq to @max_active.
2868dcd989cbSTejun Heo  *
2869dcd989cbSTejun Heo  * CONTEXT:
2870dcd989cbSTejun Heo  * Don't call from IRQ context.
2871dcd989cbSTejun Heo  */
2872dcd989cbSTejun Heo void workqueue_set_max_active(struct workqueue_struct *wq, int max_active)
2873dcd989cbSTejun Heo {
2874dcd989cbSTejun Heo 	unsigned int cpu;
2875dcd989cbSTejun Heo 
2876f3421797STejun Heo 	max_active = wq_clamp_max_active(max_active, wq->flags, wq->name);
2877dcd989cbSTejun Heo 
2878dcd989cbSTejun Heo 	spin_lock(&workqueue_lock);
2879dcd989cbSTejun Heo 
2880dcd989cbSTejun Heo 	wq->saved_max_active = max_active;
2881dcd989cbSTejun Heo 
2882f3421797STejun Heo 	for_each_cwq_cpu(cpu, wq) {
2883dcd989cbSTejun Heo 		struct global_cwq *gcwq = get_gcwq(cpu);
2884dcd989cbSTejun Heo 
2885dcd989cbSTejun Heo 		spin_lock_irq(&gcwq->lock);
2886dcd989cbSTejun Heo 
2887dcd989cbSTejun Heo 		if (!(wq->flags & WQ_FREEZEABLE) ||
2888dcd989cbSTejun Heo 		    !(gcwq->flags & GCWQ_FREEZING))
2889dcd989cbSTejun Heo 			get_cwq(gcwq->cpu, wq)->max_active = max_active;
2890dcd989cbSTejun Heo 
2891dcd989cbSTejun Heo 		spin_unlock_irq(&gcwq->lock);
2892dcd989cbSTejun Heo 	}
2893dcd989cbSTejun Heo 
2894dcd989cbSTejun Heo 	spin_unlock(&workqueue_lock);
2895dcd989cbSTejun Heo }
2896dcd989cbSTejun Heo EXPORT_SYMBOL_GPL(workqueue_set_max_active);
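
/*
 * Example (editor's sketch, names hypothetical): throttling a writeback
 * queue to one in-flight work per cwq while a device is degraded, then
 * restoring the default:
 *
 *	workqueue_set_max_active(drv->wb_wq, 1);
 *	...
 *	workqueue_set_max_active(drv->wb_wq, WQ_DFL_ACTIVE);
 */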
2897dcd989cbSTejun Heo 
2898dcd989cbSTejun Heo /**
2899dcd989cbSTejun Heo  * workqueue_congested - test whether a workqueue is congested
2900dcd989cbSTejun Heo  * @cpu: CPU in question
2901dcd989cbSTejun Heo  * @wq: target workqueue
2902dcd989cbSTejun Heo  *
2903dcd989cbSTejun Heo  * Test whether @wq's cpu workqueue for @cpu is congested.  There is
2904dcd989cbSTejun Heo  * no synchronization around this function and the test result is
2905dcd989cbSTejun Heo  * unreliable and only useful as advisory hints or for debugging.
2906dcd989cbSTejun Heo  *
2907dcd989cbSTejun Heo  * RETURNS:
2908dcd989cbSTejun Heo  * %true if congested, %false otherwise.
2909dcd989cbSTejun Heo  */
2910dcd989cbSTejun Heo bool workqueue_congested(unsigned int cpu, struct workqueue_struct *wq)
2911dcd989cbSTejun Heo {
2912dcd989cbSTejun Heo 	struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
2913dcd989cbSTejun Heo 
2914dcd989cbSTejun Heo 	return !list_empty(&cwq->delayed_works);
2915dcd989cbSTejun Heo }
2916dcd989cbSTejun Heo EXPORT_SYMBOL_GPL(workqueue_congested);
2917dcd989cbSTejun Heo 
2918dcd989cbSTejun Heo /**
2919dcd989cbSTejun Heo  * work_cpu - return the last known associated cpu for @work
2920dcd989cbSTejun Heo  * @work: the work of interest
2921dcd989cbSTejun Heo  *
2922dcd989cbSTejun Heo  * RETURNS:
2923bdbc5dd7STejun Heo  * CPU number if @work was ever queued.  WORK_CPU_NONE otherwise.
2924dcd989cbSTejun Heo  */
2925dcd989cbSTejun Heo unsigned int work_cpu(struct work_struct *work)
2926dcd989cbSTejun Heo {
2927dcd989cbSTejun Heo 	struct global_cwq *gcwq = get_work_gcwq(work);
2928dcd989cbSTejun Heo 
2929bdbc5dd7STejun Heo 	return gcwq ? gcwq->cpu : WORK_CPU_NONE;
2930dcd989cbSTejun Heo }
2931dcd989cbSTejun Heo EXPORT_SYMBOL_GPL(work_cpu);
2932dcd989cbSTejun Heo 
2933dcd989cbSTejun Heo /**
2934dcd989cbSTejun Heo  * work_busy - test whether a work is currently pending or running
2935dcd989cbSTejun Heo  * @work: the work to be tested
2936dcd989cbSTejun Heo  *
2937dcd989cbSTejun Heo  * Test whether @work is currently pending or running.  There is no
2938dcd989cbSTejun Heo  * synchronization around this function and the test result is
2939dcd989cbSTejun Heo  * unreliable and only useful as advisory hints or for debugging.
2940dcd989cbSTejun Heo  * Especially for reentrant wqs, the pending state might hide the
2941dcd989cbSTejun Heo  * running state.
2942dcd989cbSTejun Heo  *
2943dcd989cbSTejun Heo  * RETURNS:
2944dcd989cbSTejun Heo  * OR'd bitmask of WORK_BUSY_* bits.
2945dcd989cbSTejun Heo  */
2946dcd989cbSTejun Heo unsigned int work_busy(struct work_struct *work)
2947dcd989cbSTejun Heo {
2948dcd989cbSTejun Heo 	struct global_cwq *gcwq = get_work_gcwq(work);
2949dcd989cbSTejun Heo 	unsigned long flags;
2950dcd989cbSTejun Heo 	unsigned int ret = 0;
2951dcd989cbSTejun Heo 
2952dcd989cbSTejun Heo 	if (!gcwq)
2953dcd989cbSTejun Heo 		return false;
2954dcd989cbSTejun Heo 
2955dcd989cbSTejun Heo 	spin_lock_irqsave(&gcwq->lock, flags);
2956dcd989cbSTejun Heo 
2957dcd989cbSTejun Heo 	if (work_pending(work))
2958dcd989cbSTejun Heo 		ret |= WORK_BUSY_PENDING;
2959dcd989cbSTejun Heo 	if (find_worker_executing_work(gcwq, work))
2960dcd989cbSTejun Heo 		ret |= WORK_BUSY_RUNNING;
2961dcd989cbSTejun Heo 
2962dcd989cbSTejun Heo 	spin_unlock_irqrestore(&gcwq->lock, flags);
2963dcd989cbSTejun Heo 
2964dcd989cbSTejun Heo 	return ret;
2965dcd989cbSTejun Heo }
2966dcd989cbSTejun Heo EXPORT_SYMBOL_GPL(work_busy);
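
/*
 * Example (editor's sketch, names hypothetical): since the result is
 * only advisory, use it for hints and debugging, never for
 * synchronization.
 *
 *	unsigned int busy = work_busy(&drv->io_work);
 *
 *	if (busy & WORK_BUSY_RUNNING)
 *		pr_debug("mydrv: io_work still running\n");
 */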
2967dcd989cbSTejun Heo 
2968db7bccf4STejun Heo /*
2969db7bccf4STejun Heo  * CPU hotplug.
2970db7bccf4STejun Heo  *
2971e22bee78STejun Heo  * There are two challenges in supporting CPU hotplug.  Firstly, there
2972e22bee78STejun Heo  * are a lot of assumptions on strong associations among work, cwq and
2973e22bee78STejun Heo  * gcwq which make migrating pending and scheduled works very
2974e22bee78STejun Heo  * difficult to implement without impacting hot paths.  Secondly,
2975e22bee78STejun Heo  * gcwqs serve a mix of short, long and very long running works, making
2976e22bee78STejun Heo  * blocked draining impractical.
2977e22bee78STejun Heo  *
2978e22bee78STejun Heo  * This is solved by allowing a gcwq to be detached from CPU, running
2979e22bee78STejun Heo  * it with unbound (rogue) workers and allowing it to be reattached
2980e22bee78STejun Heo  * later if the cpu comes back online.  A separate thread is created
2981e22bee78STejun Heo  * to govern a gcwq in such state and is called the trustee of the
2982e22bee78STejun Heo  * gcwq.
2983db7bccf4STejun Heo  *
2984db7bccf4STejun Heo  * Trustee states and their descriptions.
2985db7bccf4STejun Heo  *
2986db7bccf4STejun Heo  * START	Command state used on startup.  On CPU_DOWN_PREPARE, a
2987db7bccf4STejun Heo  *		new trustee is started with this state.
2988db7bccf4STejun Heo  *
2989db7bccf4STejun Heo  * IN_CHARGE	Once started, trustee will enter this state after
2990e22bee78STejun Heo  *		assuming the manager role and making all existing
2991e22bee78STejun Heo  *		workers rogue.  DOWN_PREPARE waits for trustee to
2992e22bee78STejun Heo  *		enter this state.  After reaching IN_CHARGE, trustee
2993e22bee78STejun Heo  *		tries to execute the pending worklist until it's empty
2994e22bee78STejun Heo  *		and the state is set to BUTCHER, or the state is set
2995e22bee78STejun Heo  *		to RELEASE.
2996db7bccf4STejun Heo  *
2997db7bccf4STejun Heo  * BUTCHER	Command state which is set by the cpu callback after
2998db7bccf4STejun Heo  *		the cpu has gone down.  Once this state is set, the trustee
2999db7bccf4STejun Heo  *		knows that there will be no new works on the worklist
3000db7bccf4STejun Heo  *		and once the worklist is empty it can proceed to
3001db7bccf4STejun Heo  *		killing idle workers.
3002db7bccf4STejun Heo  *
3003db7bccf4STejun Heo  * RELEASE	Command state which is set by the cpu callback if the
3004db7bccf4STejun Heo  *		cpu down has been canceled or it has come online
3005db7bccf4STejun Heo  *		again.  After recognizing this state, trustee stops
3006e22bee78STejun Heo  *		trying to drain or butcher and clears ROGUE, rebinds
3007e22bee78STejun Heo  *		all remaining workers back to the cpu and releases
3008e22bee78STejun Heo  *		manager role.
3009db7bccf4STejun Heo  *
3010db7bccf4STejun Heo  * DONE		Trustee will enter this state after BUTCHER or RELEASE
3011db7bccf4STejun Heo  *		is complete.
3012db7bccf4STejun Heo  *
3013db7bccf4STejun Heo  *          trustee                 CPU                draining
3014db7bccf4STejun Heo  *         took over                down               complete
3015db7bccf4STejun Heo  * START -----------> IN_CHARGE -----------> BUTCHER -----------> DONE
3016db7bccf4STejun Heo  *                        |                     |                  ^
3017db7bccf4STejun Heo  *                        | CPU is back online  v   return workers |
3018db7bccf4STejun Heo  *                         ----------------> RELEASE --------------
3019db7bccf4STejun Heo  */
3020db7bccf4STejun Heo 
3021db7bccf4STejun Heo /**
3022db7bccf4STejun Heo  * trustee_wait_event_timeout - timed event wait for trustee
3023db7bccf4STejun Heo  * @cond: condition to wait for
3024db7bccf4STejun Heo  * @timeout: timeout in jiffies
3025db7bccf4STejun Heo  *
3026db7bccf4STejun Heo  * wait_event_timeout() for trustee to use.  Handles locking and
3027db7bccf4STejun Heo  * checks for RELEASE request.
3028db7bccf4STejun Heo  *
3029db7bccf4STejun Heo  * CONTEXT:
3030db7bccf4STejun Heo  * spin_lock_irq(gcwq->lock) which may be released and regrabbed
3031db7bccf4STejun Heo  * multiple times.  To be used by trustee.
3032db7bccf4STejun Heo  *
3033db7bccf4STejun Heo  * RETURNS:
3034db7bccf4STejun Heo  * Positive indicating left time if @cond is satisfied, 0 if timed
3035db7bccf4STejun Heo  * Positive remaining time if @cond is satisfied, 0 if timed
3036db7bccf4STejun Heo  */
3037db7bccf4STejun Heo #define trustee_wait_event_timeout(cond, timeout) ({			\
3038db7bccf4STejun Heo 	long __ret = (timeout);						\
3039db7bccf4STejun Heo 	while (!((cond) || (gcwq->trustee_state == TRUSTEE_RELEASE)) &&	\
3040db7bccf4STejun Heo 	       __ret) {							\
3041db7bccf4STejun Heo 		spin_unlock_irq(&gcwq->lock);				\
3042db7bccf4STejun Heo 		__wait_event_timeout(gcwq->trustee_wait, (cond) ||	\
3043db7bccf4STejun Heo 			(gcwq->trustee_state == TRUSTEE_RELEASE),	\
3044db7bccf4STejun Heo 			__ret);						\
3045db7bccf4STejun Heo 		spin_lock_irq(&gcwq->lock);				\
3046db7bccf4STejun Heo 	}								\
3047db7bccf4STejun Heo 	gcwq->trustee_state == TRUSTEE_RELEASE ? -1 : (__ret);		\
3048db7bccf4STejun Heo })
3049db7bccf4STejun Heo 
3050db7bccf4STejun Heo /**
3051db7bccf4STejun Heo  * trustee_wait_event - event wait for trustee
3052db7bccf4STejun Heo  * @cond: condition to wait for
3053db7bccf4STejun Heo  *
3054db7bccf4STejun Heo  * wait_event() for trustee to use.  Automatically handles locking and
3055db7bccf4STejun Heo  * checks for RELEASE request.
3056db7bccf4STejun Heo  *
3057db7bccf4STejun Heo  * CONTEXT:
3058db7bccf4STejun Heo  * spin_lock_irq(gcwq->lock) which may be released and regrabbed
3059db7bccf4STejun Heo  * multiple times.  To be used by trustee.
3060db7bccf4STejun Heo  *
3061db7bccf4STejun Heo  * RETURNS:
3062db7bccf4STejun Heo  * 0 if @cond is satisfied, -1 if canceled.
3063db7bccf4STejun Heo  */
3064db7bccf4STejun Heo #define trustee_wait_event(cond) ({					\
3065db7bccf4STejun Heo 	long __ret1;							\
3066db7bccf4STejun Heo 	__ret1 = trustee_wait_event_timeout(cond, MAX_SCHEDULE_TIMEOUT);\
3067db7bccf4STejun Heo 	__ret1 < 0 ? -1 : 0;						\
3068db7bccf4STejun Heo })
3069db7bccf4STejun Heo 
3070db7bccf4STejun Heo static int __cpuinit trustee_thread(void *__gcwq)
3071db7bccf4STejun Heo {
3072db7bccf4STejun Heo 	struct global_cwq *gcwq = __gcwq;
3073db7bccf4STejun Heo 	struct worker *worker;
3074e22bee78STejun Heo 	struct work_struct *work;
3075db7bccf4STejun Heo 	struct hlist_node *pos;
3076e22bee78STejun Heo 	long rc;
3077db7bccf4STejun Heo 	int i;
3078db7bccf4STejun Heo 
3079db7bccf4STejun Heo 	BUG_ON(gcwq->cpu != smp_processor_id());
3080db7bccf4STejun Heo 
3081db7bccf4STejun Heo 	spin_lock_irq(&gcwq->lock);
3082db7bccf4STejun Heo 	/*
3083e22bee78STejun Heo 	 * Claim the manager position and make all workers rogue.
3084e22bee78STejun Heo 	 * Trustee must be bound to the target cpu and can't be
3085e22bee78STejun Heo 	 * cancelled.
3086db7bccf4STejun Heo 	 */
3087db7bccf4STejun Heo 	BUG_ON(gcwq->cpu != smp_processor_id());
3088e22bee78STejun Heo 	rc = trustee_wait_event(!(gcwq->flags & GCWQ_MANAGING_WORKERS));
3089e22bee78STejun Heo 	BUG_ON(rc < 0);
3090e22bee78STejun Heo 
3091e22bee78STejun Heo 	gcwq->flags |= GCWQ_MANAGING_WORKERS;
3092db7bccf4STejun Heo 
3093db7bccf4STejun Heo 	list_for_each_entry(worker, &gcwq->idle_list, entry)
3094cb444766STejun Heo 		worker->flags |= WORKER_ROGUE;
3095db7bccf4STejun Heo 
3096db7bccf4STejun Heo 	for_each_busy_worker(worker, i, pos, gcwq)
3097cb444766STejun Heo 		worker->flags |= WORKER_ROGUE;
3098db7bccf4STejun Heo 
3099db7bccf4STejun Heo 	/*
3100e22bee78STejun Heo 	 * Call schedule() so that we cross rq->lock and thus can
3101e22bee78STejun Heo 	 * guarantee sched callbacks see the rogue flag.  This is
3102e22bee78STejun Heo 	 * necessary as scheduler callbacks may be invoked from other
3103e22bee78STejun Heo 	 * cpus.
3104e22bee78STejun Heo 	 */
3105e22bee78STejun Heo 	spin_unlock_irq(&gcwq->lock);
3106e22bee78STejun Heo 	schedule();
3107e22bee78STejun Heo 	spin_lock_irq(&gcwq->lock);
3108e22bee78STejun Heo 
3109e22bee78STejun Heo 	/*
3110cb444766STejun Heo 	 * Sched callbacks are disabled now.  Zap nr_running.  After
3111cb444766STejun Heo 	 * this, nr_running stays zero and need_more_worker() and
3112cb444766STejun Heo 	 * keep_working() are always true as long as the worklist is
3113cb444766STejun Heo 	 * not empty.
3114e22bee78STejun Heo 	 */
3115cb444766STejun Heo 	atomic_set(get_gcwq_nr_running(gcwq->cpu), 0);
3116e22bee78STejun Heo 
3117e22bee78STejun Heo 	spin_unlock_irq(&gcwq->lock);
3118e22bee78STejun Heo 	del_timer_sync(&gcwq->idle_timer);
3119e22bee78STejun Heo 	spin_lock_irq(&gcwq->lock);
3120e22bee78STejun Heo 
3121e22bee78STejun Heo 	/*
3122db7bccf4STejun Heo 	 * We're now in charge.  Notify and proceed to drain.  We need
3123db7bccf4STejun Heo 	 * to keep the gcwq running during the whole CPU down
3124db7bccf4STejun Heo 	 * procedure as other cpu hotunplug callbacks may need to
3125db7bccf4STejun Heo 	 * flush currently running tasks.
3126db7bccf4STejun Heo 	 */
3127db7bccf4STejun Heo 	gcwq->trustee_state = TRUSTEE_IN_CHARGE;
3128db7bccf4STejun Heo 	wake_up_all(&gcwq->trustee_wait);
3129db7bccf4STejun Heo 
3130db7bccf4STejun Heo 	/*
3131db7bccf4STejun Heo 	 * The original cpu is in the process of dying and may go away
3132db7bccf4STejun Heo 	 * anytime now.  When that happens, we and all workers would
3133e22bee78STejun Heo 	 * be migrated to other cpus.  Try draining any left work.  We
3134e22bee78STejun Heo 	 * want to get it over with ASAP - spam rescuers, wake up as
3135e22bee78STejun Heo 	 * many idlers as necessary and create new ones till the
3136e22bee78STejun Heo 	 * worklist is empty.  Note that if the gcwq is frozen, there
3137e22bee78STejun Heo 	 * may be frozen works in freezeable cwqs.  Don't declare
3138e22bee78STejun Heo 	 * completion while frozen.
3139db7bccf4STejun Heo 	 */
3140db7bccf4STejun Heo 	while (gcwq->nr_workers != gcwq->nr_idle ||
3141db7bccf4STejun Heo 	       gcwq->flags & GCWQ_FREEZING ||
3142db7bccf4STejun Heo 	       gcwq->trustee_state == TRUSTEE_IN_CHARGE) {
3143e22bee78STejun Heo 		int nr_works = 0;
3144e22bee78STejun Heo 
3145e22bee78STejun Heo 		list_for_each_entry(work, &gcwq->worklist, entry) {
3146e22bee78STejun Heo 			send_mayday(work);
3147e22bee78STejun Heo 			nr_works++;
3148e22bee78STejun Heo 		}
3149e22bee78STejun Heo 
3150e22bee78STejun Heo 		list_for_each_entry(worker, &gcwq->idle_list, entry) {
3151e22bee78STejun Heo 			if (!nr_works--)
3152e22bee78STejun Heo 				break;
3153e22bee78STejun Heo 			wake_up_process(worker->task);
3154e22bee78STejun Heo 		}
3155e22bee78STejun Heo 
3156e22bee78STejun Heo 		if (need_to_create_worker(gcwq)) {
3157e22bee78STejun Heo 			spin_unlock_irq(&gcwq->lock);
3158e22bee78STejun Heo 			worker = create_worker(gcwq, false);
3159e22bee78STejun Heo 			spin_lock_irq(&gcwq->lock);
3160e22bee78STejun Heo 			if (worker) {
3161cb444766STejun Heo 				worker->flags |= WORKER_ROGUE;
3162e22bee78STejun Heo 				start_worker(worker);
3163e22bee78STejun Heo 			}
3164e22bee78STejun Heo 		}
3165e22bee78STejun Heo 
3166db7bccf4STejun Heo 		/* give a breather */
3167db7bccf4STejun Heo 		if (trustee_wait_event_timeout(false, TRUSTEE_COOLDOWN) < 0)
3168db7bccf4STejun Heo 			break;
3169db7bccf4STejun Heo 	}
3170db7bccf4STejun Heo 
3171e22bee78STejun Heo 	/*
3172e22bee78STejun Heo 	 * Either all works have been scheduled and cpu is down, or
3173e22bee78STejun Heo 	 * cpu down has already been canceled.  Wait for and butcher
3174e22bee78STejun Heo 	 * all workers till we're canceled.
3175e22bee78STejun Heo 	 */
3176e22bee78STejun Heo 	do {
3177e22bee78STejun Heo 		rc = trustee_wait_event(!list_empty(&gcwq->idle_list));
3178e22bee78STejun Heo 		while (!list_empty(&gcwq->idle_list))
3179e22bee78STejun Heo 			destroy_worker(list_first_entry(&gcwq->idle_list,
3180e22bee78STejun Heo 							struct worker, entry));
3181e22bee78STejun Heo 	} while (gcwq->nr_workers && rc >= 0);
3182e22bee78STejun Heo 
3183e22bee78STejun Heo 	/*
3184e22bee78STejun Heo 	 * At this point, either draining has completed and no worker
3185e22bee78STejun Heo 	 * is left, or cpu down has been canceled or the cpu is being
3186e22bee78STejun Heo 	 * brought back up.  There shouldn't be any idle workers left.
3187e22bee78STejun Heo 	 * Tell the remaining busy ones to rebind once they finish their
3188e22bee78STejun Heo 	 * currently scheduled works by scheduling the rebind_work.
3189e22bee78STejun Heo 	 */
3190e22bee78STejun Heo 	WARN_ON(!list_empty(&gcwq->idle_list));
3191e22bee78STejun Heo 
3192e22bee78STejun Heo 	for_each_busy_worker(worker, i, pos, gcwq) {
3193e22bee78STejun Heo 		struct work_struct *rebind_work = &worker->rebind_work;
3194e22bee78STejun Heo 
3195e22bee78STejun Heo 		/*
3196e22bee78STejun Heo 		 * Rebind_work may race with future cpu hotplug
3197e22bee78STejun Heo 		 * operations.  Use a separate flag to mark that
3198e22bee78STejun Heo 		 * rebinding is scheduled.
3199e22bee78STejun Heo 		 */
3200cb444766STejun Heo 		worker->flags |= WORKER_REBIND;
3201cb444766STejun Heo 		worker->flags &= ~WORKER_ROGUE;
3202e22bee78STejun Heo 
3203e22bee78STejun Heo 		/* queue rebind_work, wq doesn't matter, use the default one */
3204e22bee78STejun Heo 		if (test_and_set_bit(WORK_STRUCT_PENDING_BIT,
3205e22bee78STejun Heo 				     work_data_bits(rebind_work)))
3206e22bee78STejun Heo 			continue;
3207e22bee78STejun Heo 
3208e22bee78STejun Heo 		debug_work_activate(rebind_work);
3209d320c038STejun Heo 		insert_work(get_cwq(gcwq->cpu, system_wq), rebind_work,
3210e22bee78STejun Heo 			    worker->scheduled.next,
3211e22bee78STejun Heo 			    work_color_to_flags(WORK_NO_COLOR));
3212e22bee78STejun Heo 	}
3213e22bee78STejun Heo 
3214e22bee78STejun Heo 	/* relinquish manager role */
3215e22bee78STejun Heo 	gcwq->flags &= ~GCWQ_MANAGING_WORKERS;
3216e22bee78STejun Heo 
3217db7bccf4STejun Heo 	/* notify completion */
3218db7bccf4STejun Heo 	gcwq->trustee = NULL;
3219db7bccf4STejun Heo 	gcwq->trustee_state = TRUSTEE_DONE;
3220db7bccf4STejun Heo 	wake_up_all(&gcwq->trustee_wait);
3221db7bccf4STejun Heo 	spin_unlock_irq(&gcwq->lock);
3222db7bccf4STejun Heo 	return 0;
3223db7bccf4STejun Heo }
3224db7bccf4STejun Heo 
3225db7bccf4STejun Heo /**
3226db7bccf4STejun Heo  * wait_trustee_state - wait for trustee to enter the specified state
3227db7bccf4STejun Heo  * @gcwq: gcwq the trustee of interest belongs to
3228db7bccf4STejun Heo  * @state: target state to wait for
3229db7bccf4STejun Heo  *
3230db7bccf4STejun Heo  * Wait for the trustee to reach @state.  DONE is already matched.
3231db7bccf4STejun Heo  *
3232db7bccf4STejun Heo  * CONTEXT:
3233db7bccf4STejun Heo  * spin_lock_irq(gcwq->lock) which may be released and regrabbed
3234db7bccf4STejun Heo  * multiple times.  To be used by cpu_callback.
3235db7bccf4STejun Heo  */
3236db7bccf4STejun Heo static void __cpuinit wait_trustee_state(struct global_cwq *gcwq, int state)
3237*06bd6ebfSNamhyung Kim __releases(&gcwq->lock)
3238*06bd6ebfSNamhyung Kim __acquires(&gcwq->lock)
3239db7bccf4STejun Heo {
3240db7bccf4STejun Heo 	if (!(gcwq->trustee_state == state ||
3241db7bccf4STejun Heo 	      gcwq->trustee_state == TRUSTEE_DONE)) {
3242db7bccf4STejun Heo 		spin_unlock_irq(&gcwq->lock);
3243db7bccf4STejun Heo 		__wait_event(gcwq->trustee_wait,
3244db7bccf4STejun Heo 			     gcwq->trustee_state == state ||
3245db7bccf4STejun Heo 			     gcwq->trustee_state == TRUSTEE_DONE);
3246db7bccf4STejun Heo 		spin_lock_irq(&gcwq->lock);
3247db7bccf4STejun Heo 	}
3248db7bccf4STejun Heo }
3249db7bccf4STejun Heo 
32509c7b216dSChandra Seetharaman static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
32511da177e4SLinus Torvalds 						unsigned long action,
32521da177e4SLinus Torvalds 						void *hcpu)
32531da177e4SLinus Torvalds {
32543af24433SOleg Nesterov 	unsigned int cpu = (unsigned long)hcpu;
3255db7bccf4STejun Heo 	struct global_cwq *gcwq = get_gcwq(cpu);
3256db7bccf4STejun Heo 	struct task_struct *new_trustee = NULL;
3257e22bee78STejun Heo 	struct worker *uninitialized_var(new_worker);
3258db7bccf4STejun Heo 	unsigned long flags;
32591da177e4SLinus Torvalds 
32608bb78442SRafael J. Wysocki 	action &= ~CPU_TASKS_FROZEN;
32618bb78442SRafael J. Wysocki 
32621da177e4SLinus Torvalds 	switch (action) {
3263db7bccf4STejun Heo 	case CPU_DOWN_PREPARE:
3264db7bccf4STejun Heo 		new_trustee = kthread_create(trustee_thread, gcwq,
3265db7bccf4STejun Heo 					     "workqueue_trustee/%d", cpu);
3266db7bccf4STejun Heo 		if (IS_ERR(new_trustee))
3267db7bccf4STejun Heo 			return notifier_from_errno(PTR_ERR(new_trustee));
3268db7bccf4STejun Heo 		kthread_bind(new_trustee, cpu);
3269e22bee78STejun Heo 		/* fall through */
32703af24433SOleg Nesterov 	case CPU_UP_PREPARE:
3271e22bee78STejun Heo 		BUG_ON(gcwq->first_idle);
3272e22bee78STejun Heo 		new_worker = create_worker(gcwq, false);
3273e22bee78STejun Heo 		if (!new_worker) {
3274e22bee78STejun Heo 			if (new_trustee)
3275e22bee78STejun Heo 				kthread_stop(new_trustee);
3276e22bee78STejun Heo 			return NOTIFY_BAD;
32773af24433SOleg Nesterov 		}
3278db7bccf4STejun Heo 	}
32791537663fSTejun Heo 
3280db7bccf4STejun Heo 	/* some are called w/ irq disabled, don't disturb irq status */
3281db7bccf4STejun Heo 	spin_lock_irqsave(&gcwq->lock, flags);
32823af24433SOleg Nesterov 
32833af24433SOleg Nesterov 	switch (action) {
3284db7bccf4STejun Heo 	case CPU_DOWN_PREPARE:
3285db7bccf4STejun Heo 		/* initialize trustee and tell it to acquire the gcwq */
3286db7bccf4STejun Heo 		BUG_ON(gcwq->trustee || gcwq->trustee_state != TRUSTEE_DONE);
3287db7bccf4STejun Heo 		gcwq->trustee = new_trustee;
3288db7bccf4STejun Heo 		gcwq->trustee_state = TRUSTEE_START;
3289db7bccf4STejun Heo 		wake_up_process(gcwq->trustee);
3290db7bccf4STejun Heo 		wait_trustee_state(gcwq, TRUSTEE_IN_CHARGE);
3291e22bee78STejun Heo 		/* fall through */
32923af24433SOleg Nesterov 	case CPU_UP_PREPARE:
3293e22bee78STejun Heo 		BUG_ON(gcwq->first_idle);
3294e22bee78STejun Heo 		gcwq->first_idle = new_worker;
32951da177e4SLinus Torvalds 		break;
32961da177e4SLinus Torvalds 
3297e22bee78STejun Heo 	case CPU_DYING:
3298e22bee78STejun Heo 		/*
3299e22bee78STejun Heo 		 * Before this point, the trustee and all workers,
3300e22bee78STejun Heo 		 * except for those still executing works queued
3301e22bee78STejun Heo 		 * before the last CPU down, must be on the cpu.
3302e22bee78STejun Heo 		 * After this point they are all unbound from it.
3303e22bee78STejun Heo 		 */
3304e22bee78STejun Heo 		gcwq->flags |= GCWQ_DISASSOCIATED;
3305db7bccf4STejun Heo 		break;
3306db7bccf4STejun Heo 
33073da1c84cSOleg Nesterov 	case CPU_POST_DEAD:
3308db7bccf4STejun Heo 		gcwq->trustee_state = TRUSTEE_BUTCHER;
3309e22bee78STejun Heo 		/* fall through */
3310e22bee78STejun Heo 	case CPU_UP_CANCELED:
3311e22bee78STejun Heo 		destroy_worker(gcwq->first_idle);
3312e22bee78STejun Heo 		gcwq->first_idle = NULL;
3313db7bccf4STejun Heo 		break;
3314db7bccf4STejun Heo 
3315db7bccf4STejun Heo 	case CPU_DOWN_FAILED:
33161da177e4SLinus Torvalds 	case CPU_ONLINE:
3317e22bee78STejun Heo 		gcwq->flags &= ~GCWQ_DISASSOCIATED;
3318db7bccf4STejun Heo 		if (gcwq->trustee_state != TRUSTEE_DONE) {
3319db7bccf4STejun Heo 			gcwq->trustee_state = TRUSTEE_RELEASE;
3320db7bccf4STejun Heo 			wake_up_process(gcwq->trustee);
3321db7bccf4STejun Heo 			wait_trustee_state(gcwq, TRUSTEE_DONE);
3322db7bccf4STejun Heo 		}
33231da177e4SLinus Torvalds 
3324e22bee78STejun Heo 		/*
3325e22bee78STejun Heo 		 * Trustee is done and there might be no worker left.
3326e22bee78STejun Heo 		 * Put the first_idle in and request a real manager to
3327e22bee78STejun Heo 		 * take a look.
3328e22bee78STejun Heo 		 */
3329e22bee78STejun Heo 		spin_unlock_irq(&gcwq->lock);
3330e22bee78STejun Heo 		kthread_bind(gcwq->first_idle->task, cpu);
3331e22bee78STejun Heo 		spin_lock_irq(&gcwq->lock);
3332e22bee78STejun Heo 		gcwq->flags |= GCWQ_MANAGE_WORKERS;
3333e22bee78STejun Heo 		start_worker(gcwq->first_idle);
3334e22bee78STejun Heo 		gcwq->first_idle = NULL;
33351da177e4SLinus Torvalds 		break;
33361da177e4SLinus Torvalds 	}
33371da177e4SLinus Torvalds 
3338db7bccf4STejun Heo 	spin_unlock_irqrestore(&gcwq->lock, flags);
333900dfcaf7SOleg Nesterov 
33401537663fSTejun Heo 	return notifier_from_errno(0);
33411da177e4SLinus Torvalds }
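
/*
 * Editorial sketch (not in the original source): the notifier above is
 * easier to follow as a table.  CPU_DOWN_PREPARE creates and seats a
 * trustee and, by falling through to CPU_UP_PREPARE, also pre-creates
 * gcwq->first_idle; CPU_DYING marks the gcwq disassociated;
 * CPU_POST_DEAD and CPU_UP_CANCELED destroy the unused first_idle;
 * CPU_DOWN_FAILED and CPU_ONLINE release the trustee and start
 * first_idle.  Failures travel through the standard notifier errno
 * encoding, e.g.:
 *
 *	return notifier_from_errno(-ENOMEM);	// errno folded into NOTIFY_*
 *	err = notifier_to_errno(ret);		// recovers -ENOMEM
 */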
33421da177e4SLinus Torvalds 
33432d3854a3SRusty Russell #ifdef CONFIG_SMP
33448ccad40dSRusty Russell 
33452d3854a3SRusty Russell struct work_for_cpu {
33466b44003eSAndrew Morton 	struct completion completion;
33472d3854a3SRusty Russell 	long (*fn)(void *);
33482d3854a3SRusty Russell 	void *arg;
33492d3854a3SRusty Russell 	long ret;
33502d3854a3SRusty Russell };
33512d3854a3SRusty Russell 
33526b44003eSAndrew Morton static int do_work_for_cpu(void *_wfc)
33532d3854a3SRusty Russell {
33546b44003eSAndrew Morton 	struct work_for_cpu *wfc = _wfc;
33552d3854a3SRusty Russell 	wfc->ret = wfc->fn(wfc->arg);
33566b44003eSAndrew Morton 	complete(&wfc->completion);
33576b44003eSAndrew Morton 	return 0;
33582d3854a3SRusty Russell }
33592d3854a3SRusty Russell 
33602d3854a3SRusty Russell /**
33612d3854a3SRusty Russell  * work_on_cpu - run a function in process context on a particular cpu
33622d3854a3SRusty Russell  * @cpu: the cpu to run on
33632d3854a3SRusty Russell  * @fn: the function to run
33642d3854a3SRusty Russell  * @arg: the function arg
33652d3854a3SRusty Russell  *
336731ad9081SRusty Russell  * Runs @fn(@arg) on @cpu and returns @fn's return value.
336831ad9081SRusty Russell  * It is up to the caller to ensure that @cpu doesn't go offline.
33686b44003eSAndrew Morton  * The caller must not hold any locks which would prevent @fn from completing.
33692d3854a3SRusty Russell  */
33702d3854a3SRusty Russell long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
33712d3854a3SRusty Russell {
33726b44003eSAndrew Morton 	struct task_struct *sub_thread;
33736b44003eSAndrew Morton 	struct work_for_cpu wfc = {
33746b44003eSAndrew Morton 		.completion = COMPLETION_INITIALIZER_ONSTACK(wfc.completion),
33756b44003eSAndrew Morton 		.fn = fn,
33766b44003eSAndrew Morton 		.arg = arg,
33776b44003eSAndrew Morton 	};
33782d3854a3SRusty Russell 
33796b44003eSAndrew Morton 	sub_thread = kthread_create(do_work_for_cpu, &wfc, "work_for_cpu");
33806b44003eSAndrew Morton 	if (IS_ERR(sub_thread))
33816b44003eSAndrew Morton 		return PTR_ERR(sub_thread);
33826b44003eSAndrew Morton 	kthread_bind(sub_thread, cpu);
33836b44003eSAndrew Morton 	wake_up_process(sub_thread);
33846b44003eSAndrew Morton 	wait_for_completion(&wfc.completion);
33852d3854a3SRusty Russell 	return wfc.ret;
33862d3854a3SRusty Russell }
33872d3854a3SRusty Russell EXPORT_SYMBOL_GPL(work_on_cpu);
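
/*
 * Usage sketch (editorial; probe_node, do_node_probe and struct my_dev
 * are hypothetical names): run a CPU-local probe on @cpu while holding
 * the hotplug read lock so the cpu cannot go away underneath us.
 *
 *	static long probe_node(void *arg)
 *	{
 *		struct my_dev *dev = arg;	// hypothetical device type
 *		return do_node_probe(dev);	// hypothetical helper
 *	}
 *
 *	get_online_cpus();
 *	ret = work_on_cpu(cpu, probe_node, dev);
 *	put_online_cpus();
 */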
33882d3854a3SRusty Russell #endif /* CONFIG_SMP */
33892d3854a3SRusty Russell 
3390a0a1a5fdSTejun Heo #ifdef CONFIG_FREEZER
3391e7577c50SRusty Russell 
3392a0a1a5fdSTejun Heo /**
3393a0a1a5fdSTejun Heo  * freeze_workqueues_begin - begin freezing workqueues
3394a0a1a5fdSTejun Heo  *
3395a0a1a5fdSTejun Heo  * Start freezing workqueues.  After this function returns, all
3396a0a1a5fdSTejun Heo  * freezeable workqueues will queue new works to their delayed_works
33977e11629dSTejun Heo  * list instead of gcwq->worklist.
3398a0a1a5fdSTejun Heo  *
3399a0a1a5fdSTejun Heo  * CONTEXT:
34008b03ae3cSTejun Heo  * Grabs and releases workqueue_lock and gcwq->lock's.
3401a0a1a5fdSTejun Heo  */
3402a0a1a5fdSTejun Heo void freeze_workqueues_begin(void)
3403a0a1a5fdSTejun Heo {
3404a0a1a5fdSTejun Heo 	unsigned int cpu;
3405a0a1a5fdSTejun Heo 
3406a0a1a5fdSTejun Heo 	spin_lock(&workqueue_lock);
3407a0a1a5fdSTejun Heo 
3408a0a1a5fdSTejun Heo 	BUG_ON(workqueue_freezing);
3409a0a1a5fdSTejun Heo 	workqueue_freezing = true;
3410a0a1a5fdSTejun Heo 
3411f3421797STejun Heo 	for_each_gcwq_cpu(cpu) {
34128b03ae3cSTejun Heo 		struct global_cwq *gcwq = get_gcwq(cpu);
3413bdbc5dd7STejun Heo 		struct workqueue_struct *wq;
34148b03ae3cSTejun Heo 
34158b03ae3cSTejun Heo 		spin_lock_irq(&gcwq->lock);
34168b03ae3cSTejun Heo 
3417db7bccf4STejun Heo 		BUG_ON(gcwq->flags & GCWQ_FREEZING);
3418db7bccf4STejun Heo 		gcwq->flags |= GCWQ_FREEZING;
3419db7bccf4STejun Heo 
3420a0a1a5fdSTejun Heo 		list_for_each_entry(wq, &workqueues, list) {
3421a0a1a5fdSTejun Heo 			struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
3422a0a1a5fdSTejun Heo 
3423f3421797STejun Heo 			if (cwq && wq->flags & WQ_FREEZEABLE)
3424a0a1a5fdSTejun Heo 				cwq->max_active = 0;
34251da177e4SLinus Torvalds 		}
34268b03ae3cSTejun Heo 
34278b03ae3cSTejun Heo 		spin_unlock_irq(&gcwq->lock);
3428a0a1a5fdSTejun Heo 	}
3429a0a1a5fdSTejun Heo 
3430a0a1a5fdSTejun Heo 	spin_unlock(&workqueue_lock);
3431a0a1a5fdSTejun Heo }
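
/*
 * Editorial note: forcing max_active to zero is sufficient because the
 * queueing path compares cwq->nr_active against cwq->max_active and,
 * once the limit is hit, appends new works to cwq->delayed_works
 * instead of gcwq->worklist.  thaw_workqueues() below undoes this by
 * restoring max_active and draining delayed_works via
 * cwq_activate_first_delayed().
 */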
3432a0a1a5fdSTejun Heo 
3433a0a1a5fdSTejun Heo /**
3434a0a1a5fdSTejun Heo  * freeze_workqueues_busy - are freezeable workqueues still busy?
3435a0a1a5fdSTejun Heo  *
3436a0a1a5fdSTejun Heo  * Check whether freezing is complete.  This function must be called
3437a0a1a5fdSTejun Heo  * between freeze_workqueues_begin() and thaw_workqueues().
3438a0a1a5fdSTejun Heo  *
3439a0a1a5fdSTejun Heo  * CONTEXT:
3440a0a1a5fdSTejun Heo  * Grabs and releases workqueue_lock.
3441a0a1a5fdSTejun Heo  *
3442a0a1a5fdSTejun Heo  * RETURNS:
3443a0a1a5fdSTejun Heo  * %true if some freezeable workqueues are still busy.  %false if
3444a0a1a5fdSTejun Heo  * freezing is complete.
3445a0a1a5fdSTejun Heo  */
3446a0a1a5fdSTejun Heo bool freeze_workqueues_busy(void)
3447a0a1a5fdSTejun Heo {
3448a0a1a5fdSTejun Heo 	unsigned int cpu;
3449a0a1a5fdSTejun Heo 	bool busy = false;
3450a0a1a5fdSTejun Heo 
3451a0a1a5fdSTejun Heo 	spin_lock(&workqueue_lock);
3452a0a1a5fdSTejun Heo 
3453a0a1a5fdSTejun Heo 	BUG_ON(!workqueue_freezing);
3454a0a1a5fdSTejun Heo 
3455f3421797STejun Heo 	for_each_gcwq_cpu(cpu) {
3456bdbc5dd7STejun Heo 		struct workqueue_struct *wq;
3457a0a1a5fdSTejun Heo 		/*
3458a0a1a5fdSTejun Heo 		 * nr_active is monotonically decreasing while frozen,
3459a0a1a5fdSTejun Heo 		 * so it's safe to peek at it without grabbing gcwq->lock.
3460a0a1a5fdSTejun Heo 		 */
3461a0a1a5fdSTejun Heo 		list_for_each_entry(wq, &workqueues, list) {
3462a0a1a5fdSTejun Heo 			struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
3463a0a1a5fdSTejun Heo 
3464f3421797STejun Heo 			if (!cwq || !(wq->flags & WQ_FREEZEABLE))
3465a0a1a5fdSTejun Heo 				continue;
3466a0a1a5fdSTejun Heo 
3467a0a1a5fdSTejun Heo 			BUG_ON(cwq->nr_active < 0);
3468a0a1a5fdSTejun Heo 			if (cwq->nr_active) {
3469a0a1a5fdSTejun Heo 				busy = true;
3470a0a1a5fdSTejun Heo 				goto out_unlock;
3471a0a1a5fdSTejun Heo 			}
3472a0a1a5fdSTejun Heo 		}
3473a0a1a5fdSTejun Heo 	}
3474a0a1a5fdSTejun Heo out_unlock:
3475a0a1a5fdSTejun Heo 	spin_unlock(&workqueue_lock);
3476a0a1a5fdSTejun Heo 	return busy;
3477a0a1a5fdSTejun Heo }
3478a0a1a5fdSTejun Heo 
3479a0a1a5fdSTejun Heo /**
3480a0a1a5fdSTejun Heo  * thaw_workqueues - thaw workqueues
3481a0a1a5fdSTejun Heo  *
3482a0a1a5fdSTejun Heo  * Thaw workqueues.  Normal queueing is restored and all collected
34837e11629dSTejun Heo  * frozen works are transferred to their respective gcwq worklists.
3484a0a1a5fdSTejun Heo  *
3485a0a1a5fdSTejun Heo  * CONTEXT:
34868b03ae3cSTejun Heo  * Grabs and releases workqueue_lock and gcwq->lock's.
3487a0a1a5fdSTejun Heo  */
3488a0a1a5fdSTejun Heo void thaw_workqueues(void)
3489a0a1a5fdSTejun Heo {
3490a0a1a5fdSTejun Heo 	unsigned int cpu;
3491a0a1a5fdSTejun Heo 
3492a0a1a5fdSTejun Heo 	spin_lock(&workqueue_lock);
3493a0a1a5fdSTejun Heo 
3494a0a1a5fdSTejun Heo 	if (!workqueue_freezing)
3495a0a1a5fdSTejun Heo 		goto out_unlock;
3496a0a1a5fdSTejun Heo 
3497f3421797STejun Heo 	for_each_gcwq_cpu(cpu) {
34988b03ae3cSTejun Heo 		struct global_cwq *gcwq = get_gcwq(cpu);
3499bdbc5dd7STejun Heo 		struct workqueue_struct *wq;
35008b03ae3cSTejun Heo 
35018b03ae3cSTejun Heo 		spin_lock_irq(&gcwq->lock);
35028b03ae3cSTejun Heo 
3503db7bccf4STejun Heo 		BUG_ON(!(gcwq->flags & GCWQ_FREEZING));
3504db7bccf4STejun Heo 		gcwq->flags &= ~GCWQ_FREEZING;
3505db7bccf4STejun Heo 
3506a0a1a5fdSTejun Heo 		list_for_each_entry(wq, &workqueues, list) {
3507a0a1a5fdSTejun Heo 			struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
3508a0a1a5fdSTejun Heo 
3509f3421797STejun Heo 			if (!cwq || !(wq->flags & WQ_FREEZEABLE))
3510a0a1a5fdSTejun Heo 				continue;
3511a0a1a5fdSTejun Heo 
3512a0a1a5fdSTejun Heo 			/* restore max_active and repopulate worklist */
3513a0a1a5fdSTejun Heo 			cwq->max_active = wq->saved_max_active;
3514a0a1a5fdSTejun Heo 
3515a0a1a5fdSTejun Heo 			while (!list_empty(&cwq->delayed_works) &&
3516a0a1a5fdSTejun Heo 			       cwq->nr_active < cwq->max_active)
3517a0a1a5fdSTejun Heo 				cwq_activate_first_delayed(cwq);
3518a0a1a5fdSTejun Heo 		}
35198b03ae3cSTejun Heo 
3520e22bee78STejun Heo 		wake_up_worker(gcwq);
3521e22bee78STejun Heo 
35228b03ae3cSTejun Heo 		spin_unlock_irq(&gcwq->lock);
3523a0a1a5fdSTejun Heo 	}
3524a0a1a5fdSTejun Heo 
3525a0a1a5fdSTejun Heo 	workqueue_freezing = false;
3526a0a1a5fdSTejun Heo out_unlock:
3527a0a1a5fdSTejun Heo 	spin_unlock(&workqueue_lock);
3528a0a1a5fdSTejun Heo }
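
/*
 * Usage sketch (editorial): the suspend code drives the three freezer
 * entry points roughly like this; the loop below is a simplified
 * illustration of what kernel/power/process.c does, not its exact code.
 *
 *	freeze_workqueues_begin();
 *	while (freeze_workqueues_busy())
 *		msleep(10);		// let in-flight works drain
 *	...				// system is frozen/suspended here
 *	thaw_workqueues();		// resume normal queueing
 */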
3529a0a1a5fdSTejun Heo #endif /* CONFIG_FREEZER */
3530a0a1a5fdSTejun Heo 
35316ee0578bSSuresh Siddha static int __init init_workqueues(void)
35321da177e4SLinus Torvalds {
3533c34056a3STejun Heo 	unsigned int cpu;
3534c8e55f36STejun Heo 	int i;
3535c34056a3STejun Heo 
3536f6500947STejun Heo 	cpu_notifier(workqueue_cpu_callback, CPU_PRI_WORKQUEUE);
35378b03ae3cSTejun Heo 
35388b03ae3cSTejun Heo 	/* initialize gcwqs */
3539f3421797STejun Heo 	for_each_gcwq_cpu(cpu) {
35408b03ae3cSTejun Heo 		struct global_cwq *gcwq = get_gcwq(cpu);
35418b03ae3cSTejun Heo 
35428b03ae3cSTejun Heo 		spin_lock_init(&gcwq->lock);
35437e11629dSTejun Heo 		INIT_LIST_HEAD(&gcwq->worklist);
35448b03ae3cSTejun Heo 		gcwq->cpu = cpu;
3545f3421797STejun Heo 		if (cpu == WORK_CPU_UNBOUND)
3546f3421797STejun Heo 			gcwq->flags |= GCWQ_DISASSOCIATED;
35478b03ae3cSTejun Heo 
3548c8e55f36STejun Heo 		INIT_LIST_HEAD(&gcwq->idle_list);
3549c8e55f36STejun Heo 		for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++)
3550c8e55f36STejun Heo 			INIT_HLIST_HEAD(&gcwq->busy_hash[i]);
3551c8e55f36STejun Heo 
3552e22bee78STejun Heo 		init_timer_deferrable(&gcwq->idle_timer);
3553e22bee78STejun Heo 		gcwq->idle_timer.function = idle_worker_timeout;
3554e22bee78STejun Heo 		gcwq->idle_timer.data = (unsigned long)gcwq;
3555e22bee78STejun Heo 
3556e22bee78STejun Heo 		setup_timer(&gcwq->mayday_timer, gcwq_mayday_timeout,
3557e22bee78STejun Heo 			    (unsigned long)gcwq);
3558e22bee78STejun Heo 
35598b03ae3cSTejun Heo 		ida_init(&gcwq->worker_ida);
3560db7bccf4STejun Heo 
3561db7bccf4STejun Heo 		gcwq->trustee_state = TRUSTEE_DONE;
3562db7bccf4STejun Heo 		init_waitqueue_head(&gcwq->trustee_wait);
35638b03ae3cSTejun Heo 	}
35648b03ae3cSTejun Heo 
3565e22bee78STejun Heo 	/* create the initial worker */
3566f3421797STejun Heo 	for_each_online_gcwq_cpu(cpu) {
3567e22bee78STejun Heo 		struct global_cwq *gcwq = get_gcwq(cpu);
3568e22bee78STejun Heo 		struct worker *worker;
3569e22bee78STejun Heo 
3570e22bee78STejun Heo 		worker = create_worker(gcwq, true);
3571e22bee78STejun Heo 		BUG_ON(!worker);
3572e22bee78STejun Heo 		spin_lock_irq(&gcwq->lock);
3573e22bee78STejun Heo 		start_worker(worker);
3574e22bee78STejun Heo 		spin_unlock_irq(&gcwq->lock);
3575e22bee78STejun Heo 	}
3576e22bee78STejun Heo 
3577d320c038STejun Heo 	system_wq = alloc_workqueue("events", 0, 0);
3578d320c038STejun Heo 	system_long_wq = alloc_workqueue("events_long", 0, 0);
3579d320c038STejun Heo 	system_nrt_wq = alloc_workqueue("events_nrt", WQ_NON_REENTRANT, 0);
3580f3421797STejun Heo 	system_unbound_wq = alloc_workqueue("events_unbound", WQ_UNBOUND,
3581f3421797STejun Heo 					    WQ_UNBOUND_MAX_ACTIVE);
3582d320c038STejun Heo 	BUG_ON(!system_wq || !system_long_wq || !system_nrt_wq ||
3582d320c038STejun Heo 	       !system_unbound_wq);
35836ee0578bSSuresh Siddha 	return 0;
35841da177e4SLinus Torvalds }
35856ee0578bSSuresh Siddha early_initcall(init_workqueues);
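
/*
 * Usage sketch (editorial; my_work_fn and my_work are hypothetical
 * names): once init_workqueues() has run as an early initcall, any
 * kernel code may queue onto the system workqueues created above.
 *
 *	static void my_work_fn(struct work_struct *work)
 *	{
 *		pr_info("hello from process context\n");
 *	}
 *	static DECLARE_WORK(my_work, my_work_fn);
 *
 *	schedule_work(&my_work);	// queues on system_wq
 *	// or: queue_work(system_unbound_wq, &my_work);  // no cpu affinity
 */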
3586