xref: /linux-6.15/kernel/workqueue.c (revision 8a2e8e5d)
11da177e4SLinus Torvalds /*
21da177e4SLinus Torvalds  * linux/kernel/workqueue.c
31da177e4SLinus Torvalds  *
41da177e4SLinus Torvalds  * Generic mechanism for defining kernel helper threads for running
51da177e4SLinus Torvalds  * arbitrary tasks in process context.
61da177e4SLinus Torvalds  *
71da177e4SLinus Torvalds  * Started by Ingo Molnar, Copyright (C) 2002
81da177e4SLinus Torvalds  *
91da177e4SLinus Torvalds  * Derived from the taskqueue/keventd code by:
101da177e4SLinus Torvalds  *
111da177e4SLinus Torvalds  *   David Woodhouse <dwmw2@infradead.org>
12e1f8e874SFrancois Cami  *   Andrew Morton
131da177e4SLinus Torvalds  *   Kai Petzke <wpp@marie.physik.tu-berlin.de>
141da177e4SLinus Torvalds  *   Theodore Ts'o <tytso@mit.edu>
1589ada679SChristoph Lameter  *
16cde53535SChristoph Lameter  * Made to use alloc_percpu by Christoph Lameter.
171da177e4SLinus Torvalds  */
181da177e4SLinus Torvalds 
191da177e4SLinus Torvalds #include <linux/module.h>
201da177e4SLinus Torvalds #include <linux/kernel.h>
211da177e4SLinus Torvalds #include <linux/sched.h>
221da177e4SLinus Torvalds #include <linux/init.h>
231da177e4SLinus Torvalds #include <linux/signal.h>
241da177e4SLinus Torvalds #include <linux/completion.h>
251da177e4SLinus Torvalds #include <linux/workqueue.h>
261da177e4SLinus Torvalds #include <linux/slab.h>
271da177e4SLinus Torvalds #include <linux/cpu.h>
281da177e4SLinus Torvalds #include <linux/notifier.h>
291da177e4SLinus Torvalds #include <linux/kthread.h>
301fa44ecaSJames Bottomley #include <linux/hardirq.h>
3146934023SChristoph Lameter #include <linux/mempolicy.h>
32341a5958SRafael J. Wysocki #include <linux/freezer.h>
33d5abe669SPeter Zijlstra #include <linux/kallsyms.h>
34d5abe669SPeter Zijlstra #include <linux/debug_locks.h>
354e6045f1SJohannes Berg #include <linux/lockdep.h>
36c34056a3STejun Heo #include <linux/idr.h>
37e22bee78STejun Heo 
38e22bee78STejun Heo #include "workqueue_sched.h"
391da177e4SLinus Torvalds 
40c8e55f36STejun Heo enum {
41db7bccf4STejun Heo 	/* global_cwq flags */
42e22bee78STejun Heo 	GCWQ_MANAGE_WORKERS	= 1 << 0,	/* need to manage workers */
43e22bee78STejun Heo 	GCWQ_MANAGING_WORKERS	= 1 << 1,	/* managing workers */
44e22bee78STejun Heo 	GCWQ_DISASSOCIATED	= 1 << 2,	/* cpu can't serve workers */
45db7bccf4STejun Heo 	GCWQ_FREEZING		= 1 << 3,	/* freeze in progress */
46649027d7STejun Heo 	GCWQ_HIGHPRI_PENDING	= 1 << 4,	/* highpri works on queue */
47db7bccf4STejun Heo 
48c8e55f36STejun Heo 	/* worker flags */
49c8e55f36STejun Heo 	WORKER_STARTED		= 1 << 0,	/* started */
50c8e55f36STejun Heo 	WORKER_DIE		= 1 << 1,	/* die die die */
51c8e55f36STejun Heo 	WORKER_IDLE		= 1 << 2,	/* is idle */
52e22bee78STejun Heo 	WORKER_PREP		= 1 << 3,	/* preparing to run works */
53db7bccf4STejun Heo 	WORKER_ROGUE		= 1 << 4,	/* not bound to any cpu */
54e22bee78STejun Heo 	WORKER_REBIND		= 1 << 5,	/* mom is home, come back */
55fb0e7bebSTejun Heo 	WORKER_CPU_INTENSIVE	= 1 << 6,	/* cpu intensive */
56f3421797STejun Heo 	WORKER_UNBOUND		= 1 << 7,	/* worker is unbound */
57e22bee78STejun Heo 
58fb0e7bebSTejun Heo 	WORKER_NOT_RUNNING	= WORKER_PREP | WORKER_ROGUE | WORKER_REBIND |
59f3421797STejun Heo 				  WORKER_CPU_INTENSIVE | WORKER_UNBOUND,
60db7bccf4STejun Heo 
61db7bccf4STejun Heo 	/* gcwq->trustee_state */
62db7bccf4STejun Heo 	TRUSTEE_START		= 0,		/* start */
63db7bccf4STejun Heo 	TRUSTEE_IN_CHARGE	= 1,		/* trustee in charge of gcwq */
64db7bccf4STejun Heo 	TRUSTEE_BUTCHER		= 2,		/* butcher workers */
65db7bccf4STejun Heo 	TRUSTEE_RELEASE		= 3,		/* release workers */
66db7bccf4STejun Heo 	TRUSTEE_DONE		= 4,		/* trustee is done */
67c8e55f36STejun Heo 
68c8e55f36STejun Heo 	BUSY_WORKER_HASH_ORDER	= 6,		/* 64 pointers */
69c8e55f36STejun Heo 	BUSY_WORKER_HASH_SIZE	= 1 << BUSY_WORKER_HASH_ORDER,
70c8e55f36STejun Heo 	BUSY_WORKER_HASH_MASK	= BUSY_WORKER_HASH_SIZE - 1,
71db7bccf4STejun Heo 
72e22bee78STejun Heo 	MAX_IDLE_WORKERS_RATIO	= 4,		/* 1/4 of busy can be idle */
73e22bee78STejun Heo 	IDLE_WORKER_TIMEOUT	= 300 * HZ,	/* keep idle ones for 5 mins */
74e22bee78STejun Heo 
75e22bee78STejun Heo 	MAYDAY_INITIAL_TIMEOUT	= HZ / 100,	/* call for help after 10ms */
76e22bee78STejun Heo 	MAYDAY_INTERVAL		= HZ / 10,	/* and then every 100ms */
77e22bee78STejun Heo 	CREATE_COOLDOWN		= HZ,		/* time to breathe after fail */
78db7bccf4STejun Heo 	TRUSTEE_COOLDOWN	= HZ / 10,	/* for trustee draining */
791da177e4SLinus Torvalds 
801da177e4SLinus Torvalds 	/*
81e22bee78STejun Heo 	 * Rescue workers are used only in emergencies and are shared by
82e22bee78STejun Heo 	 * all cpus.  Give them a nice level of -20.
83e22bee78STejun Heo 	 */
84e22bee78STejun Heo 	RESCUER_NICE_LEVEL	= -20,
85c8e55f36STejun Heo };
86c8e55f36STejun Heo 
871da177e4SLinus Torvalds /*
884690c4abSTejun Heo  * Structure fields follow one of the following exclusion rules.
894690c4abSTejun Heo  *
90e41e704bSTejun Heo  * I: Modifiable by initialization/destruction paths and read-only for
91e41e704bSTejun Heo  *    everyone else.
924690c4abSTejun Heo  *
93e22bee78STejun Heo  * P: Preemption protected.  Disabling preemption is enough and should
94e22bee78STejun Heo  *    only be modified and accessed from the local cpu.
95e22bee78STejun Heo  *
968b03ae3cSTejun Heo  * L: gcwq->lock protected.  Access with gcwq->lock held.
974690c4abSTejun Heo  *
98e22bee78STejun Heo  * X: During normal operation, modification requires gcwq->lock and
99e22bee78STejun Heo  *    should be done only from local cpu.  Either disabling preemption
100e22bee78STejun Heo  *    on local cpu or grabbing gcwq->lock is enough for read access.
101f3421797STejun Heo  *    If GCWQ_DISASSOCIATED is set, it's identical to L.
102e22bee78STejun Heo  *
10373f53c4aSTejun Heo  * F: wq->flush_mutex protected.
10473f53c4aSTejun Heo  *
1054690c4abSTejun Heo  * W: workqueue_lock protected.
1064690c4abSTejun Heo  */
1074690c4abSTejun Heo 
1088b03ae3cSTejun Heo struct global_cwq;
109c34056a3STejun Heo 
110e22bee78STejun Heo /*
111e22bee78STejun Heo  * The poor guys doing the actual heavy lifting.  All on-duty workers
112e22bee78STejun Heo  * are either serving the manager role, on the idle list or in the busy hash.
113e22bee78STejun Heo  */
114c34056a3STejun Heo struct worker {
115c8e55f36STejun Heo 	/* on idle list while idle, on busy hash table while busy */
116c8e55f36STejun Heo 	union {
117c8e55f36STejun Heo 		struct list_head	entry;	/* L: while idle */
118c8e55f36STejun Heo 		struct hlist_node	hentry;	/* L: while busy */
119c8e55f36STejun Heo 	};
120c8e55f36STejun Heo 
121c34056a3STejun Heo 	struct work_struct	*current_work;	/* L: work being processed */
1228cca0eeaSTejun Heo 	struct cpu_workqueue_struct *current_cwq; /* L: current_work's cwq */
123affee4b2STejun Heo 	struct list_head	scheduled;	/* L: scheduled works */
124c34056a3STejun Heo 	struct task_struct	*task;		/* I: worker task */
1258b03ae3cSTejun Heo 	struct global_cwq	*gcwq;		/* I: the associated gcwq */
126e22bee78STejun Heo 	/* 64 bytes boundary on 64bit, 32 on 32bit */
127e22bee78STejun Heo 	unsigned long		last_active;	/* L: last active timestamp */
128e22bee78STejun Heo 	unsigned int		flags;		/* X: flags */
129c34056a3STejun Heo 	int			id;		/* I: worker id */
130e22bee78STejun Heo 	struct work_struct	rebind_work;	/* L: rebind worker to cpu */
131c34056a3STejun Heo };
132c34056a3STejun Heo 
1334690c4abSTejun Heo /*
134e22bee78STejun Heo  * Global per-cpu workqueue.  There's one and only one for each cpu
135e22bee78STejun Heo  * and all works are queued and processed here regardless of their
136e22bee78STejun Heo  * target workqueues.
1378b03ae3cSTejun Heo  */
1388b03ae3cSTejun Heo struct global_cwq {
1398b03ae3cSTejun Heo 	spinlock_t		lock;		/* the gcwq lock */
1407e11629dSTejun Heo 	struct list_head	worklist;	/* L: list of pending works */
1418b03ae3cSTejun Heo 	unsigned int		cpu;		/* I: the associated cpu */
142db7bccf4STejun Heo 	unsigned int		flags;		/* L: GCWQ_* flags */
143c8e55f36STejun Heo 
144c8e55f36STejun Heo 	int			nr_workers;	/* L: total number of workers */
145c8e55f36STejun Heo 	int			nr_idle;	/* L: currently idle ones */
146c8e55f36STejun Heo 
147c8e55f36STejun Heo 	/* workers are chained either in the idle_list or busy_hash */
148e22bee78STejun Heo 	struct list_head	idle_list;	/* X: list of idle workers */
149c8e55f36STejun Heo 	struct hlist_head	busy_hash[BUSY_WORKER_HASH_SIZE];
150c8e55f36STejun Heo 						/* L: hash of busy workers */
151c8e55f36STejun Heo 
152e22bee78STejun Heo 	struct timer_list	idle_timer;	/* L: worker idle timeout */
153e22bee78STejun Heo 	struct timer_list	mayday_timer;	/* L: SOS timer for workers */
154e22bee78STejun Heo 
1558b03ae3cSTejun Heo 	struct ida		worker_ida;	/* L: for worker IDs */
156db7bccf4STejun Heo 
157db7bccf4STejun Heo 	struct task_struct	*trustee;	/* L: for gcwq shutdown */
158db7bccf4STejun Heo 	unsigned int		trustee_state;	/* L: trustee state */
159db7bccf4STejun Heo 	wait_queue_head_t	trustee_wait;	/* trustee wait */
160e22bee78STejun Heo 	struct worker		*first_idle;	/* L: first idle worker */
1618b03ae3cSTejun Heo } ____cacheline_aligned_in_smp;
1628b03ae3cSTejun Heo 
1638b03ae3cSTejun Heo /*
164502ca9d8STejun Heo  * The per-CPU workqueue.  The lower WORK_STRUCT_FLAG_BITS of
1650f900049STejun Heo  * work_struct->data are used for flags and thus cwqs need to be
1660f900049STejun Heo  * aligned to a (1 << WORK_STRUCT_FLAG_BITS) byte boundary.
1671da177e4SLinus Torvalds  */
1681da177e4SLinus Torvalds struct cpu_workqueue_struct {
1698b03ae3cSTejun Heo 	struct global_cwq	*gcwq;		/* I: the associated gcwq */
1704690c4abSTejun Heo 	struct workqueue_struct *wq;		/* I: the owning workqueue */
17173f53c4aSTejun Heo 	int			work_color;	/* L: current color */
17273f53c4aSTejun Heo 	int			flush_color;	/* L: flushing color */
17373f53c4aSTejun Heo 	int			nr_in_flight[WORK_NR_COLORS];
17473f53c4aSTejun Heo 						/* L: nr of in_flight works */
1751e19ffc6STejun Heo 	int			nr_active;	/* L: nr of active works */
176a0a1a5fdSTejun Heo 	int			max_active;	/* L: max active works */
1771e19ffc6STejun Heo 	struct list_head	delayed_works;	/* L: delayed works */
1780f900049STejun Heo };
1791da177e4SLinus Torvalds 
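/*
 * Editor's sketch (illustrative, not in the original file): the
 * alignment requirement above lets a cwq pointer and the flag bits
 * share the single word in work_struct->data.  Assuming
 * WORK_STRUCT_FLAG_BITS is 8, cwqs are aligned to 256 bytes and:
 *
 *	data = (unsigned long)cwq | WORK_STRUCT_CWQ | WORK_STRUCT_PENDING;
 *	cwq  = (void *)(data & WORK_STRUCT_WQ_DATA_MASK);
 *
 * set_work_cwq() and get_work_cwq() below implement exactly this
 * packing and unpacking.
 */
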
1801da177e4SLinus Torvalds /*
18173f53c4aSTejun Heo  * Structure used to wait for workqueue flush.
18273f53c4aSTejun Heo  */
18373f53c4aSTejun Heo struct wq_flusher {
18473f53c4aSTejun Heo 	struct list_head	list;		/* F: list of flushers */
18573f53c4aSTejun Heo 	int			flush_color;	/* F: flush color waiting for */
18673f53c4aSTejun Heo 	struct completion	done;		/* flush completion */
18773f53c4aSTejun Heo };
1881da177e4SLinus Torvalds 
18973f53c4aSTejun Heo /*
190f2e005aaSTejun Heo  * All cpumasks are assumed to be always set on UP and thus can't be
191f2e005aaSTejun Heo  * used to determine whether there's something to be done.
192f2e005aaSTejun Heo  */
193f2e005aaSTejun Heo #ifdef CONFIG_SMP
194f2e005aaSTejun Heo typedef cpumask_var_t mayday_mask_t;
195f2e005aaSTejun Heo #define mayday_test_and_set_cpu(cpu, mask)	\
196f2e005aaSTejun Heo 	cpumask_test_and_set_cpu((cpu), (mask))
197f2e005aaSTejun Heo #define mayday_clear_cpu(cpu, mask)		cpumask_clear_cpu((cpu), (mask))
198f2e005aaSTejun Heo #define for_each_mayday_cpu(cpu, mask)		for_each_cpu((cpu), (mask))
199f2e005aaSTejun Heo #define alloc_mayday_mask(maskp, gfp)		alloc_cpumask_var((maskp), (gfp))
200f2e005aaSTejun Heo #define free_mayday_mask(mask)			free_cpumask_var((mask))
201f2e005aaSTejun Heo #else
202f2e005aaSTejun Heo typedef unsigned long mayday_mask_t;
203f2e005aaSTejun Heo #define mayday_test_and_set_cpu(cpu, mask)	test_and_set_bit(0, &(mask))
204f2e005aaSTejun Heo #define mayday_clear_cpu(cpu, mask)		clear_bit(0, &(mask))
205f2e005aaSTejun Heo #define for_each_mayday_cpu(cpu, mask)		if ((cpu) = 0, (mask))
206f2e005aaSTejun Heo #define alloc_mayday_mask(maskp, gfp)		true
207f2e005aaSTejun Heo #define free_mayday_mask(mask)			do { } while (0)
208f2e005aaSTejun Heo #endif
2091da177e4SLinus Torvalds 
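/*
 * Editor's usage sketch (illustrative, not in the original file): the
 * mayday mask records which CPUs have asked for a rescuer, e.g.:
 *
 *	if (!mayday_test_and_set_cpu(cpu, wq->mayday_mask))
 *		wake_up_process(wq->rescuer->task);	// first mayday
 *
 * On UP the mask degenerates to bit 0 of an unsigned long, so all of
 * the helpers collapse into single-bit operations.
 */
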
2101da177e4SLinus Torvalds /*
2111da177e4SLinus Torvalds  * The externally visible workqueue abstraction is an array of
2121da177e4SLinus Torvalds  * per-CPU workqueues:
2131da177e4SLinus Torvalds  */
2141da177e4SLinus Torvalds struct workqueue_struct {
21597e37d7bSTejun Heo 	unsigned int		flags;		/* I: WQ_* flags */
216bdbc5dd7STejun Heo 	union {
217bdbc5dd7STejun Heo 		struct cpu_workqueue_struct __percpu	*pcpu;
218bdbc5dd7STejun Heo 		struct cpu_workqueue_struct		*single;
219bdbc5dd7STejun Heo 		unsigned long				v;
220bdbc5dd7STejun Heo 	} cpu_wq;				/* I: cwq's */
2214690c4abSTejun Heo 	struct list_head	list;		/* W: list of all workqueues */
22273f53c4aSTejun Heo 
22373f53c4aSTejun Heo 	struct mutex		flush_mutex;	/* protects wq flushing */
22473f53c4aSTejun Heo 	int			work_color;	/* F: current work color */
22573f53c4aSTejun Heo 	int			flush_color;	/* F: current flush color */
22673f53c4aSTejun Heo 	atomic_t		nr_cwqs_to_flush; /* flush in progress */
22773f53c4aSTejun Heo 	struct wq_flusher	*first_flusher;	/* F: first flusher */
22873f53c4aSTejun Heo 	struct list_head	flusher_queue;	/* F: flush waiters */
22973f53c4aSTejun Heo 	struct list_head	flusher_overflow; /* F: flush overflow list */
23073f53c4aSTejun Heo 
231f2e005aaSTejun Heo 	mayday_mask_t		mayday_mask;	/* cpus requesting rescue */
232e22bee78STejun Heo 	struct worker		*rescuer;	/* I: rescue worker */
233e22bee78STejun Heo 
234dcd989cbSTejun Heo 	int			saved_max_active; /* W: saved cwq max_active */
2354690c4abSTejun Heo 	const char		*name;		/* I: workqueue name */
2364e6045f1SJohannes Berg #ifdef CONFIG_LOCKDEP
2374e6045f1SJohannes Berg 	struct lockdep_map	lockdep_map;
2384e6045f1SJohannes Berg #endif
2391da177e4SLinus Torvalds };
2401da177e4SLinus Torvalds 
241d320c038STejun Heo struct workqueue_struct *system_wq __read_mostly;
242d320c038STejun Heo struct workqueue_struct *system_long_wq __read_mostly;
243d320c038STejun Heo struct workqueue_struct *system_nrt_wq __read_mostly;
244f3421797STejun Heo struct workqueue_struct *system_unbound_wq __read_mostly;
245d320c038STejun Heo EXPORT_SYMBOL_GPL(system_wq);
246d320c038STejun Heo EXPORT_SYMBOL_GPL(system_long_wq);
247d320c038STejun Heo EXPORT_SYMBOL_GPL(system_nrt_wq);
248f3421797STejun Heo EXPORT_SYMBOL_GPL(system_unbound_wq);
249d320c038STejun Heo 
250db7bccf4STejun Heo #define for_each_busy_worker(worker, i, pos, gcwq)			\
251db7bccf4STejun Heo 	for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++)			\
252db7bccf4STejun Heo 		hlist_for_each_entry(worker, pos, &gcwq->busy_hash[i], hentry)
253db7bccf4STejun Heo 
254f3421797STejun Heo static inline int __next_gcwq_cpu(int cpu, const struct cpumask *mask,
255f3421797STejun Heo 				  unsigned int sw)
256f3421797STejun Heo {
257f3421797STejun Heo 	if (cpu < nr_cpu_ids) {
258f3421797STejun Heo 		if (sw & 1) {
259f3421797STejun Heo 			cpu = cpumask_next(cpu, mask);
260f3421797STejun Heo 			if (cpu < nr_cpu_ids)
261f3421797STejun Heo 				return cpu;
262f3421797STejun Heo 		}
263f3421797STejun Heo 		if (sw & 2)
264f3421797STejun Heo 			return WORK_CPU_UNBOUND;
265f3421797STejun Heo 	}
266f3421797STejun Heo 	return WORK_CPU_NONE;
267f3421797STejun Heo }
268f3421797STejun Heo 
269f3421797STejun Heo static inline int __next_wq_cpu(int cpu, const struct cpumask *mask,
270f3421797STejun Heo 				struct workqueue_struct *wq)
271f3421797STejun Heo {
272f3421797STejun Heo 	return __next_gcwq_cpu(cpu, mask, !(wq->flags & WQ_UNBOUND) ? 1 : 2);
273f3421797STejun Heo }
274f3421797STejun Heo 
27509884951STejun Heo /*
27609884951STejun Heo  * CPU iterators
27709884951STejun Heo  *
27809884951STejun Heo  * An extra gcwq is defined for an invalid cpu number
27909884951STejun Heo  * (WORK_CPU_UNBOUND) to host workqueues which are not bound to any
28009884951STejun Heo  * specific CPU.  The following iterators are similar to
28109884951STejun Heo  * for_each_*_cpu() iterators but also considers the unbound gcwq.
28209884951STejun Heo  *
28309884951STejun Heo  * for_each_gcwq_cpu()		: possible CPUs + WORK_CPU_UNBOUND
28409884951STejun Heo  * for_each_online_gcwq_cpu()	: online CPUs + WORK_CPU_UNBOUND
28509884951STejun Heo  * for_each_cwq_cpu()		: possible CPUs for bound workqueues,
28609884951STejun Heo  *				  WORK_CPU_UNBOUND for unbound workqueues
28709884951STejun Heo  */
288f3421797STejun Heo #define for_each_gcwq_cpu(cpu)						\
289f3421797STejun Heo 	for ((cpu) = __next_gcwq_cpu(-1, cpu_possible_mask, 3);		\
290f3421797STejun Heo 	     (cpu) < WORK_CPU_NONE;					\
291f3421797STejun Heo 	     (cpu) = __next_gcwq_cpu((cpu), cpu_possible_mask, 3))
292f3421797STejun Heo 
293f3421797STejun Heo #define for_each_online_gcwq_cpu(cpu)					\
294f3421797STejun Heo 	for ((cpu) = __next_gcwq_cpu(-1, cpu_online_mask, 3);		\
295f3421797STejun Heo 	     (cpu) < WORK_CPU_NONE;					\
296f3421797STejun Heo 	     (cpu) = __next_gcwq_cpu((cpu), cpu_online_mask, 3))
297f3421797STejun Heo 
298f3421797STejun Heo #define for_each_cwq_cpu(cpu, wq)					\
299f3421797STejun Heo 	for ((cpu) = __next_wq_cpu(-1, cpu_possible_mask, (wq));	\
300f3421797STejun Heo 	     (cpu) < WORK_CPU_NONE;					\
301f3421797STejun Heo 	     (cpu) = __next_wq_cpu((cpu), cpu_possible_mask, (wq)))
302f3421797STejun Heo 
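/*
 * Editor's example (not in the original file): on a machine with 4
 * possible CPUs the iterators above expand to
 *
 *	for_each_gcwq_cpu(cpu)		 : 0, 1, 2, 3, WORK_CPU_UNBOUND
 *	for_each_cwq_cpu(cpu, bound_wq)	 : 0, 1, 2, 3
 *	for_each_cwq_cpu(cpu, unbound_wq): WORK_CPU_UNBOUND
 *
 * bound_wq/unbound_wq stand for workqueues without/with WQ_UNBOUND.
 */
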
303a25909a4SPaul E. McKenney #ifdef CONFIG_LOCKDEP
304a25909a4SPaul E. McKenney /**
305a25909a4SPaul E. McKenney  * in_workqueue_context() - in context of specified workqueue?
306a25909a4SPaul E. McKenney  * @wq: the workqueue of interest
307a25909a4SPaul E. McKenney  *
308a25909a4SPaul E. McKenney  * Checks lockdep state to see if the current task is executing from
309a25909a4SPaul E. McKenney  * within a workqueue item.  This function exists only if lockdep is
310a25909a4SPaul E. McKenney  * enabled.
311a25909a4SPaul E. McKenney  */
312a25909a4SPaul E. McKenney int in_workqueue_context(struct workqueue_struct *wq)
313a25909a4SPaul E. McKenney {
314a25909a4SPaul E. McKenney 	return lock_is_held(&wq->lockdep_map);
315a25909a4SPaul E. McKenney }
316a25909a4SPaul E. McKenney #endif
317a25909a4SPaul E. McKenney 
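/*
 * Editor's usage sketch (assumption, not in the original file): this
 * is intended for lockdep-backed assertions, e.g. a subsystem whose
 * state must only be touched from its own workqueue might write
 *
 *	WARN_ON_ONCE(!in_workqueue_context(my_wq));
 *
 * where my_wq is a hypothetical workqueue owned by that subsystem.
 */
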
318dc186ad7SThomas Gleixner #ifdef CONFIG_DEBUG_OBJECTS_WORK
319dc186ad7SThomas Gleixner 
320dc186ad7SThomas Gleixner static struct debug_obj_descr work_debug_descr;
321dc186ad7SThomas Gleixner 
322dc186ad7SThomas Gleixner /*
323dc186ad7SThomas Gleixner  * fixup_init is called when:
324dc186ad7SThomas Gleixner  * - an active object is initialized
325dc186ad7SThomas Gleixner  */
326dc186ad7SThomas Gleixner static int work_fixup_init(void *addr, enum debug_obj_state state)
327dc186ad7SThomas Gleixner {
328dc186ad7SThomas Gleixner 	struct work_struct *work = addr;
329dc186ad7SThomas Gleixner 
330dc186ad7SThomas Gleixner 	switch (state) {
331dc186ad7SThomas Gleixner 	case ODEBUG_STATE_ACTIVE:
332dc186ad7SThomas Gleixner 		cancel_work_sync(work);
333dc186ad7SThomas Gleixner 		debug_object_init(work, &work_debug_descr);
334dc186ad7SThomas Gleixner 		return 1;
335dc186ad7SThomas Gleixner 	default:
336dc186ad7SThomas Gleixner 		return 0;
337dc186ad7SThomas Gleixner 	}
338dc186ad7SThomas Gleixner }
339dc186ad7SThomas Gleixner 
340dc186ad7SThomas Gleixner /*
341dc186ad7SThomas Gleixner  * fixup_activate is called when:
342dc186ad7SThomas Gleixner  * - an active object is activated
343dc186ad7SThomas Gleixner  * - an unknown object is activated (might be a statically initialized object)
344dc186ad7SThomas Gleixner  */
345dc186ad7SThomas Gleixner static int work_fixup_activate(void *addr, enum debug_obj_state state)
346dc186ad7SThomas Gleixner {
347dc186ad7SThomas Gleixner 	struct work_struct *work = addr;
348dc186ad7SThomas Gleixner 
349dc186ad7SThomas Gleixner 	switch (state) {
350dc186ad7SThomas Gleixner 
351dc186ad7SThomas Gleixner 	case ODEBUG_STATE_NOTAVAILABLE:
352dc186ad7SThomas Gleixner 		/*
353dc186ad7SThomas Gleixner 		 * This is not really a fixup. The work struct was
354dc186ad7SThomas Gleixner 		 * statically initialized. We just make sure that it
355dc186ad7SThomas Gleixner 		 * is tracked in the object tracker.
356dc186ad7SThomas Gleixner 		 */
35722df02bbSTejun Heo 		if (test_bit(WORK_STRUCT_STATIC_BIT, work_data_bits(work))) {
358dc186ad7SThomas Gleixner 			debug_object_init(work, &work_debug_descr);
359dc186ad7SThomas Gleixner 			debug_object_activate(work, &work_debug_descr);
360dc186ad7SThomas Gleixner 			return 0;
361dc186ad7SThomas Gleixner 		}
362dc186ad7SThomas Gleixner 		WARN_ON_ONCE(1);
363dc186ad7SThomas Gleixner 		return 0;
364dc186ad7SThomas Gleixner 
365dc186ad7SThomas Gleixner 	case ODEBUG_STATE_ACTIVE:
366dc186ad7SThomas Gleixner 		WARN_ON(1);
367dc186ad7SThomas Gleixner 
368dc186ad7SThomas Gleixner 	default:
369dc186ad7SThomas Gleixner 		return 0;
370dc186ad7SThomas Gleixner 	}
371dc186ad7SThomas Gleixner }
372dc186ad7SThomas Gleixner 
373dc186ad7SThomas Gleixner /*
374dc186ad7SThomas Gleixner  * fixup_free is called when:
375dc186ad7SThomas Gleixner  * - an active object is freed
376dc186ad7SThomas Gleixner  */
377dc186ad7SThomas Gleixner static int work_fixup_free(void *addr, enum debug_obj_state state)
378dc186ad7SThomas Gleixner {
379dc186ad7SThomas Gleixner 	struct work_struct *work = addr;
380dc186ad7SThomas Gleixner 
381dc186ad7SThomas Gleixner 	switch (state) {
382dc186ad7SThomas Gleixner 	case ODEBUG_STATE_ACTIVE:
383dc186ad7SThomas Gleixner 		cancel_work_sync(work);
384dc186ad7SThomas Gleixner 		debug_object_free(work, &work_debug_descr);
385dc186ad7SThomas Gleixner 		return 1;
386dc186ad7SThomas Gleixner 	default:
387dc186ad7SThomas Gleixner 		return 0;
388dc186ad7SThomas Gleixner 	}
389dc186ad7SThomas Gleixner }
390dc186ad7SThomas Gleixner 
391dc186ad7SThomas Gleixner static struct debug_obj_descr work_debug_descr = {
392dc186ad7SThomas Gleixner 	.name		= "work_struct",
393dc186ad7SThomas Gleixner 	.fixup_init	= work_fixup_init,
394dc186ad7SThomas Gleixner 	.fixup_activate	= work_fixup_activate,
395dc186ad7SThomas Gleixner 	.fixup_free	= work_fixup_free,
396dc186ad7SThomas Gleixner };
397dc186ad7SThomas Gleixner 
398dc186ad7SThomas Gleixner static inline void debug_work_activate(struct work_struct *work)
399dc186ad7SThomas Gleixner {
400dc186ad7SThomas Gleixner 	debug_object_activate(work, &work_debug_descr);
401dc186ad7SThomas Gleixner }
402dc186ad7SThomas Gleixner 
403dc186ad7SThomas Gleixner static inline void debug_work_deactivate(struct work_struct *work)
404dc186ad7SThomas Gleixner {
405dc186ad7SThomas Gleixner 	debug_object_deactivate(work, &work_debug_descr);
406dc186ad7SThomas Gleixner }
407dc186ad7SThomas Gleixner 
408dc186ad7SThomas Gleixner void __init_work(struct work_struct *work, int onstack)
409dc186ad7SThomas Gleixner {
410dc186ad7SThomas Gleixner 	if (onstack)
411dc186ad7SThomas Gleixner 		debug_object_init_on_stack(work, &work_debug_descr);
412dc186ad7SThomas Gleixner 	else
413dc186ad7SThomas Gleixner 		debug_object_init(work, &work_debug_descr);
414dc186ad7SThomas Gleixner }
415dc186ad7SThomas Gleixner EXPORT_SYMBOL_GPL(__init_work);
416dc186ad7SThomas Gleixner 
417dc186ad7SThomas Gleixner void destroy_work_on_stack(struct work_struct *work)
418dc186ad7SThomas Gleixner {
419dc186ad7SThomas Gleixner 	debug_object_free(work, &work_debug_descr);
420dc186ad7SThomas Gleixner }
421dc186ad7SThomas Gleixner EXPORT_SYMBOL_GPL(destroy_work_on_stack);
422dc186ad7SThomas Gleixner 
423dc186ad7SThomas Gleixner #else
424dc186ad7SThomas Gleixner static inline void debug_work_activate(struct work_struct *work) { }
425dc186ad7SThomas Gleixner static inline void debug_work_deactivate(struct work_struct *work) { }
426dc186ad7SThomas Gleixner #endif
427dc186ad7SThomas Gleixner 
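/*
 * Editor's usage sketch (illustrative, not in the original file):
 * with CONFIG_DEBUG_OBJECTS_WORK, on-stack work items must be
 * announced to and retired from the object tracker:
 *
 *	struct work_struct work;
 *
 *	INIT_WORK_ONSTACK(&work, my_fn);  // calls __init_work(&work, 1)
 *	schedule_work(&work);
 *	flush_work(&work);	// must finish before the stack frame dies
 *	destroy_work_on_stack(&work);
 *
 * my_fn is a hypothetical work function supplied by the caller.
 */
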
42895402b38SGautham R Shenoy /* Serializes the accesses to the list of workqueues. */
42995402b38SGautham R Shenoy static DEFINE_SPINLOCK(workqueue_lock);
4301da177e4SLinus Torvalds static LIST_HEAD(workqueues);
431a0a1a5fdSTejun Heo static bool workqueue_freezing;		/* W: have wqs started freezing? */
4321da177e4SLinus Torvalds 
43314441960SOleg Nesterov /*
434e22bee78STejun Heo  * The almighty global cpu workqueues.  nr_running is the only field
435e22bee78STejun Heo  * which is expected to be used frequently by other cpus via
436e22bee78STejun Heo  * try_to_wake_up().  Put it in a separate cacheline.
43714441960SOleg Nesterov  */
4388b03ae3cSTejun Heo static DEFINE_PER_CPU(struct global_cwq, global_cwq);
439e22bee78STejun Heo static DEFINE_PER_CPU_SHARED_ALIGNED(atomic_t, gcwq_nr_running);
440f756d5e2SNathan Lynch 
441f3421797STejun Heo /*
442f3421797STejun Heo  * Global cpu workqueue and nr_running counter for unbound gcwq.  The
443f3421797STejun Heo  * gcwq is always online, has GCWQ_DISASSOCIATED set, and all its
444f3421797STejun Heo  * workers have WORKER_UNBOUND set.
445f3421797STejun Heo  */
446f3421797STejun Heo static struct global_cwq unbound_global_cwq;
447f3421797STejun Heo static atomic_t unbound_gcwq_nr_running = ATOMIC_INIT(0);	/* always 0 */
448f3421797STejun Heo 
449c34056a3STejun Heo static int worker_thread(void *__worker);
4501da177e4SLinus Torvalds 
4518b03ae3cSTejun Heo static struct global_cwq *get_gcwq(unsigned int cpu)
4521da177e4SLinus Torvalds {
453f3421797STejun Heo 	if (cpu != WORK_CPU_UNBOUND)
4548b03ae3cSTejun Heo 		return &per_cpu(global_cwq, cpu);
455f3421797STejun Heo 	else
456f3421797STejun Heo 		return &unbound_global_cwq;
4571da177e4SLinus Torvalds }
4581da177e4SLinus Torvalds 
459e22bee78STejun Heo static atomic_t *get_gcwq_nr_running(unsigned int cpu)
460b1f4ec17SOleg Nesterov {
461f3421797STejun Heo 	if (cpu != WORK_CPU_UNBOUND)
462e22bee78STejun Heo 		return &per_cpu(gcwq_nr_running, cpu);
463f3421797STejun Heo 	else
464f3421797STejun Heo 		return &unbound_gcwq_nr_running;
465b1f4ec17SOleg Nesterov }
466b1f4ec17SOleg Nesterov 
4674690c4abSTejun Heo static struct cpu_workqueue_struct *get_cwq(unsigned int cpu,
4684690c4abSTejun Heo 					    struct workqueue_struct *wq)
469a848e3b6SOleg Nesterov {
470f3421797STejun Heo 	if (!(wq->flags & WQ_UNBOUND)) {
471f3421797STejun Heo 		if (likely(cpu < nr_cpu_ids)) {
472f3421797STejun Heo #ifdef CONFIG_SMP
473bdbc5dd7STejun Heo 			return per_cpu_ptr(wq->cpu_wq.pcpu, cpu);
474f3421797STejun Heo #else
475f3421797STejun Heo 			return wq->cpu_wq.single;
476bdbc5dd7STejun Heo #endif
477a848e3b6SOleg Nesterov 		}
478f3421797STejun Heo 	} else if (likely(cpu == WORK_CPU_UNBOUND))
479f3421797STejun Heo 		return wq->cpu_wq.single;
480f3421797STejun Heo 	return NULL;
481f3421797STejun Heo }
482a848e3b6SOleg Nesterov 
48373f53c4aSTejun Heo static unsigned int work_color_to_flags(int color)
48473f53c4aSTejun Heo {
48573f53c4aSTejun Heo 	return color << WORK_STRUCT_COLOR_SHIFT;
48673f53c4aSTejun Heo }
48773f53c4aSTejun Heo 
48873f53c4aSTejun Heo static int get_work_color(struct work_struct *work)
48973f53c4aSTejun Heo {
49073f53c4aSTejun Heo 	return (*work_data_bits(work) >> WORK_STRUCT_COLOR_SHIFT) &
49173f53c4aSTejun Heo 		((1 << WORK_STRUCT_COLOR_BITS) - 1);
49273f53c4aSTejun Heo }
49373f53c4aSTejun Heo 
49473f53c4aSTejun Heo static int work_next_color(int color)
49573f53c4aSTejun Heo {
49673f53c4aSTejun Heo 	return (color + 1) % WORK_NR_COLORS;
4971da177e4SLinus Torvalds }
4981da177e4SLinus Torvalds 
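/*
 * Editor's worked example (assuming WORK_STRUCT_COLOR_BITS == 4 and
 * thus WORK_NR_COLORS == 15, per linux/workqueue.h of this era):
 *
 *	work_color_to_flags(2)	== 2 << WORK_STRUCT_COLOR_SHIFT
 *	get_work_color(work)	recovers that 2 from work->data
 *	work_next_color(14)	== 0	(colors wrap around)
 *
 * The sixteenth value is reserved as WORK_NO_COLOR for works that
 * must stay out of flush accounting.
 */
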
4994594bf15SDavid Howells /*
500e120153dSTejun Heo  * A work's data points to the cwq with WORK_STRUCT_CWQ set while the
501e120153dSTejun Heo  * work is on queue.  Once execution starts, WORK_STRUCT_CWQ is
502e120153dSTejun Heo  * cleared and the work data contains the cpu number it was last on.
5037a22ad75STejun Heo  *
5047a22ad75STejun Heo  * set_work_{cwq|cpu}() and clear_work_data() can be used to set the
5057a22ad75STejun Heo  * cwq, cpu or clear work->data.  These functions should only be
5067a22ad75STejun Heo  * called while the work is owned - ie. while the PENDING bit is set.
5077a22ad75STejun Heo  *
5087a22ad75STejun Heo  * get_work_[g]cwq() can be used to obtain the gcwq or cwq
5097a22ad75STejun Heo  * corresponding to a work.  gcwq is available once the work has been
5107a22ad75STejun Heo  * queued anywhere after initialization.  cwq is available only from
5117a22ad75STejun Heo  * queueing until execution starts.
5124594bf15SDavid Howells  */
5137a22ad75STejun Heo static inline void set_work_data(struct work_struct *work, unsigned long data,
5147a22ad75STejun Heo 				 unsigned long flags)
5157a22ad75STejun Heo {
5167a22ad75STejun Heo 	BUG_ON(!work_pending(work));
5177a22ad75STejun Heo 	atomic_long_set(&work->data, data | flags | work_static(work));
5187a22ad75STejun Heo }
5197a22ad75STejun Heo 
5207a22ad75STejun Heo static void set_work_cwq(struct work_struct *work,
5214690c4abSTejun Heo 			 struct cpu_workqueue_struct *cwq,
5224690c4abSTejun Heo 			 unsigned long extra_flags)
523365970a1SDavid Howells {
5247a22ad75STejun Heo 	set_work_data(work, (unsigned long)cwq,
525e120153dSTejun Heo 		      WORK_STRUCT_PENDING | WORK_STRUCT_CWQ | extra_flags);
526365970a1SDavid Howells }
527365970a1SDavid Howells 
5287a22ad75STejun Heo static void set_work_cpu(struct work_struct *work, unsigned int cpu)
5294d707b9fSOleg Nesterov {
5307a22ad75STejun Heo 	set_work_data(work, cpu << WORK_STRUCT_FLAG_BITS, WORK_STRUCT_PENDING);
5314d707b9fSOleg Nesterov }
5324d707b9fSOleg Nesterov 
5337a22ad75STejun Heo static void clear_work_data(struct work_struct *work)
534365970a1SDavid Howells {
5357a22ad75STejun Heo 	set_work_data(work, WORK_STRUCT_NO_CPU, 0);
5367a22ad75STejun Heo }
5377a22ad75STejun Heo 
5387a22ad75STejun Heo static struct cpu_workqueue_struct *get_work_cwq(struct work_struct *work)
5397a22ad75STejun Heo {
540e120153dSTejun Heo 	unsigned long data = atomic_long_read(&work->data);
5417a22ad75STejun Heo 
542e120153dSTejun Heo 	if (data & WORK_STRUCT_CWQ)
543e120153dSTejun Heo 		return (void *)(data & WORK_STRUCT_WQ_DATA_MASK);
544e120153dSTejun Heo 	else
545e120153dSTejun Heo 		return NULL;
5467a22ad75STejun Heo }
5477a22ad75STejun Heo 
5487a22ad75STejun Heo static struct global_cwq *get_work_gcwq(struct work_struct *work)
5497a22ad75STejun Heo {
550e120153dSTejun Heo 	unsigned long data = atomic_long_read(&work->data);
5517a22ad75STejun Heo 	unsigned int cpu;
5527a22ad75STejun Heo 
553e120153dSTejun Heo 	if (data & WORK_STRUCT_CWQ)
554e120153dSTejun Heo 		return ((struct cpu_workqueue_struct *)
555e120153dSTejun Heo 			(data & WORK_STRUCT_WQ_DATA_MASK))->gcwq;
5567a22ad75STejun Heo 
5577a22ad75STejun Heo 	cpu = data >> WORK_STRUCT_FLAG_BITS;
558bdbc5dd7STejun Heo 	if (cpu == WORK_CPU_NONE)
5597a22ad75STejun Heo 		return NULL;
5607a22ad75STejun Heo 
561f3421797STejun Heo 	BUG_ON(cpu >= nr_cpu_ids && cpu != WORK_CPU_UNBOUND);
5627a22ad75STejun Heo 	return get_gcwq(cpu);
563365970a1SDavid Howells }
564365970a1SDavid Howells 
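/*
 * Editor's summary of the encodings above (not in the original file):
 *
 *	queued:		[ cwq pointer | WORK_STRUCT_CWQ | other flags ]
 *	executing:	[ last cpu << WORK_STRUCT_FLAG_BITS | flags ]
 *	cleared:	WORK_STRUCT_NO_CPU
 *
 * This is why get_work_gcwq() tests WORK_STRUCT_CWQ first before
 * deciding how to decode the remainder of the word.
 */
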
565e22bee78STejun Heo /*
566e22bee78STejun Heo  * Policy functions.  These define the policies on how the global
567e22bee78STejun Heo  * worker pool is managed.  Unless noted otherwise, these functions
568e22bee78STejun Heo  * assume that they're being called with gcwq->lock held.
569e22bee78STejun Heo  */
570e22bee78STejun Heo 
571649027d7STejun Heo static bool __need_more_worker(struct global_cwq *gcwq)
572649027d7STejun Heo {
573649027d7STejun Heo 	return !atomic_read(get_gcwq_nr_running(gcwq->cpu)) ||
574649027d7STejun Heo 		gcwq->flags & GCWQ_HIGHPRI_PENDING;
575649027d7STejun Heo }
576649027d7STejun Heo 
577e22bee78STejun Heo /*
578e22bee78STejun Heo  * Need to wake up a worker?  Called from anything but currently
579e22bee78STejun Heo  * running workers.
580e22bee78STejun Heo  */
581e22bee78STejun Heo static bool need_more_worker(struct global_cwq *gcwq)
582e22bee78STejun Heo {
583649027d7STejun Heo 	return !list_empty(&gcwq->worklist) && __need_more_worker(gcwq);
584e22bee78STejun Heo }
585e22bee78STejun Heo 
586e22bee78STejun Heo /* Can I start working?  Called from busy but !running workers. */
587e22bee78STejun Heo static bool may_start_working(struct global_cwq *gcwq)
588e22bee78STejun Heo {
589e22bee78STejun Heo 	return gcwq->nr_idle;
590e22bee78STejun Heo }
591e22bee78STejun Heo 
592e22bee78STejun Heo /* Do I need to keep working?  Called from currently running workers. */
593e22bee78STejun Heo static bool keep_working(struct global_cwq *gcwq)
594e22bee78STejun Heo {
595e22bee78STejun Heo 	atomic_t *nr_running = get_gcwq_nr_running(gcwq->cpu);
596e22bee78STejun Heo 
597e22bee78STejun Heo 	return !list_empty(&gcwq->worklist) && atomic_read(nr_running) <= 1;
598e22bee78STejun Heo }
599e22bee78STejun Heo 
600e22bee78STejun Heo /* Do we need a new worker?  Called from manager. */
601e22bee78STejun Heo static bool need_to_create_worker(struct global_cwq *gcwq)
602e22bee78STejun Heo {
603e22bee78STejun Heo 	return need_more_worker(gcwq) && !may_start_working(gcwq);
604e22bee78STejun Heo }
605e22bee78STejun Heo 
606e22bee78STejun Heo /* Do I need to be the manager? */
607e22bee78STejun Heo static bool need_to_manage_workers(struct global_cwq *gcwq)
608e22bee78STejun Heo {
609e22bee78STejun Heo 	return need_to_create_worker(gcwq) || gcwq->flags & GCWQ_MANAGE_WORKERS;
610e22bee78STejun Heo }
611e22bee78STejun Heo 
612e22bee78STejun Heo /* Do we have too many workers and should some go away? */
613e22bee78STejun Heo static bool too_many_workers(struct global_cwq *gcwq)
614e22bee78STejun Heo {
615e22bee78STejun Heo 	bool managing = gcwq->flags & GCWQ_MANAGING_WORKERS;
616e22bee78STejun Heo 	int nr_idle = gcwq->nr_idle + managing; /* manager is considered idle */
617e22bee78STejun Heo 	int nr_busy = gcwq->nr_workers - nr_idle;
618e22bee78STejun Heo 
619e22bee78STejun Heo 	return nr_idle > 2 && (nr_idle - 2) * MAX_IDLE_WORKERS_RATIO >= nr_busy;
620e22bee78STejun Heo }
621e22bee78STejun Heo 
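/*
 * Editor's worked example for too_many_workers() (not in the original
 * file): with 10 workers of which 6 are idle and no manager running,
 * nr_busy = 10 - 6 = 4 and (6 - 2) * MAX_IDLE_WORKERS_RATIO = 16 >= 4,
 * so the gcwq is over-staffed and the idle timer may start retiring
 * workers that stay idle past IDLE_WORKER_TIMEOUT.
 */
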
622e22bee78STejun Heo /*
623e22bee78STejun Heo  * Wake up functions.
624e22bee78STejun Heo  */
625e22bee78STejun Heo 
6267e11629dSTejun Heo /* Return the first worker.  Safe with preemption disabled */
6277e11629dSTejun Heo static struct worker *first_worker(struct global_cwq *gcwq)
6287e11629dSTejun Heo {
6297e11629dSTejun Heo 	if (unlikely(list_empty(&gcwq->idle_list)))
6307e11629dSTejun Heo 		return NULL;
6317e11629dSTejun Heo 
6327e11629dSTejun Heo 	return list_first_entry(&gcwq->idle_list, struct worker, entry);
6337e11629dSTejun Heo }
6347e11629dSTejun Heo 
6357e11629dSTejun Heo /**
6367e11629dSTejun Heo  * wake_up_worker - wake up an idle worker
6377e11629dSTejun Heo  * @gcwq: gcwq to wake worker for
6387e11629dSTejun Heo  *
6397e11629dSTejun Heo  * Wake up the first idle worker of @gcwq.
6407e11629dSTejun Heo  *
6417e11629dSTejun Heo  * CONTEXT:
6427e11629dSTejun Heo  * spin_lock_irq(gcwq->lock).
6437e11629dSTejun Heo  */
6447e11629dSTejun Heo static void wake_up_worker(struct global_cwq *gcwq)
6457e11629dSTejun Heo {
6467e11629dSTejun Heo 	struct worker *worker = first_worker(gcwq);
6477e11629dSTejun Heo 
6487e11629dSTejun Heo 	if (likely(worker))
6497e11629dSTejun Heo 		wake_up_process(worker->task);
6507e11629dSTejun Heo }
6517e11629dSTejun Heo 
6524690c4abSTejun Heo /**
653e22bee78STejun Heo  * wq_worker_waking_up - a worker is waking up
654e22bee78STejun Heo  * @task: task waking up
655e22bee78STejun Heo  * @cpu: CPU @task is waking up to
656e22bee78STejun Heo  *
657e22bee78STejun Heo  * This function is called during try_to_wake_up() when a worker is
658e22bee78STejun Heo  * being awoken.
659e22bee78STejun Heo  *
660e22bee78STejun Heo  * CONTEXT:
661e22bee78STejun Heo  * spin_lock_irq(rq->lock)
662e22bee78STejun Heo  */
663e22bee78STejun Heo void wq_worker_waking_up(struct task_struct *task, unsigned int cpu)
664e22bee78STejun Heo {
665e22bee78STejun Heo 	struct worker *worker = kthread_data(task);
666e22bee78STejun Heo 
667e22bee78STejun Heo 	if (likely(!(worker->flags & WORKER_NOT_RUNNING)))
668e22bee78STejun Heo 		atomic_inc(get_gcwq_nr_running(cpu));
669e22bee78STejun Heo }
670e22bee78STejun Heo 
671e22bee78STejun Heo /**
672e22bee78STejun Heo  * wq_worker_sleeping - a worker is going to sleep
673e22bee78STejun Heo  * @task: task going to sleep
674e22bee78STejun Heo  * @cpu: CPU in question, must be the current CPU number
675e22bee78STejun Heo  *
676e22bee78STejun Heo  * This function is called during schedule() when a busy worker is
677e22bee78STejun Heo  * going to sleep.  A worker on the same cpu can be woken up by
678e22bee78STejun Heo  * returning a pointer to its task.
679e22bee78STejun Heo  *
680e22bee78STejun Heo  * CONTEXT:
681e22bee78STejun Heo  * spin_lock_irq(rq->lock)
682e22bee78STejun Heo  *
683e22bee78STejun Heo  * RETURNS:
684e22bee78STejun Heo  * Worker task on @cpu to wake up, %NULL if none.
685e22bee78STejun Heo  */
686e22bee78STejun Heo struct task_struct *wq_worker_sleeping(struct task_struct *task,
687e22bee78STejun Heo 				       unsigned int cpu)
688e22bee78STejun Heo {
689e22bee78STejun Heo 	struct worker *worker = kthread_data(task), *to_wakeup = NULL;
690e22bee78STejun Heo 	struct global_cwq *gcwq = get_gcwq(cpu);
691e22bee78STejun Heo 	atomic_t *nr_running = get_gcwq_nr_running(cpu);
692e22bee78STejun Heo 
693e22bee78STejun Heo 	if (unlikely(worker->flags & WORKER_NOT_RUNNING))
694e22bee78STejun Heo 		return NULL;
695e22bee78STejun Heo 
696e22bee78STejun Heo 	/* this can only happen on the local cpu */
697e22bee78STejun Heo 	BUG_ON(cpu != raw_smp_processor_id());
698e22bee78STejun Heo 
699e22bee78STejun Heo 	/*
700e22bee78STejun Heo 	 * The counterpart of the following dec_and_test, implied mb,
701e22bee78STejun Heo 	 * worklist not empty test sequence is in insert_work().
702e22bee78STejun Heo 	 * Please read comment there.
703e22bee78STejun Heo 	 *
704e22bee78STejun Heo 	 * NOT_RUNNING is clear.  This means that trustee is not in
705e22bee78STejun Heo 	 * charge and we're running on the local cpu w/ rq lock held
706e22bee78STejun Heo 	 * and preemption disabled, which in turn means that no one else
707e22bee78STejun Heo 	 * could be manipulating idle_list, so dereferencing idle_list
708e22bee78STejun Heo 	 * without gcwq lock is safe.
709e22bee78STejun Heo 	 */
710e22bee78STejun Heo 	if (atomic_dec_and_test(nr_running) && !list_empty(&gcwq->worklist))
711e22bee78STejun Heo 		to_wakeup = first_worker(gcwq);
712e22bee78STejun Heo 	return to_wakeup ? to_wakeup->task : NULL;
713e22bee78STejun Heo }
714e22bee78STejun Heo 
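/*
 * Editor's sketch (assumption, simplified): the scheduler side
 * declared in workqueue_sched.h is expected to drive the two hooks
 * above for tasks flagged PF_WQ_WORKER, roughly:
 *
 *	if (prev->flags & PF_WQ_WORKER) {
 *		to_wakeup = wq_worker_sleeping(prev, cpu);
 *		if (to_wakeup)
 *			try_to_wake_up_local(to_wakeup);
 *	}
 *
 * The actual call sites live in kernel/sched.c, not in this file.
 */
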
715e22bee78STejun Heo /**
716e22bee78STejun Heo  * worker_set_flags - set worker flags and adjust nr_running accordingly
717cb444766STejun Heo  * @worker: self
718d302f017STejun Heo  * @flags: flags to set
719d302f017STejun Heo  * @wakeup: wakeup an idle worker if necessary
720d302f017STejun Heo  *
721e22bee78STejun Heo  * Set @flags in @worker->flags and adjust nr_running accordingly.  If
722e22bee78STejun Heo  * nr_running becomes zero and @wakeup is %true, an idle worker is
723e22bee78STejun Heo  * woken up.
724d302f017STejun Heo  *
725cb444766STejun Heo  * CONTEXT:
726cb444766STejun Heo  * spin_lock_irq(gcwq->lock)
727d302f017STejun Heo  */
728d302f017STejun Heo static inline void worker_set_flags(struct worker *worker, unsigned int flags,
729d302f017STejun Heo 				    bool wakeup)
730d302f017STejun Heo {
731e22bee78STejun Heo 	struct global_cwq *gcwq = worker->gcwq;
732e22bee78STejun Heo 
733cb444766STejun Heo 	WARN_ON_ONCE(worker->task != current);
734cb444766STejun Heo 
735e22bee78STejun Heo 	/*
736e22bee78STejun Heo 	 * If transitioning into NOT_RUNNING, adjust nr_running and
737e22bee78STejun Heo 	 * wake up an idle worker as necessary if requested by
738e22bee78STejun Heo 	 * @wakeup.
739e22bee78STejun Heo 	 */
740e22bee78STejun Heo 	if ((flags & WORKER_NOT_RUNNING) &&
741e22bee78STejun Heo 	    !(worker->flags & WORKER_NOT_RUNNING)) {
742e22bee78STejun Heo 		atomic_t *nr_running = get_gcwq_nr_running(gcwq->cpu);
743e22bee78STejun Heo 
744e22bee78STejun Heo 		if (wakeup) {
745e22bee78STejun Heo 			if (atomic_dec_and_test(nr_running) &&
746e22bee78STejun Heo 			    !list_empty(&gcwq->worklist))
747e22bee78STejun Heo 				wake_up_worker(gcwq);
748e22bee78STejun Heo 		} else
749e22bee78STejun Heo 			atomic_dec(nr_running);
750e22bee78STejun Heo 	}
751e22bee78STejun Heo 
752d302f017STejun Heo 	worker->flags |= flags;
753d302f017STejun Heo }
754d302f017STejun Heo 
755d302f017STejun Heo /**
756e22bee78STejun Heo  * worker_clr_flags - clear worker flags and adjust nr_running accordingly
757cb444766STejun Heo  * @worker: self
758d302f017STejun Heo  * @flags: flags to clear
759d302f017STejun Heo  *
760e22bee78STejun Heo  * Clear @flags in @worker->flags and adjust nr_running accordingly.
761d302f017STejun Heo  *
762cb444766STejun Heo  * CONTEXT:
763cb444766STejun Heo  * spin_lock_irq(gcwq->lock)
764d302f017STejun Heo  */
765d302f017STejun Heo static inline void worker_clr_flags(struct worker *worker, unsigned int flags)
766d302f017STejun Heo {
767e22bee78STejun Heo 	struct global_cwq *gcwq = worker->gcwq;
768e22bee78STejun Heo 	unsigned int oflags = worker->flags;
769e22bee78STejun Heo 
770cb444766STejun Heo 	WARN_ON_ONCE(worker->task != current);
771cb444766STejun Heo 
772d302f017STejun Heo 	worker->flags &= ~flags;
773e22bee78STejun Heo 
774e22bee78STejun Heo 	/* if transitioning out of NOT_RUNNING, increment nr_running */
775e22bee78STejun Heo 	if ((flags & WORKER_NOT_RUNNING) && (oflags & WORKER_NOT_RUNNING))
776e22bee78STejun Heo 		if (!(worker->flags & WORKER_NOT_RUNNING))
777e22bee78STejun Heo 			atomic_inc(get_gcwq_nr_running(gcwq->cpu));
778d302f017STejun Heo }
779d302f017STejun Heo 
780d302f017STejun Heo /**
781c8e55f36STejun Heo  * busy_worker_head - return the busy hash head for a work
782c8e55f36STejun Heo  * @gcwq: gcwq of interest
783c8e55f36STejun Heo  * @work: work to be hashed
784c8e55f36STejun Heo  *
785c8e55f36STejun Heo  * Return hash head of @gcwq for @work.
786c8e55f36STejun Heo  *
787c8e55f36STejun Heo  * CONTEXT:
788c8e55f36STejun Heo  * spin_lock_irq(gcwq->lock).
789c8e55f36STejun Heo  *
790c8e55f36STejun Heo  * RETURNS:
791c8e55f36STejun Heo  * Pointer to the hash head.
792c8e55f36STejun Heo  */
793c8e55f36STejun Heo static struct hlist_head *busy_worker_head(struct global_cwq *gcwq,
794c8e55f36STejun Heo 					   struct work_struct *work)
795c8e55f36STejun Heo {
796c8e55f36STejun Heo 	const int base_shift = ilog2(sizeof(struct work_struct));
797c8e55f36STejun Heo 	unsigned long v = (unsigned long)work;
798c8e55f36STejun Heo 
799c8e55f36STejun Heo 	/* simple shift and fold hash, do we need something better? */
800c8e55f36STejun Heo 	v >>= base_shift;
801c8e55f36STejun Heo 	v += v >> BUSY_WORKER_HASH_ORDER;
802c8e55f36STejun Heo 	v &= BUSY_WORKER_HASH_MASK;
803c8e55f36STejun Heo 
804c8e55f36STejun Heo 	return &gcwq->busy_hash[v];
805c8e55f36STejun Heo }
806c8e55f36STejun Heo 
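/*
 * Editor's worked example (hypothetical address, not in the original
 * file): with sizeof(struct work_struct) == 32, base_shift is 5, so
 * for a work at address 0x1440:
 *
 *	v  = 0x1440 >> 5	== 0xa2
 *	v += 0xa2 >> 6		-> 0xa4
 *	v &= 0x3f		-> 0x24
 *
 * selecting busy_hash[0x24] of the 64 buckets.
 */
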
807c8e55f36STejun Heo /**
8088cca0eeaSTejun Heo  * __find_worker_executing_work - find worker which is executing a work
8098cca0eeaSTejun Heo  * @gcwq: gcwq of interest
8108cca0eeaSTejun Heo  * @bwh: hash head as returned by busy_worker_head()
8118cca0eeaSTejun Heo  * @work: work to find worker for
8128cca0eeaSTejun Heo  *
8138cca0eeaSTejun Heo  * Find a worker which is executing @work on @gcwq.  @bwh should be
8148cca0eeaSTejun Heo  * the hash head obtained by calling busy_worker_head() with the same
8158cca0eeaSTejun Heo  * work.
8168cca0eeaSTejun Heo  *
8178cca0eeaSTejun Heo  * CONTEXT:
8188cca0eeaSTejun Heo  * spin_lock_irq(gcwq->lock).
8198cca0eeaSTejun Heo  *
8208cca0eeaSTejun Heo  * RETURNS:
8218cca0eeaSTejun Heo  * Pointer to worker which is executing @work if found, NULL
8228cca0eeaSTejun Heo  * otherwise.
8238cca0eeaSTejun Heo  */
8248cca0eeaSTejun Heo static struct worker *__find_worker_executing_work(struct global_cwq *gcwq,
8258cca0eeaSTejun Heo 						   struct hlist_head *bwh,
8268cca0eeaSTejun Heo 						   struct work_struct *work)
8278cca0eeaSTejun Heo {
8288cca0eeaSTejun Heo 	struct worker *worker;
8298cca0eeaSTejun Heo 	struct hlist_node *tmp;
8308cca0eeaSTejun Heo 
8318cca0eeaSTejun Heo 	hlist_for_each_entry(worker, tmp, bwh, hentry)
8328cca0eeaSTejun Heo 		if (worker->current_work == work)
8338cca0eeaSTejun Heo 			return worker;
8348cca0eeaSTejun Heo 	return NULL;
8358cca0eeaSTejun Heo }
8368cca0eeaSTejun Heo 
8378cca0eeaSTejun Heo /**
8388cca0eeaSTejun Heo  * find_worker_executing_work - find worker which is executing a work
8398cca0eeaSTejun Heo  * @gcwq: gcwq of interest
8408cca0eeaSTejun Heo  * @work: work to find worker for
8418cca0eeaSTejun Heo  *
8428cca0eeaSTejun Heo  * Find a worker which is executing @work on @gcwq.  This function is
8438cca0eeaSTejun Heo  * identical to __find_worker_executing_work() except that this
8448cca0eeaSTejun Heo  * function calculates @bwh itself.
8458cca0eeaSTejun Heo  *
8468cca0eeaSTejun Heo  * CONTEXT:
8478cca0eeaSTejun Heo  * spin_lock_irq(gcwq->lock).
8488cca0eeaSTejun Heo  *
8498cca0eeaSTejun Heo  * RETURNS:
8508cca0eeaSTejun Heo  * Pointer to worker which is executing @work if found, NULL
8518cca0eeaSTejun Heo  * otherwise.
8528cca0eeaSTejun Heo  */
8538cca0eeaSTejun Heo static struct worker *find_worker_executing_work(struct global_cwq *gcwq,
8548cca0eeaSTejun Heo 						 struct work_struct *work)
8558cca0eeaSTejun Heo {
8568cca0eeaSTejun Heo 	return __find_worker_executing_work(gcwq, busy_worker_head(gcwq, work),
8578cca0eeaSTejun Heo 					    work);
8588cca0eeaSTejun Heo }
8598cca0eeaSTejun Heo 
8608cca0eeaSTejun Heo /**
861649027d7STejun Heo  * gcwq_determine_ins_pos - find insertion position
862649027d7STejun Heo  * @gcwq: gcwq of interest
863649027d7STejun Heo  * @cwq: cwq a work is being queued for
864649027d7STejun Heo  *
865649027d7STejun Heo  * A work for @cwq is about to be queued on @gcwq, determine insertion
866649027d7STejun Heo  * position for the work.  If @cwq is for HIGHPRI wq, the work is
867649027d7STejun Heo  * queued at the head of the queue but in FIFO order with respect to
868649027d7STejun Heo  * other HIGHPRI works; otherwise, at the end of the queue.  This
869649027d7STejun Heo  * function also sets GCWQ_HIGHPRI_PENDING flag to hint @gcwq that
870649027d7STejun Heo  * there are HIGHPRI works pending.
871649027d7STejun Heo  *
872649027d7STejun Heo  * CONTEXT:
873649027d7STejun Heo  * spin_lock_irq(gcwq->lock).
874649027d7STejun Heo  *
875649027d7STejun Heo  * RETURNS:
876649027d7STejun Heo  * Pointer to insertion position.
877649027d7STejun Heo  */
878649027d7STejun Heo static inline struct list_head *gcwq_determine_ins_pos(struct global_cwq *gcwq,
8791da177e4SLinus Torvalds 					       struct cpu_workqueue_struct *cwq)
8801da177e4SLinus Torvalds {
881649027d7STejun Heo 	struct work_struct *twork;
8821da177e4SLinus Torvalds 
883649027d7STejun Heo 	if (likely(!(cwq->wq->flags & WQ_HIGHPRI)))
884649027d7STejun Heo 		return &gcwq->worklist;
8851da177e4SLinus Torvalds 
886649027d7STejun Heo 	list_for_each_entry(twork, &gcwq->worklist, entry) {
887649027d7STejun Heo 		struct cpu_workqueue_struct *tcwq = get_work_cwq(twork);
888649027d7STejun Heo 
889649027d7STejun Heo 		if (!(tcwq->wq->flags & WQ_HIGHPRI))
890649027d7STejun Heo 			break;
8911da177e4SLinus Torvalds 	}
8921da177e4SLinus Torvalds 
893649027d7STejun Heo 	gcwq->flags |= GCWQ_HIGHPRI_PENDING;
894649027d7STejun Heo 	return &twork->entry;
895649027d7STejun Heo }
896649027d7STejun Heo 
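/*
 * Editor's example (not in the original file): if gcwq->worklist holds
 * [H1, H2, N1, N2] where H* came from WQ_HIGHPRI workqueues and N*
 * from normal ones, a new HIGHPRI work H3 is inserted just before N1,
 * giving [H1, H2, H3, N1, N2]: ahead of normal works but FIFO among
 * HIGHPRI works.
 */
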
897649027d7STejun Heo /**
8987e11629dSTejun Heo  * insert_work - insert a work into gcwq
8994690c4abSTejun Heo  * @cwq: cwq @work belongs to
9004690c4abSTejun Heo  * @work: work to insert
9014690c4abSTejun Heo  * @head: insertion point
9024690c4abSTejun Heo  * @extra_flags: extra WORK_STRUCT_* flags to set
9034690c4abSTejun Heo  *
9047e11629dSTejun Heo  * Insert @work which belongs to @cwq into @gcwq before @head.
9057e11629dSTejun Heo  * @extra_flags is or'd to work_struct flags.
9064690c4abSTejun Heo  *
9074690c4abSTejun Heo  * CONTEXT:
9088b03ae3cSTejun Heo  * spin_lock_irq(gcwq->lock).
9091da177e4SLinus Torvalds  */
910b89deed3SOleg Nesterov static void insert_work(struct cpu_workqueue_struct *cwq,
9114690c4abSTejun Heo 			struct work_struct *work, struct list_head *head,
9124690c4abSTejun Heo 			unsigned int extra_flags)
913b89deed3SOleg Nesterov {
914e22bee78STejun Heo 	struct global_cwq *gcwq = cwq->gcwq;
915e1d8aa9fSFrederic Weisbecker 
9164690c4abSTejun Heo 	/* we own @work, set data and link */
9177a22ad75STejun Heo 	set_work_cwq(work, cwq, extra_flags);
9184690c4abSTejun Heo 
9196e84d644SOleg Nesterov 	/*
9206e84d644SOleg Nesterov 	 * Ensure that we get the right work->data if we see the
9216e84d644SOleg Nesterov 	 * result of list_add() below, see try_to_grab_pending().
9226e84d644SOleg Nesterov 	 */
9236e84d644SOleg Nesterov 	smp_wmb();
9244690c4abSTejun Heo 
9251a4d9b0aSOleg Nesterov 	list_add_tail(&work->entry, head);
926e22bee78STejun Heo 
927e22bee78STejun Heo 	/*
928e22bee78STejun Heo 	 * Ensure either wq_worker_sleeping() sees the above
929e22bee78STejun Heo 	 * list_add_tail() or we see zero nr_running to avoid workers
930e22bee78STejun Heo 	 * lying around lazily while there are works to be processed.
931e22bee78STejun Heo 	 */
932e22bee78STejun Heo 	smp_mb();
933e22bee78STejun Heo 
934649027d7STejun Heo 	if (__need_more_worker(gcwq))
935e22bee78STejun Heo 		wake_up_worker(gcwq);
936b89deed3SOleg Nesterov }
937b89deed3SOleg Nesterov 
9384690c4abSTejun Heo static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
9391da177e4SLinus Torvalds 			 struct work_struct *work)
9401da177e4SLinus Torvalds {
941502ca9d8STejun Heo 	struct global_cwq *gcwq;
942502ca9d8STejun Heo 	struct cpu_workqueue_struct *cwq;
9431e19ffc6STejun Heo 	struct list_head *worklist;
944*8a2e8e5dSTejun Heo 	unsigned int work_flags;
9451da177e4SLinus Torvalds 	unsigned long flags;
9461da177e4SLinus Torvalds 
947dc186ad7SThomas Gleixner 	debug_work_activate(work);
9481e19ffc6STejun Heo 
949e41e704bSTejun Heo 	if (WARN_ON_ONCE(wq->flags & WQ_DYING))
950e41e704bSTejun Heo 		return;
951e41e704bSTejun Heo 
952c7fc77f7STejun Heo 	/* determine gcwq to use */
953c7fc77f7STejun Heo 	if (!(wq->flags & WQ_UNBOUND)) {
954c7fc77f7STejun Heo 		struct global_cwq *last_gcwq;
955c7fc77f7STejun Heo 
956f3421797STejun Heo 		if (unlikely(cpu == WORK_CPU_UNBOUND))
957f3421797STejun Heo 			cpu = raw_smp_processor_id();
958f3421797STejun Heo 
95918aa9effSTejun Heo 		/*
96018aa9effSTejun Heo 		 * It's multi cpu.  If @wq is non-reentrant and @work
96118aa9effSTejun Heo 		 * was previously on a different cpu, it might still
96218aa9effSTejun Heo 		 * be running there, in which case the work needs to
96318aa9effSTejun Heo 		 * be queued on that cpu to guarantee non-reentrance.
96418aa9effSTejun Heo 		 */
965502ca9d8STejun Heo 		gcwq = get_gcwq(cpu);
96618aa9effSTejun Heo 		if (wq->flags & WQ_NON_REENTRANT &&
96718aa9effSTejun Heo 		    (last_gcwq = get_work_gcwq(work)) && last_gcwq != gcwq) {
96818aa9effSTejun Heo 			struct worker *worker;
96918aa9effSTejun Heo 
97018aa9effSTejun Heo 			spin_lock_irqsave(&last_gcwq->lock, flags);
97118aa9effSTejun Heo 
97218aa9effSTejun Heo 			worker = find_worker_executing_work(last_gcwq, work);
97318aa9effSTejun Heo 
97418aa9effSTejun Heo 			if (worker && worker->current_cwq->wq == wq)
97518aa9effSTejun Heo 				gcwq = last_gcwq;
97618aa9effSTejun Heo 			else {
97718aa9effSTejun Heo 				/* meh... not running there, queue here */
97818aa9effSTejun Heo 				spin_unlock_irqrestore(&last_gcwq->lock, flags);
97918aa9effSTejun Heo 				spin_lock_irqsave(&gcwq->lock, flags);
98018aa9effSTejun Heo 			}
98118aa9effSTejun Heo 		} else
9828b03ae3cSTejun Heo 			spin_lock_irqsave(&gcwq->lock, flags);
983f3421797STejun Heo 	} else {
984f3421797STejun Heo 		gcwq = get_gcwq(WORK_CPU_UNBOUND);
985f3421797STejun Heo 		spin_lock_irqsave(&gcwq->lock, flags);
986502ca9d8STejun Heo 	}
987502ca9d8STejun Heo 
988502ca9d8STejun Heo 	/* gcwq determined, get cwq and queue */
989502ca9d8STejun Heo 	cwq = get_cwq(gcwq->cpu, wq);
990502ca9d8STejun Heo 
9914690c4abSTejun Heo 	BUG_ON(!list_empty(&work->entry));
9921e19ffc6STejun Heo 
99373f53c4aSTejun Heo 	cwq->nr_in_flight[cwq->work_color]++;
994*8a2e8e5dSTejun Heo 	work_flags = work_color_to_flags(cwq->work_color);
9951e19ffc6STejun Heo 
9961e19ffc6STejun Heo 	if (likely(cwq->nr_active < cwq->max_active)) {
9971e19ffc6STejun Heo 		cwq->nr_active++;
998649027d7STejun Heo 		worklist = gcwq_determine_ins_pos(gcwq, cwq);
999*8a2e8e5dSTejun Heo 	} else {
1000*8a2e8e5dSTejun Heo 		work_flags |= WORK_STRUCT_DELAYED;
10011e19ffc6STejun Heo 		worklist = &cwq->delayed_works;
1002*8a2e8e5dSTejun Heo 	}
10031e19ffc6STejun Heo 
1004*8a2e8e5dSTejun Heo 	insert_work(cwq, work, worklist, work_flags);
10051e19ffc6STejun Heo 
10068b03ae3cSTejun Heo 	spin_unlock_irqrestore(&gcwq->lock, flags);
10071da177e4SLinus Torvalds }
10081da177e4SLinus Torvalds 
10090fcb78c2SRolf Eike Beer /**
10100fcb78c2SRolf Eike Beer  * queue_work - queue work on a workqueue
10110fcb78c2SRolf Eike Beer  * @wq: workqueue to use
10120fcb78c2SRolf Eike Beer  * @work: work to queue
10130fcb78c2SRolf Eike Beer  *
1014057647fcSAlan Stern  * Returns 0 if @work was already on a queue, non-zero otherwise.
10151da177e4SLinus Torvalds  *
101600dfcaf7SOleg Nesterov  * We queue the work to the CPU on which it was submitted, but if the CPU dies
101700dfcaf7SOleg Nesterov  * it can be processed by another CPU.
10181da177e4SLinus Torvalds  */
10197ad5b3a5SHarvey Harrison int queue_work(struct workqueue_struct *wq, struct work_struct *work)
10201da177e4SLinus Torvalds {
1021ef1ca236SOleg Nesterov 	int ret;
10221da177e4SLinus Torvalds 
1023ef1ca236SOleg Nesterov 	ret = queue_work_on(get_cpu(), wq, work);
1024a848e3b6SOleg Nesterov 	put_cpu();
1025ef1ca236SOleg Nesterov 
10261da177e4SLinus Torvalds 	return ret;
10271da177e4SLinus Torvalds }
1028ae90dd5dSDave Jones EXPORT_SYMBOL_GPL(queue_work);
10291da177e4SLinus Torvalds 
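/*
 * Editor's usage sketch (my_wq and my_fn are hypothetical; not in the
 * original file):
 *
 *	static void my_fn(struct work_struct *work) { ... }
 *	static DECLARE_WORK(my_work, my_fn);
 *
 *	queue_work(my_wq, &my_work);	// returns 1, work queued
 *	queue_work(my_wq, &my_work);	// returns 0, already pending
 *
 * The second call fails the PENDING test in queue_work_on() below and
 * the work runs only once.
 */
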
1030c1a220e7SZhang Rui /**
1031c1a220e7SZhang Rui  * queue_work_on - queue work on specific cpu
1032c1a220e7SZhang Rui  * @cpu: CPU number to execute work on
1033c1a220e7SZhang Rui  * @wq: workqueue to use
1034c1a220e7SZhang Rui  * @work: work to queue
1035c1a220e7SZhang Rui  *
1036c1a220e7SZhang Rui  * Returns 0 if @work was already on a queue, non-zero otherwise.
1037c1a220e7SZhang Rui  *
1038c1a220e7SZhang Rui  * We queue the work to a specific CPU; the caller must ensure it
1039c1a220e7SZhang Rui  * can't go away.
1040c1a220e7SZhang Rui  */
1041c1a220e7SZhang Rui int
1042c1a220e7SZhang Rui queue_work_on(int cpu, struct workqueue_struct *wq, struct work_struct *work)
1043c1a220e7SZhang Rui {
1044c1a220e7SZhang Rui 	int ret = 0;
1045c1a220e7SZhang Rui 
104622df02bbSTejun Heo 	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
10474690c4abSTejun Heo 		__queue_work(cpu, wq, work);
1048c1a220e7SZhang Rui 		ret = 1;
1049c1a220e7SZhang Rui 	}
1050c1a220e7SZhang Rui 	return ret;
1051c1a220e7SZhang Rui }
1052c1a220e7SZhang Rui EXPORT_SYMBOL_GPL(queue_work_on);
1053c1a220e7SZhang Rui 
10546d141c3fSLi Zefan static void delayed_work_timer_fn(unsigned long __data)
10551da177e4SLinus Torvalds {
105652bad64dSDavid Howells 	struct delayed_work *dwork = (struct delayed_work *)__data;
10577a22ad75STejun Heo 	struct cpu_workqueue_struct *cwq = get_work_cwq(&dwork->work);
10581da177e4SLinus Torvalds 
10594690c4abSTejun Heo 	__queue_work(smp_processor_id(), cwq->wq, &dwork->work);
10601da177e4SLinus Torvalds }
10611da177e4SLinus Torvalds 
10620fcb78c2SRolf Eike Beer /**
10630fcb78c2SRolf Eike Beer  * queue_delayed_work - queue work on a workqueue after delay
10640fcb78c2SRolf Eike Beer  * @wq: workqueue to use
1065af9997e4SRandy Dunlap  * @dwork: delayable work to queue
10660fcb78c2SRolf Eike Beer  * @delay: number of jiffies to wait before queueing
10670fcb78c2SRolf Eike Beer  *
1068057647fcSAlan Stern  * Returns 0 if @work was already on a queue, non-zero otherwise.
10690fcb78c2SRolf Eike Beer  */
10707ad5b3a5SHarvey Harrison int queue_delayed_work(struct workqueue_struct *wq,
107152bad64dSDavid Howells 			struct delayed_work *dwork, unsigned long delay)
10721da177e4SLinus Torvalds {
107352bad64dSDavid Howells 	if (delay == 0)
107463bc0362SOleg Nesterov 		return queue_work(wq, &dwork->work);
10751da177e4SLinus Torvalds 
107663bc0362SOleg Nesterov 	return queue_delayed_work_on(-1, wq, dwork, delay);
10771da177e4SLinus Torvalds }
1078ae90dd5dSDave Jones EXPORT_SYMBOL_GPL(queue_delayed_work);
10791da177e4SLinus Torvalds 
10800fcb78c2SRolf Eike Beer /**
10810fcb78c2SRolf Eike Beer  * queue_delayed_work_on - queue work on specific CPU after delay
10820fcb78c2SRolf Eike Beer  * @cpu: CPU number to execute work on
10830fcb78c2SRolf Eike Beer  * @wq: workqueue to use
1084af9997e4SRandy Dunlap  * @dwork: work to queue
10850fcb78c2SRolf Eike Beer  * @delay: number of jiffies to wait before queueing
10860fcb78c2SRolf Eike Beer  *
1087057647fcSAlan Stern  * Returns 0 if @work was already on a queue, non-zero otherwise.
10880fcb78c2SRolf Eike Beer  */
10897a6bc1cdSVenkatesh Pallipadi int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
109052bad64dSDavid Howells 			struct delayed_work *dwork, unsigned long delay)
10917a6bc1cdSVenkatesh Pallipadi {
10927a6bc1cdSVenkatesh Pallipadi 	int ret = 0;
109352bad64dSDavid Howells 	struct timer_list *timer = &dwork->timer;
109452bad64dSDavid Howells 	struct work_struct *work = &dwork->work;
10957a6bc1cdSVenkatesh Pallipadi 
109622df02bbSTejun Heo 	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
1097c7fc77f7STejun Heo 		unsigned int lcpu;
10987a22ad75STejun Heo 
10997a6bc1cdSVenkatesh Pallipadi 		BUG_ON(timer_pending(timer));
11007a6bc1cdSVenkatesh Pallipadi 		BUG_ON(!list_empty(&work->entry));
11017a6bc1cdSVenkatesh Pallipadi 
11028a3e77ccSAndrew Liu 		timer_stats_timer_set_start_info(&dwork->timer);
11038a3e77ccSAndrew Liu 
11047a22ad75STejun Heo 		/*
11057a22ad75STejun Heo 		 * This stores cwq for the moment, for the timer_fn.
11067a22ad75STejun Heo 		 * Note that the work's gcwq is preserved to allow
11077a22ad75STejun Heo 		 * reentrance detection for delayed works.
11087a22ad75STejun Heo 		 */
1109c7fc77f7STejun Heo 		if (!(wq->flags & WQ_UNBOUND)) {
1110c7fc77f7STejun Heo 			struct global_cwq *gcwq = get_work_gcwq(work);
1111c7fc77f7STejun Heo 
1112c7fc77f7STejun Heo 			if (gcwq && gcwq->cpu != WORK_CPU_UNBOUND)
1113c7fc77f7STejun Heo 				lcpu = gcwq->cpu;
1114c7fc77f7STejun Heo 			else
1115c7fc77f7STejun Heo 				lcpu = raw_smp_processor_id();
1116c7fc77f7STejun Heo 		} else
1117c7fc77f7STejun Heo 			lcpu = WORK_CPU_UNBOUND;
1118c7fc77f7STejun Heo 
11197a22ad75STejun Heo 		set_work_cwq(work, get_cwq(lcpu, wq), 0);
1120c7fc77f7STejun Heo 
11217a6bc1cdSVenkatesh Pallipadi 		timer->expires = jiffies + delay;
112252bad64dSDavid Howells 		timer->data = (unsigned long)dwork;
11237a6bc1cdSVenkatesh Pallipadi 		timer->function = delayed_work_timer_fn;
112463bc0362SOleg Nesterov 
112563bc0362SOleg Nesterov 		if (unlikely(cpu >= 0))
11267a6bc1cdSVenkatesh Pallipadi 			add_timer_on(timer, cpu);
112763bc0362SOleg Nesterov 		else
112863bc0362SOleg Nesterov 			add_timer(timer);
11297a6bc1cdSVenkatesh Pallipadi 		ret = 1;
11307a6bc1cdSVenkatesh Pallipadi 	}
11317a6bc1cdSVenkatesh Pallipadi 	return ret;
11327a6bc1cdSVenkatesh Pallipadi }
1133ae90dd5dSDave Jones EXPORT_SYMBOL_GPL(queue_delayed_work_on);
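/*
 * Illustrative sketch, not part of the original file: the two ways
 * queue_delayed_work_on() is typically called.  my_wq and my_dwork
 * are hypothetical, and only one of the calls would succeed while
 * the work is pending.
 */
#if 0	/* example only */
	/* run on CPU 2 roughly 10ms from now */
	queue_delayed_work_on(2, my_wq, &my_dwork, msecs_to_jiffies(10));

	/* cpu < 0: plain add_timer(), equivalent to queue_delayed_work() */
	queue_delayed_work_on(-1, my_wq, &my_dwork, msecs_to_jiffies(10));
#endif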
11341da177e4SLinus Torvalds 
1135c8e55f36STejun Heo /**
1136c8e55f36STejun Heo  * worker_enter_idle - enter idle state
1137c8e55f36STejun Heo  * @worker: worker which is entering idle state
1138c8e55f36STejun Heo  *
1139c8e55f36STejun Heo  * @worker is entering idle state.  Update stats and idle timer if
1140c8e55f36STejun Heo  * necessary.
1141c8e55f36STejun Heo  *
1142c8e55f36STejun Heo  * LOCKING:
1143c8e55f36STejun Heo  * spin_lock_irq(gcwq->lock).
1144c8e55f36STejun Heo  */
1145c8e55f36STejun Heo static void worker_enter_idle(struct worker *worker)
11461da177e4SLinus Torvalds {
1147c8e55f36STejun Heo 	struct global_cwq *gcwq = worker->gcwq;
1148c8e55f36STejun Heo 
1149c8e55f36STejun Heo 	BUG_ON(worker->flags & WORKER_IDLE);
1150c8e55f36STejun Heo 	BUG_ON(!list_empty(&worker->entry) &&
1151c8e55f36STejun Heo 	       (worker->hentry.next || worker->hentry.pprev));
1152c8e55f36STejun Heo 
1153cb444766STejun Heo 	/* can't use worker_set_flags(), also called from start_worker() */
1154cb444766STejun Heo 	worker->flags |= WORKER_IDLE;
1155c8e55f36STejun Heo 	gcwq->nr_idle++;
1156e22bee78STejun Heo 	worker->last_active = jiffies;
1157c8e55f36STejun Heo 
1158c8e55f36STejun Heo 	/* idle_list is LIFO */
1159c8e55f36STejun Heo 	list_add(&worker->entry, &gcwq->idle_list);
1160db7bccf4STejun Heo 
1161e22bee78STejun Heo 	if (likely(!(worker->flags & WORKER_ROGUE))) {
1162e22bee78STejun Heo 		if (too_many_workers(gcwq) && !timer_pending(&gcwq->idle_timer))
1163e22bee78STejun Heo 			mod_timer(&gcwq->idle_timer,
1164e22bee78STejun Heo 				  jiffies + IDLE_WORKER_TIMEOUT);
1165e22bee78STejun Heo 	} else
1166db7bccf4STejun Heo 		wake_up_all(&gcwq->trustee_wait);
1167cb444766STejun Heo 
1168cb444766STejun Heo 	/* sanity check nr_running */
1169cb444766STejun Heo 	WARN_ON_ONCE(gcwq->nr_workers == gcwq->nr_idle &&
1170cb444766STejun Heo 		     atomic_read(get_gcwq_nr_running(gcwq->cpu)));
1171c8e55f36STejun Heo }
1172c8e55f36STejun Heo 
1173c8e55f36STejun Heo /**
1174c8e55f36STejun Heo  * worker_leave_idle - leave idle state
1175c8e55f36STejun Heo  * @worker: worker which is leaving idle state
1176c8e55f36STejun Heo  *
1177c8e55f36STejun Heo  * @worker is leaving idle state.  Update stats.
1178c8e55f36STejun Heo  *
1179c8e55f36STejun Heo  * LOCKING:
1180c8e55f36STejun Heo  * spin_lock_irq(gcwq->lock).
1181c8e55f36STejun Heo  */
1182c8e55f36STejun Heo static void worker_leave_idle(struct worker *worker)
1183c8e55f36STejun Heo {
1184c8e55f36STejun Heo 	struct global_cwq *gcwq = worker->gcwq;
1185c8e55f36STejun Heo 
1186c8e55f36STejun Heo 	BUG_ON(!(worker->flags & WORKER_IDLE));
1187d302f017STejun Heo 	worker_clr_flags(worker, WORKER_IDLE);
1188c8e55f36STejun Heo 	gcwq->nr_idle--;
1189c8e55f36STejun Heo 	list_del_init(&worker->entry);
1190c8e55f36STejun Heo }
1191c8e55f36STejun Heo 
1192e22bee78STejun Heo /**
1193e22bee78STejun Heo  * worker_maybe_bind_and_lock - bind worker to its cpu if possible and lock gcwq
1194e22bee78STejun Heo  * @worker: self
1195e22bee78STejun Heo  *
1196e22bee78STejun Heo  * Works which are scheduled while the cpu is online must at least be
1197e22bee78STejun Heo  * scheduled to a worker which is bound to the cpu so that if they are
1198e22bee78STejun Heo  * flushed from cpu callbacks while cpu is going down, they are
1199e22bee78STejun Heo  * guaranteed to execute on the cpu.
1200e22bee78STejun Heo  *
1201e22bee78STejun Heo  * This function is to be used by rogue workers and rescuers to bind
1202e22bee78STejun Heo  * themselves to the target cpu and may race with cpu going down or
1203e22bee78STejun Heo  * coming online.  kthread_bind() can't be used because it may put the
1204e22bee78STejun Heo  * worker on an already dead cpu and set_cpus_allowed_ptr() can't be used
1205e22bee78STejun Heo  * verbatim as it's best effort and blocking and gcwq may be
1206e22bee78STejun Heo  * [dis]associated in the meantime.
1207e22bee78STejun Heo  *
1208e22bee78STejun Heo  * This function tries set_cpus_allowed_ptr() and locks gcwq to verify
1209e22bee78STejun Heo  * the binding against GCWQ_DISASSOCIATED, which is set during
1210e22bee78STejun Heo  * CPU_DYING and cleared during CPU_ONLINE; so if the worker enters
1211e22bee78STejun Heo  * idle state or fetches works without dropping the lock, it can
1212e22bee78STejun Heo  * guarantee the scheduling requirement described in the first paragraph.
1213e22bee78STejun Heo  *
1214e22bee78STejun Heo  * CONTEXT:
1215e22bee78STejun Heo  * Might sleep.  Called without any lock but returns with gcwq->lock
1216e22bee78STejun Heo  * held.
1217e22bee78STejun Heo  *
1218e22bee78STejun Heo  * RETURNS:
1219e22bee78STejun Heo  * %true if the associated gcwq is online (@worker is successfully
1220e22bee78STejun Heo  * bound), %false if offline.
1221e22bee78STejun Heo  */
1222e22bee78STejun Heo static bool worker_maybe_bind_and_lock(struct worker *worker)
1223972fa1c5SNamhyung Kim __acquires(&gcwq->lock)
1224e22bee78STejun Heo {
1225e22bee78STejun Heo 	struct global_cwq *gcwq = worker->gcwq;
1226e22bee78STejun Heo 	struct task_struct *task = worker->task;
1227e22bee78STejun Heo 
1228e22bee78STejun Heo 	while (true) {
1229e22bee78STejun Heo 		/*
1230e22bee78STejun Heo 		 * The following call may fail, succeed or succeed
1231e22bee78STejun Heo 		 * without actually migrating the task to the cpu if
1232e22bee78STejun Heo 		 * it races with cpu hotunplug operation.  Verify
1233e22bee78STejun Heo 		 * against GCWQ_DISASSOCIATED.
1234e22bee78STejun Heo 		 */
1235f3421797STejun Heo 		if (!(gcwq->flags & GCWQ_DISASSOCIATED))
1236e22bee78STejun Heo 			set_cpus_allowed_ptr(task, get_cpu_mask(gcwq->cpu));
1237e22bee78STejun Heo 
1238e22bee78STejun Heo 		spin_lock_irq(&gcwq->lock);
1239e22bee78STejun Heo 		if (gcwq->flags & GCWQ_DISASSOCIATED)
1240e22bee78STejun Heo 			return false;
1241e22bee78STejun Heo 		if (task_cpu(task) == gcwq->cpu &&
1242e22bee78STejun Heo 		    cpumask_equal(&current->cpus_allowed,
1243e22bee78STejun Heo 				  get_cpu_mask(gcwq->cpu)))
1244e22bee78STejun Heo 			return true;
1245e22bee78STejun Heo 		spin_unlock_irq(&gcwq->lock);
1246e22bee78STejun Heo 
1247e22bee78STejun Heo 		/* CPU has come up in between, retry migration */
1248e22bee78STejun Heo 		cpu_relax();
1249e22bee78STejun Heo 	}
1250e22bee78STejun Heo }
1251e22bee78STejun Heo 
1252e22bee78STejun Heo /*
1253e22bee78STejun Heo  * Function for worker->rebind_work used to rebind rogue busy workers
1254e22bee78STejun Heo  * to the associated cpu which is coming back online.  This is
1255e22bee78STejun Heo  * scheduled by cpu up but can race with other cpu hotplug operations
1256e22bee78STejun Heo  * and may be executed twice without intervening cpu down.
1257e22bee78STejun Heo  */
1258e22bee78STejun Heo static void worker_rebind_fn(struct work_struct *work)
1259e22bee78STejun Heo {
1260e22bee78STejun Heo 	struct worker *worker = container_of(work, struct worker, rebind_work);
1261e22bee78STejun Heo 	struct global_cwq *gcwq = worker->gcwq;
1262e22bee78STejun Heo 
1263e22bee78STejun Heo 	if (worker_maybe_bind_and_lock(worker))
1264e22bee78STejun Heo 		worker_clr_flags(worker, WORKER_REBIND);
1265e22bee78STejun Heo 
1266e22bee78STejun Heo 	spin_unlock_irq(&gcwq->lock);
1267e22bee78STejun Heo }
1268e22bee78STejun Heo 
1269c34056a3STejun Heo static struct worker *alloc_worker(void)
1270c34056a3STejun Heo {
1271c34056a3STejun Heo 	struct worker *worker;
1272c34056a3STejun Heo 
1273c34056a3STejun Heo 	worker = kzalloc(sizeof(*worker), GFP_KERNEL);
1274c8e55f36STejun Heo 	if (worker) {
1275c8e55f36STejun Heo 		INIT_LIST_HEAD(&worker->entry);
1276affee4b2STejun Heo 		INIT_LIST_HEAD(&worker->scheduled);
1277e22bee78STejun Heo 		INIT_WORK(&worker->rebind_work, worker_rebind_fn);
1278e22bee78STejun Heo 		/* on creation a worker is in !idle && prep state */
1279e22bee78STejun Heo 		worker->flags = WORKER_PREP;
1280c8e55f36STejun Heo 	}
1281c34056a3STejun Heo 	return worker;
1282c34056a3STejun Heo }
1283c34056a3STejun Heo 
1284c34056a3STejun Heo /**
1285c34056a3STejun Heo  * create_worker - create a new workqueue worker
12867e11629dSTejun Heo  * @gcwq: gcwq the new worker will belong to
1287c34056a3STejun Heo  * @bind: whether to bind the worker to @gcwq's CPU or not
1288c34056a3STejun Heo  *
12897e11629dSTejun Heo  * Create a new worker which is bound to @gcwq.  The returned worker
1290c34056a3STejun Heo  * can be started by calling start_worker() or destroyed using
1291c34056a3STejun Heo  * destroy_worker().
1292c34056a3STejun Heo  *
1293c34056a3STejun Heo  * CONTEXT:
1294c34056a3STejun Heo  * Might sleep.  Does GFP_KERNEL allocations.
1295c34056a3STejun Heo  *
1296c34056a3STejun Heo  * RETURNS:
1297c34056a3STejun Heo  * Pointer to the newly created worker.
1298c34056a3STejun Heo  */
12997e11629dSTejun Heo static struct worker *create_worker(struct global_cwq *gcwq, bool bind)
1300c34056a3STejun Heo {
1301f3421797STejun Heo 	bool on_unbound_cpu = gcwq->cpu == WORK_CPU_UNBOUND;
1302c34056a3STejun Heo 	struct worker *worker = NULL;
1303f3421797STejun Heo 	int id = -1;
1304c34056a3STejun Heo 
13058b03ae3cSTejun Heo 	spin_lock_irq(&gcwq->lock);
13068b03ae3cSTejun Heo 	while (ida_get_new(&gcwq->worker_ida, &id)) {
13078b03ae3cSTejun Heo 		spin_unlock_irq(&gcwq->lock);
13088b03ae3cSTejun Heo 		if (!ida_pre_get(&gcwq->worker_ida, GFP_KERNEL))
1309c34056a3STejun Heo 			goto fail;
13108b03ae3cSTejun Heo 		spin_lock_irq(&gcwq->lock);
1311c34056a3STejun Heo 	}
13128b03ae3cSTejun Heo 	spin_unlock_irq(&gcwq->lock);
1313c34056a3STejun Heo 
1314c34056a3STejun Heo 	worker = alloc_worker();
1315c34056a3STejun Heo 	if (!worker)
1316c34056a3STejun Heo 		goto fail;
1317c34056a3STejun Heo 
13188b03ae3cSTejun Heo 	worker->gcwq = gcwq;
1319c34056a3STejun Heo 	worker->id = id;
1320c34056a3STejun Heo 
1321f3421797STejun Heo 	if (!on_unbound_cpu)
1322f3421797STejun Heo 		worker->task = kthread_create(worker_thread, worker,
1323f3421797STejun Heo 					      "kworker/%u:%d", gcwq->cpu, id);
1324f3421797STejun Heo 	else
1325f3421797STejun Heo 		worker->task = kthread_create(worker_thread, worker,
1326f3421797STejun Heo 					      "kworker/u:%d", id);
1327c34056a3STejun Heo 	if (IS_ERR(worker->task))
1328c34056a3STejun Heo 		goto fail;
1329c34056a3STejun Heo 
1330db7bccf4STejun Heo 	/*
1331db7bccf4STejun Heo 	 * A rogue worker will become a regular one if CPU comes
1332db7bccf4STejun Heo 	 * online later on.  Make sure every worker has
1333db7bccf4STejun Heo 	 * PF_THREAD_BOUND set.
1334db7bccf4STejun Heo 	 */
1335f3421797STejun Heo 	if (bind && !on_unbound_cpu)
13368b03ae3cSTejun Heo 		kthread_bind(worker->task, gcwq->cpu);
1337f3421797STejun Heo 	else {
1338db7bccf4STejun Heo 		worker->task->flags |= PF_THREAD_BOUND;
1339f3421797STejun Heo 		if (on_unbound_cpu)
1340f3421797STejun Heo 			worker->flags |= WORKER_UNBOUND;
1341f3421797STejun Heo 	}
1342c34056a3STejun Heo 
1343c34056a3STejun Heo 	return worker;
1344c34056a3STejun Heo fail:
1345c34056a3STejun Heo 	if (id >= 0) {
13468b03ae3cSTejun Heo 		spin_lock_irq(&gcwq->lock);
13478b03ae3cSTejun Heo 		ida_remove(&gcwq->worker_ida, id);
13488b03ae3cSTejun Heo 		spin_unlock_irq(&gcwq->lock);
1349c34056a3STejun Heo 	}
1350c34056a3STejun Heo 	kfree(worker);
1351c34056a3STejun Heo 	return NULL;
1352c34056a3STejun Heo }
1353c34056a3STejun Heo 
1354c34056a3STejun Heo /**
1355c34056a3STejun Heo  * start_worker - start a newly created worker
1356c34056a3STejun Heo  * @worker: worker to start
1357c34056a3STejun Heo  *
1358c8e55f36STejun Heo  * Make the gcwq aware of @worker and start it.
1359c34056a3STejun Heo  *
1360c34056a3STejun Heo  * CONTEXT:
13618b03ae3cSTejun Heo  * spin_lock_irq(gcwq->lock).
1362c34056a3STejun Heo  */
1363c34056a3STejun Heo static void start_worker(struct worker *worker)
1364c34056a3STejun Heo {
1365cb444766STejun Heo 	worker->flags |= WORKER_STARTED;
1366c8e55f36STejun Heo 	worker->gcwq->nr_workers++;
1367c8e55f36STejun Heo 	worker_enter_idle(worker);
1368c34056a3STejun Heo 	wake_up_process(worker->task);
1369c34056a3STejun Heo }
1370c34056a3STejun Heo 
1371c34056a3STejun Heo /**
1372c34056a3STejun Heo  * destroy_worker - destroy a workqueue worker
1373c34056a3STejun Heo  * @worker: worker to be destroyed
1374c34056a3STejun Heo  *
1375c8e55f36STejun Heo  * Destroy @worker and adjust @gcwq stats accordingly.
1376c8e55f36STejun Heo  *
1377c8e55f36STejun Heo  * CONTEXT:
1378c8e55f36STejun Heo  * spin_lock_irq(gcwq->lock) which is released and regrabbed.
1379c34056a3STejun Heo  */
1380c34056a3STejun Heo static void destroy_worker(struct worker *worker)
1381c34056a3STejun Heo {
13828b03ae3cSTejun Heo 	struct global_cwq *gcwq = worker->gcwq;
1383c34056a3STejun Heo 	int id = worker->id;
1384c34056a3STejun Heo 
1385c34056a3STejun Heo 	/* sanity check frenzy */
1386c34056a3STejun Heo 	BUG_ON(worker->current_work);
1387affee4b2STejun Heo 	BUG_ON(!list_empty(&worker->scheduled));
1388c34056a3STejun Heo 
1389c8e55f36STejun Heo 	if (worker->flags & WORKER_STARTED)
1390c8e55f36STejun Heo 		gcwq->nr_workers--;
1391c8e55f36STejun Heo 	if (worker->flags & WORKER_IDLE)
1392c8e55f36STejun Heo 		gcwq->nr_idle--;
1393c8e55f36STejun Heo 
1394c8e55f36STejun Heo 	list_del_init(&worker->entry);
1395cb444766STejun Heo 	worker->flags |= WORKER_DIE;
1396c8e55f36STejun Heo 
1397c8e55f36STejun Heo 	spin_unlock_irq(&gcwq->lock);
1398c8e55f36STejun Heo 
1399c34056a3STejun Heo 	kthread_stop(worker->task);
1400c34056a3STejun Heo 	kfree(worker);
1401c34056a3STejun Heo 
14028b03ae3cSTejun Heo 	spin_lock_irq(&gcwq->lock);
14038b03ae3cSTejun Heo 	ida_remove(&gcwq->worker_ida, id);
1404c34056a3STejun Heo }
1405c34056a3STejun Heo 
1406e22bee78STejun Heo static void idle_worker_timeout(unsigned long __gcwq)
1407e22bee78STejun Heo {
1408e22bee78STejun Heo 	struct global_cwq *gcwq = (void *)__gcwq;
1409e22bee78STejun Heo 
1410e22bee78STejun Heo 	spin_lock_irq(&gcwq->lock);
1411e22bee78STejun Heo 
1412e22bee78STejun Heo 	if (too_many_workers(gcwq)) {
1413e22bee78STejun Heo 		struct worker *worker;
1414e22bee78STejun Heo 		unsigned long expires;
1415e22bee78STejun Heo 
1416e22bee78STejun Heo 		/* idle_list is kept in LIFO order, check the last one */
1417e22bee78STejun Heo 		worker = list_entry(gcwq->idle_list.prev, struct worker, entry);
1418e22bee78STejun Heo 		expires = worker->last_active + IDLE_WORKER_TIMEOUT;
1419e22bee78STejun Heo 
1420e22bee78STejun Heo 		if (time_before(jiffies, expires))
1421e22bee78STejun Heo 			mod_timer(&gcwq->idle_timer, expires);
1422e22bee78STejun Heo 		else {
1423e22bee78STejun Heo 			/* it's been idle for too long, wake up manager */
1424e22bee78STejun Heo 			gcwq->flags |= GCWQ_MANAGE_WORKERS;
1425e22bee78STejun Heo 			wake_up_worker(gcwq);
1426e22bee78STejun Heo 		}
1427e22bee78STejun Heo 	}
1428e22bee78STejun Heo 
1429e22bee78STejun Heo 	spin_unlock_irq(&gcwq->lock);
1430e22bee78STejun Heo }
1431e22bee78STejun Heo 
1432e22bee78STejun Heo static bool send_mayday(struct work_struct *work)
1433e22bee78STejun Heo {
1434e22bee78STejun Heo 	struct cpu_workqueue_struct *cwq = get_work_cwq(work);
1435e22bee78STejun Heo 	struct workqueue_struct *wq = cwq->wq;
1436f3421797STejun Heo 	unsigned int cpu;
1437e22bee78STejun Heo 
1438e22bee78STejun Heo 	if (!(wq->flags & WQ_RESCUER))
1439e22bee78STejun Heo 		return false;
1440e22bee78STejun Heo 
1441e22bee78STejun Heo 	/* mayday mayday mayday */
1442f3421797STejun Heo 	cpu = cwq->gcwq->cpu;
1443f3421797STejun Heo 	/* WORK_CPU_UNBOUND can't be set in cpumask, use cpu 0 instead */
1444f3421797STejun Heo 	if (cpu == WORK_CPU_UNBOUND)
1445f3421797STejun Heo 		cpu = 0;
1446f2e005aaSTejun Heo 	if (!mayday_test_and_set_cpu(cpu, wq->mayday_mask))
1447e22bee78STejun Heo 		wake_up_process(wq->rescuer->task);
1448e22bee78STejun Heo 	return true;
1449e22bee78STejun Heo }
1450e22bee78STejun Heo 
1451e22bee78STejun Heo static void gcwq_mayday_timeout(unsigned long __gcwq)
1452e22bee78STejun Heo {
1453e22bee78STejun Heo 	struct global_cwq *gcwq = (void *)__gcwq;
1454e22bee78STejun Heo 	struct work_struct *work;
1455e22bee78STejun Heo 
1456e22bee78STejun Heo 	spin_lock_irq(&gcwq->lock);
1457e22bee78STejun Heo 
1458e22bee78STejun Heo 	if (need_to_create_worker(gcwq)) {
1459e22bee78STejun Heo 		/*
1460e22bee78STejun Heo 		 * We've been trying to create a new worker but
1461e22bee78STejun Heo 		 * haven't been successful.  We might be hitting an
1462e22bee78STejun Heo 		 * allocation deadlock.  Send distress signals to
1463e22bee78STejun Heo 		 * rescuers.
1464e22bee78STejun Heo 		 */
1465e22bee78STejun Heo 		list_for_each_entry(work, &gcwq->worklist, entry)
1466e22bee78STejun Heo 			send_mayday(work);
1467e22bee78STejun Heo 	}
1468e22bee78STejun Heo 
1469e22bee78STejun Heo 	spin_unlock_irq(&gcwq->lock);
1470e22bee78STejun Heo 
1471e22bee78STejun Heo 	mod_timer(&gcwq->mayday_timer, jiffies + MAYDAY_INTERVAL);
1472e22bee78STejun Heo }
1473e22bee78STejun Heo 
1474e22bee78STejun Heo /**
1475e22bee78STejun Heo  * maybe_create_worker - create a new worker if necessary
1476e22bee78STejun Heo  * @gcwq: gcwq to create a new worker for
1477e22bee78STejun Heo  *
1478e22bee78STejun Heo  * Create a new worker for @gcwq if necessary.  @gcwq is guaranteed to
1479e22bee78STejun Heo  * have at least one idle worker on return from this function.  If
1480e22bee78STejun Heo  * creating a new worker takes longer than MAYDAY_INTERVAL, mayday is
1481e22bee78STejun Heo  * sent to all rescuers with works scheduled on @gcwq to resolve
1482e22bee78STejun Heo  * possible allocation deadlock.
1483e22bee78STejun Heo  *
1484e22bee78STejun Heo  * On return, need_to_create_worker() is guaranteed to be false and
1485e22bee78STejun Heo  * may_start_working() true.
1486e22bee78STejun Heo  *
1487e22bee78STejun Heo  * LOCKING:
1488e22bee78STejun Heo  * spin_lock_irq(gcwq->lock) which may be released and regrabbed
1489e22bee78STejun Heo  * multiple times.  Does GFP_KERNEL allocations.  Called only from
1490e22bee78STejun Heo  * manager.
1491e22bee78STejun Heo  *
1492e22bee78STejun Heo  * RETURNS:
1493e22bee78STejun Heo  * false if no action was taken and gcwq->lock stayed locked, true
1494e22bee78STejun Heo  * otherwise.
1495e22bee78STejun Heo  */
1496e22bee78STejun Heo static bool maybe_create_worker(struct global_cwq *gcwq)
149706bd6ebfSNamhyung Kim __releases(&gcwq->lock)
149806bd6ebfSNamhyung Kim __acquires(&gcwq->lock)
1499e22bee78STejun Heo {
1500e22bee78STejun Heo 	if (!need_to_create_worker(gcwq))
1501e22bee78STejun Heo 		return false;
1502e22bee78STejun Heo restart:
15039f9c2364STejun Heo 	spin_unlock_irq(&gcwq->lock);
15049f9c2364STejun Heo 
1505e22bee78STejun Heo 	/* if we don't make progress in MAYDAY_INITIAL_TIMEOUT, call for help */
1506e22bee78STejun Heo 	mod_timer(&gcwq->mayday_timer, jiffies + MAYDAY_INITIAL_TIMEOUT);
1507e22bee78STejun Heo 
1508e22bee78STejun Heo 	while (true) {
1509e22bee78STejun Heo 		struct worker *worker;
1510e22bee78STejun Heo 
1511e22bee78STejun Heo 		worker = create_worker(gcwq, true);
1512e22bee78STejun Heo 		if (worker) {
1513e22bee78STejun Heo 			del_timer_sync(&gcwq->mayday_timer);
1514e22bee78STejun Heo 			spin_lock_irq(&gcwq->lock);
1515e22bee78STejun Heo 			start_worker(worker);
1516e22bee78STejun Heo 			BUG_ON(need_to_create_worker(gcwq));
1517e22bee78STejun Heo 			return true;
1518e22bee78STejun Heo 		}
1519e22bee78STejun Heo 
1520e22bee78STejun Heo 		if (!need_to_create_worker(gcwq))
1521e22bee78STejun Heo 			break;
1522e22bee78STejun Heo 
1523e22bee78STejun Heo 		__set_current_state(TASK_INTERRUPTIBLE);
1524e22bee78STejun Heo 		schedule_timeout(CREATE_COOLDOWN);
15259f9c2364STejun Heo 
1526e22bee78STejun Heo 		if (!need_to_create_worker(gcwq))
1527e22bee78STejun Heo 			break;
1528e22bee78STejun Heo 	}
1529e22bee78STejun Heo 
1530e22bee78STejun Heo 	del_timer_sync(&gcwq->mayday_timer);
1531e22bee78STejun Heo 	spin_lock_irq(&gcwq->lock);
1532e22bee78STejun Heo 	if (need_to_create_worker(gcwq))
1533e22bee78STejun Heo 		goto restart;
1534e22bee78STejun Heo 	return true;
1535e22bee78STejun Heo }
1536e22bee78STejun Heo 
1537e22bee78STejun Heo /**
1538e22bee78STejun Heo  * maybe_destroy_workers - destroy workers which have been idle for a while
1539e22bee78STejun Heo  * @gcwq: gcwq to destroy workers for
1540e22bee78STejun Heo  *
1541e22bee78STejun Heo  * Destroy @gcwq workers which have been idle for longer than
1542e22bee78STejun Heo  * IDLE_WORKER_TIMEOUT.
1543e22bee78STejun Heo  *
1544e22bee78STejun Heo  * LOCKING:
1545e22bee78STejun Heo  * spin_lock_irq(gcwq->lock) which may be released and regrabbed
1546e22bee78STejun Heo  * multiple times.  Called only from manager.
1547e22bee78STejun Heo  *
1548e22bee78STejun Heo  * RETURNS:
1549e22bee78STejun Heo  * false if no action was taken and gcwq->lock stayed locked, true
1550e22bee78STejun Heo  * otherwise.
1551e22bee78STejun Heo  */
1552e22bee78STejun Heo static bool maybe_destroy_workers(struct global_cwq *gcwq)
1553e22bee78STejun Heo {
1554e22bee78STejun Heo 	bool ret = false;
1555e22bee78STejun Heo 
1556e22bee78STejun Heo 	while (too_many_workers(gcwq)) {
1557e22bee78STejun Heo 		struct worker *worker;
1558e22bee78STejun Heo 		unsigned long expires;
1559e22bee78STejun Heo 
1560e22bee78STejun Heo 		worker = list_entry(gcwq->idle_list.prev, struct worker, entry);
1561e22bee78STejun Heo 		expires = worker->last_active + IDLE_WORKER_TIMEOUT;
1562e22bee78STejun Heo 
1563e22bee78STejun Heo 		if (time_before(jiffies, expires)) {
1564e22bee78STejun Heo 			mod_timer(&gcwq->idle_timer, expires);
1565e22bee78STejun Heo 			break;
1566e22bee78STejun Heo 		}
1567e22bee78STejun Heo 
1568e22bee78STejun Heo 		destroy_worker(worker);
1569e22bee78STejun Heo 		ret = true;
1570e22bee78STejun Heo 	}
1571e22bee78STejun Heo 
1572e22bee78STejun Heo 	return ret;
1573e22bee78STejun Heo }
1574e22bee78STejun Heo 
1575e22bee78STejun Heo /**
1576e22bee78STejun Heo  * manage_workers - manage worker pool
1577e22bee78STejun Heo  * @worker: self
1578e22bee78STejun Heo  *
1579e22bee78STejun Heo  * Assume the manager role and manage the gcwq worker pool @worker
1580e22bee78STejun Heo  * belongs to.  At any given time, there can be at most one manager
1581e22bee78STejun Heo  * per gcwq.  The exclusion is handled automatically by this function.
1582e22bee78STejun Heo  *
1583e22bee78STejun Heo  * The caller can safely start processing works on false return.  On
1584e22bee78STejun Heo  * true return, it's guaranteed that need_to_create_worker() is false
1585e22bee78STejun Heo  * and may_start_working() is true.
1586e22bee78STejun Heo  *
1587e22bee78STejun Heo  * CONTEXT:
1588e22bee78STejun Heo  * spin_lock_irq(gcwq->lock) which may be released and regrabbed
1589e22bee78STejun Heo  * multiple times.  Does GFP_KERNEL allocations.
1590e22bee78STejun Heo  *
1591e22bee78STejun Heo  * RETURNS:
1592e22bee78STejun Heo  * false if no action was taken and gcwq->lock stayed locked, true if
1593e22bee78STejun Heo  * some action was taken.
1594e22bee78STejun Heo  */
1595e22bee78STejun Heo static bool manage_workers(struct worker *worker)
1596e22bee78STejun Heo {
1597e22bee78STejun Heo 	struct global_cwq *gcwq = worker->gcwq;
1598e22bee78STejun Heo 	bool ret = false;
1599e22bee78STejun Heo 
1600e22bee78STejun Heo 	if (gcwq->flags & GCWQ_MANAGING_WORKERS)
1601e22bee78STejun Heo 		return ret;
1602e22bee78STejun Heo 
1603e22bee78STejun Heo 	gcwq->flags &= ~GCWQ_MANAGE_WORKERS;
1604e22bee78STejun Heo 	gcwq->flags |= GCWQ_MANAGING_WORKERS;
1605e22bee78STejun Heo 
1606e22bee78STejun Heo 	/*
1607e22bee78STejun Heo 	 * Destroy and then create so that may_start_working() is true
1608e22bee78STejun Heo 	 * on return.
1609e22bee78STejun Heo 	 */
1610e22bee78STejun Heo 	ret |= maybe_destroy_workers(gcwq);
1611e22bee78STejun Heo 	ret |= maybe_create_worker(gcwq);
1612e22bee78STejun Heo 
1613e22bee78STejun Heo 	gcwq->flags &= ~GCWQ_MANAGING_WORKERS;
1614e22bee78STejun Heo 
1615e22bee78STejun Heo 	/*
1616e22bee78STejun Heo 	 * The trustee might be waiting to take over the manager
1617e22bee78STejun Heo 	 * position; tell it we're done.
1618e22bee78STejun Heo 	 */
1619e22bee78STejun Heo 	if (unlikely(gcwq->trustee))
1620e22bee78STejun Heo 		wake_up_all(&gcwq->trustee_wait);
1621e22bee78STejun Heo 
1622e22bee78STejun Heo 	return ret;
1623e22bee78STejun Heo }
1624e22bee78STejun Heo 
1625a62428c0STejun Heo /**
1626affee4b2STejun Heo  * move_linked_works - move linked works to a list
1627affee4b2STejun Heo  * @work: start of series of works to be scheduled
1628affee4b2STejun Heo  * @head: target list to append @work to
1629affee4b2STejun Heo  * @nextp: out parameter for nested worklist walking
1630affee4b2STejun Heo  *
1631affee4b2STejun Heo  * Schedule linked works starting from @work to @head.  Work series to
1632affee4b2STejun Heo  * be scheduled starts at @work and includes any consecutive work with
1633affee4b2STejun Heo  * WORK_STRUCT_LINKED set in its predecessor.
1634affee4b2STejun Heo  *
1635affee4b2STejun Heo  * If @nextp is not NULL, it's updated to point to the next work of
1636affee4b2STejun Heo  * the last scheduled work.  This allows move_linked_works() to be
1637affee4b2STejun Heo  * nested inside outer list_for_each_entry_safe().
1638affee4b2STejun Heo  *
1639affee4b2STejun Heo  * CONTEXT:
16408b03ae3cSTejun Heo  * spin_lock_irq(gcwq->lock).
1641affee4b2STejun Heo  */
1642affee4b2STejun Heo static void move_linked_works(struct work_struct *work, struct list_head *head,
1643affee4b2STejun Heo 			      struct work_struct **nextp)
1644affee4b2STejun Heo {
1645affee4b2STejun Heo 	struct work_struct *n;
1646affee4b2STejun Heo 
1647affee4b2STejun Heo 	/*
1648affee4b2STejun Heo 	 * A linked worklist always ends before the end of the list,
1649affee4b2STejun Heo 	 * so use NULL for the list head.
1650affee4b2STejun Heo 	 */
1651affee4b2STejun Heo 	list_for_each_entry_safe_from(work, n, NULL, entry) {
1652affee4b2STejun Heo 		list_move_tail(&work->entry, head);
1653affee4b2STejun Heo 		if (!(*work_data_bits(work) & WORK_STRUCT_LINKED))
1654affee4b2STejun Heo 			break;
1655affee4b2STejun Heo 	}
1656affee4b2STejun Heo 
1657affee4b2STejun Heo 	/*
1658affee4b2STejun Heo 	 * If we're already inside safe list traversal and have moved
1659affee4b2STejun Heo 	 * multiple works to the scheduled queue, the next position
1660affee4b2STejun Heo 	 * needs to be updated.
1661affee4b2STejun Heo 	 */
1662affee4b2STejun Heo 	if (nextp)
1663affee4b2STejun Heo 		*nextp = n;
1664affee4b2STejun Heo }
1665affee4b2STejun Heo 
16661e19ffc6STejun Heo static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq)
16671e19ffc6STejun Heo {
16681e19ffc6STejun Heo 	struct work_struct *work = list_first_entry(&cwq->delayed_works,
16691da177e4SLinus Torvalds 						    struct work_struct, entry);
1670649027d7STejun Heo 	struct list_head *pos = gcwq_determine_ins_pos(cwq->gcwq, cwq);
16711e19ffc6STejun Heo 
1672649027d7STejun Heo 	move_linked_works(work, pos, NULL);
1673*8a2e8e5dSTejun Heo 	__clear_bit(WORK_STRUCT_DELAYED_BIT, work_data_bits(work));
16741e19ffc6STejun Heo 	cwq->nr_active++;
16751e19ffc6STejun Heo }
16761e19ffc6STejun Heo 
1677affee4b2STejun Heo /**
167873f53c4aSTejun Heo  * cwq_dec_nr_in_flight - decrement cwq's nr_in_flight
167973f53c4aSTejun Heo  * @cwq: cwq of interest
168073f53c4aSTejun Heo  * @color: color of work which left the queue
1681*8a2e8e5dSTejun Heo  * @delayed: %true if the work was a delayed one
168273f53c4aSTejun Heo  *
168373f53c4aSTejun Heo  * A work either has completed or is removed from pending queue,
168473f53c4aSTejun Heo  * decrement nr_in_flight of its cwq and handle workqueue flushing.
168573f53c4aSTejun Heo  *
168673f53c4aSTejun Heo  * CONTEXT:
16878b03ae3cSTejun Heo  * spin_lock_irq(gcwq->lock).
168873f53c4aSTejun Heo  */
1689*8a2e8e5dSTejun Heo static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color,
1690*8a2e8e5dSTejun Heo 				 bool delayed)
169173f53c4aSTejun Heo {
169273f53c4aSTejun Heo 	/* ignore uncolored works */
169373f53c4aSTejun Heo 	if (color == WORK_NO_COLOR)
169473f53c4aSTejun Heo 		return;
169573f53c4aSTejun Heo 
169673f53c4aSTejun Heo 	cwq->nr_in_flight[color]--;
16971e19ffc6STejun Heo 
1698*8a2e8e5dSTejun Heo 	if (!delayed) {
1699*8a2e8e5dSTejun Heo 		cwq->nr_active--;
1700502ca9d8STejun Heo 		if (!list_empty(&cwq->delayed_works)) {
17011e19ffc6STejun Heo 			/* one down, submit a delayed one */
1702502ca9d8STejun Heo 			if (cwq->nr_active < cwq->max_active)
17031e19ffc6STejun Heo 				cwq_activate_first_delayed(cwq);
1704502ca9d8STejun Heo 		}
1705*8a2e8e5dSTejun Heo 	}
170673f53c4aSTejun Heo 
170773f53c4aSTejun Heo 	/* is flush in progress and are we at the flushing tip? */
170873f53c4aSTejun Heo 	if (likely(cwq->flush_color != color))
170973f53c4aSTejun Heo 		return;
171073f53c4aSTejun Heo 
171173f53c4aSTejun Heo 	/* are there still in-flight works? */
171273f53c4aSTejun Heo 	if (cwq->nr_in_flight[color])
171373f53c4aSTejun Heo 		return;
171473f53c4aSTejun Heo 
171573f53c4aSTejun Heo 	/* this cwq is done, clear flush_color */
171673f53c4aSTejun Heo 	cwq->flush_color = -1;
171773f53c4aSTejun Heo 
171873f53c4aSTejun Heo 	/*
171973f53c4aSTejun Heo 	 * If this was the last cwq, wake up the first flusher.  It
172073f53c4aSTejun Heo 	 * will handle the rest.
172173f53c4aSTejun Heo 	 */
172273f53c4aSTejun Heo 	if (atomic_dec_and_test(&cwq->wq->nr_cwqs_to_flush))
172373f53c4aSTejun Heo 		complete(&cwq->wq->first_flusher->done);
172473f53c4aSTejun Heo }
172573f53c4aSTejun Heo 
172673f53c4aSTejun Heo /**
1727a62428c0STejun Heo  * process_one_work - process single work
1728c34056a3STejun Heo  * @worker: self
1729a62428c0STejun Heo  * @work: work to process
1730a62428c0STejun Heo  *
1731a62428c0STejun Heo  * Process @work.  This function contains all the logic necessary to
1732a62428c0STejun Heo  * process a single work including synchronization against and
1733a62428c0STejun Heo  * interaction with other workers on the same cpu, queueing and
1734a62428c0STejun Heo  * flushing.  As long as the context requirement is met, any worker
1735a62428c0STejun Heo  * can call this function to process a work.
1736a62428c0STejun Heo  *
1737a62428c0STejun Heo  * CONTEXT:
17388b03ae3cSTejun Heo  * spin_lock_irq(gcwq->lock) which is released and regrabbed.
1739a62428c0STejun Heo  */
1740c34056a3STejun Heo static void process_one_work(struct worker *worker, struct work_struct *work)
174106bd6ebfSNamhyung Kim __releases(&gcwq->lock)
174206bd6ebfSNamhyung Kim __acquires(&gcwq->lock)
17431da177e4SLinus Torvalds {
17447e11629dSTejun Heo 	struct cpu_workqueue_struct *cwq = get_work_cwq(work);
17458b03ae3cSTejun Heo 	struct global_cwq *gcwq = cwq->gcwq;
1746c8e55f36STejun Heo 	struct hlist_head *bwh = busy_worker_head(gcwq, work);
1747fb0e7bebSTejun Heo 	bool cpu_intensive = cwq->wq->flags & WQ_CPU_INTENSIVE;
17486bb49e59SDavid Howells 	work_func_t f = work->func;
174973f53c4aSTejun Heo 	int work_color;
17507e11629dSTejun Heo 	struct worker *collision;
17514e6045f1SJohannes Berg #ifdef CONFIG_LOCKDEP
17524e6045f1SJohannes Berg 	/*
1753a62428c0STejun Heo 	 * It is permissible to free the struct work_struct from
1754a62428c0STejun Heo 	 * inside the function that is called from it; we need to take
1755a62428c0STejun Heo 	 * this into account for lockdep too.  To avoid bogus "held
1756a62428c0STejun Heo 	 * lock freed" warnings as well as problems when looking into
1757a62428c0STejun Heo 	 * work->lockdep_map, make a copy and use that here.
17584e6045f1SJohannes Berg 	 */
17594e6045f1SJohannes Berg 	struct lockdep_map lockdep_map = work->lockdep_map;
17604e6045f1SJohannes Berg #endif
17617e11629dSTejun Heo 	/*
17627e11629dSTejun Heo 	 * A single work shouldn't be executed concurrently by
17637e11629dSTejun Heo 	 * multiple workers on a single cpu.  Check whether anyone is
17647e11629dSTejun Heo 	 * already processing the work.  If so, defer the work to the
17657e11629dSTejun Heo 	 * currently executing one.
17667e11629dSTejun Heo 	 */
17677e11629dSTejun Heo 	collision = __find_worker_executing_work(gcwq, bwh, work);
17687e11629dSTejun Heo 	if (unlikely(collision)) {
17697e11629dSTejun Heo 		move_linked_works(work, &collision->scheduled, NULL);
17707e11629dSTejun Heo 		return;
17717e11629dSTejun Heo 	}
17721da177e4SLinus Torvalds 
1773a62428c0STejun Heo 	/* claim and process */
17741da177e4SLinus Torvalds 	debug_work_deactivate(work);
1775c8e55f36STejun Heo 	hlist_add_head(&worker->hentry, bwh);
1776c34056a3STejun Heo 	worker->current_work = work;
17778cca0eeaSTejun Heo 	worker->current_cwq = cwq;
177873f53c4aSTejun Heo 	work_color = get_work_color(work);
17797a22ad75STejun Heo 
17807a22ad75STejun Heo 	/* record the current cpu number in the work data and dequeue */
17817a22ad75STejun Heo 	set_work_cpu(work, gcwq->cpu);
1782a62428c0STejun Heo 	list_del_init(&work->entry);
1783a62428c0STejun Heo 
1784649027d7STejun Heo 	/*
1785649027d7STejun Heo 	 * If HIGHPRI_PENDING, check the next work, and, if HIGHPRI,
1786649027d7STejun Heo 	 * wake up another worker; otherwise, clear HIGHPRI_PENDING.
1787649027d7STejun Heo 	 */
1788649027d7STejun Heo 	if (unlikely(gcwq->flags & GCWQ_HIGHPRI_PENDING)) {
1789649027d7STejun Heo 		struct work_struct *nwork = list_first_entry(&gcwq->worklist,
1790649027d7STejun Heo 						struct work_struct, entry);
1791649027d7STejun Heo 
1792649027d7STejun Heo 		if (!list_empty(&gcwq->worklist) &&
1793649027d7STejun Heo 		    get_work_cwq(nwork)->wq->flags & WQ_HIGHPRI)
1794649027d7STejun Heo 			wake_up_worker(gcwq);
1795649027d7STejun Heo 		else
1796649027d7STejun Heo 			gcwq->flags &= ~GCWQ_HIGHPRI_PENDING;
1797649027d7STejun Heo 	}
1798649027d7STejun Heo 
1799fb0e7bebSTejun Heo 	/*
1800fb0e7bebSTejun Heo 	 * CPU intensive works don't participate in concurrency
1801fb0e7bebSTejun Heo 	 * management.  They're the scheduler's responsibility.
1802fb0e7bebSTejun Heo 	 */
1803fb0e7bebSTejun Heo 	if (unlikely(cpu_intensive))
1804fb0e7bebSTejun Heo 		worker_set_flags(worker, WORKER_CPU_INTENSIVE, true);
1805fb0e7bebSTejun Heo 
18068b03ae3cSTejun Heo 	spin_unlock_irq(&gcwq->lock);
18071da177e4SLinus Torvalds 
180823b2e599SOleg Nesterov 	work_clear_pending(work);
18093295f0efSIngo Molnar 	lock_map_acquire(&cwq->wq->lockdep_map);
18103295f0efSIngo Molnar 	lock_map_acquire(&lockdep_map);
181165f27f38SDavid Howells 	f(work);
18123295f0efSIngo Molnar 	lock_map_release(&lockdep_map);
18133295f0efSIngo Molnar 	lock_map_release(&cwq->wq->lockdep_map);
18141da177e4SLinus Torvalds 
1815d5abe669SPeter Zijlstra 	if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
1816d5abe669SPeter Zijlstra 		printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
1817d5abe669SPeter Zijlstra 		       "%s/0x%08x/%d\n",
1818a62428c0STejun Heo 		       current->comm, preempt_count(), task_pid_nr(current));
1819d5abe669SPeter Zijlstra 		printk(KERN_ERR "    last function: ");
1820d5abe669SPeter Zijlstra 		print_symbol("%s\n", (unsigned long)f);
1821d5abe669SPeter Zijlstra 		debug_show_held_locks(current);
1822d5abe669SPeter Zijlstra 		dump_stack();
1823d5abe669SPeter Zijlstra 	}
1824d5abe669SPeter Zijlstra 
18258b03ae3cSTejun Heo 	spin_lock_irq(&gcwq->lock);
1826a62428c0STejun Heo 
1827fb0e7bebSTejun Heo 	/* clear cpu intensive status */
1828fb0e7bebSTejun Heo 	if (unlikely(cpu_intensive))
1829fb0e7bebSTejun Heo 		worker_clr_flags(worker, WORKER_CPU_INTENSIVE);
1830fb0e7bebSTejun Heo 
1831a62428c0STejun Heo 	/* we're done with it, release */
1832c8e55f36STejun Heo 	hlist_del_init(&worker->hentry);
1833c34056a3STejun Heo 	worker->current_work = NULL;
18348cca0eeaSTejun Heo 	worker->current_cwq = NULL;
1835*8a2e8e5dSTejun Heo 	cwq_dec_nr_in_flight(cwq, work_color, false);
18361da177e4SLinus Torvalds }
18371da177e4SLinus Torvalds 
1838affee4b2STejun Heo /**
1839affee4b2STejun Heo  * process_scheduled_works - process scheduled works
1840affee4b2STejun Heo  * @worker: self
1841affee4b2STejun Heo  *
1842affee4b2STejun Heo  * Process all scheduled works.  Please note that the scheduled list
1843affee4b2STejun Heo  * may change while processing a work, so this function repeatedly
1844affee4b2STejun Heo  * fetches a work from the top and executes it.
1845affee4b2STejun Heo  *
1846affee4b2STejun Heo  * CONTEXT:
18478b03ae3cSTejun Heo  * spin_lock_irq(gcwq->lock) which may be released and regrabbed
1848affee4b2STejun Heo  * multiple times.
1849affee4b2STejun Heo  */
1850affee4b2STejun Heo static void process_scheduled_works(struct worker *worker)
18511da177e4SLinus Torvalds {
1852affee4b2STejun Heo 	while (!list_empty(&worker->scheduled)) {
1853affee4b2STejun Heo 		struct work_struct *work = list_first_entry(&worker->scheduled,
1854a62428c0STejun Heo 						struct work_struct, entry);
1855c34056a3STejun Heo 		process_one_work(worker, work);
1856a62428c0STejun Heo 	}
18571da177e4SLinus Torvalds }
18581da177e4SLinus Torvalds 
18594690c4abSTejun Heo /**
18604690c4abSTejun Heo  * worker_thread - the worker thread function
1861c34056a3STejun Heo  * @__worker: self
18624690c4abSTejun Heo  *
1863e22bee78STejun Heo  * The gcwq worker thread function.  There's a single dynamic pool of
1864e22bee78STejun Heo  * these per cpu.  These workers process all works regardless of
1865e22bee78STejun Heo  * their specific target workqueue.  The only exception is works which
1866e22bee78STejun Heo  * belong to workqueues with a rescuer, which is explained in
1867e22bee78STejun Heo  * rescuer_thread().
18684690c4abSTejun Heo  */
1869c34056a3STejun Heo static int worker_thread(void *__worker)
18701da177e4SLinus Torvalds {
1871c34056a3STejun Heo 	struct worker *worker = __worker;
18728b03ae3cSTejun Heo 	struct global_cwq *gcwq = worker->gcwq;
18731da177e4SLinus Torvalds 
1874e22bee78STejun Heo 	/* tell the scheduler that this is a workqueue worker */
1875e22bee78STejun Heo 	worker->task->flags |= PF_WQ_WORKER;
1876c8e55f36STejun Heo woke_up:
18778b03ae3cSTejun Heo 	spin_lock_irq(&gcwq->lock);
1878affee4b2STejun Heo 
1879c8e55f36STejun Heo 	/* DIE can be set only while we're idle, checking here is enough */
1880c8e55f36STejun Heo 	if (worker->flags & WORKER_DIE) {
1881c8e55f36STejun Heo 		spin_unlock_irq(&gcwq->lock);
1882e22bee78STejun Heo 		worker->task->flags &= ~PF_WQ_WORKER;
1883c8e55f36STejun Heo 		return 0;
1884c8e55f36STejun Heo 	}
1885c8e55f36STejun Heo 
1886c8e55f36STejun Heo 	worker_leave_idle(worker);
1887db7bccf4STejun Heo recheck:
1888e22bee78STejun Heo 	/* no more worker necessary? */
1889e22bee78STejun Heo 	if (!need_more_worker(gcwq))
1890e22bee78STejun Heo 		goto sleep;
1891e22bee78STejun Heo 
1892e22bee78STejun Heo 	/* do we need to manage? */
1893e22bee78STejun Heo 	if (unlikely(!may_start_working(gcwq)) && manage_workers(worker))
1894e22bee78STejun Heo 		goto recheck;
1895e22bee78STejun Heo 
1896c8e55f36STejun Heo 	/*
1897c8e55f36STejun Heo 	 * ->scheduled list can only be filled while a worker is
1898c8e55f36STejun Heo 	 * preparing to process a work or actually processing it.
1899c8e55f36STejun Heo 	 * Make sure nobody diddled with it while I was sleeping.
1900c8e55f36STejun Heo 	 */
1901c8e55f36STejun Heo 	BUG_ON(!list_empty(&worker->scheduled));
1902c8e55f36STejun Heo 
1903e22bee78STejun Heo 	/*
1904e22bee78STejun Heo 	 * When control reaches this point, we're guaranteed to have
1905e22bee78STejun Heo 	 * at least one idle worker or that someone else has already
1906e22bee78STejun Heo 	 * assumed the manager role.
1907e22bee78STejun Heo 	 */
1908e22bee78STejun Heo 	worker_clr_flags(worker, WORKER_PREP);
1909e22bee78STejun Heo 
1910e22bee78STejun Heo 	do {
1911affee4b2STejun Heo 		struct work_struct *work =
19127e11629dSTejun Heo 			list_first_entry(&gcwq->worklist,
1913affee4b2STejun Heo 					 struct work_struct, entry);
1914affee4b2STejun Heo 
1915c8e55f36STejun Heo 		if (likely(!(*work_data_bits(work) & WORK_STRUCT_LINKED))) {
1916affee4b2STejun Heo 			/* optimization path, not strictly necessary */
1917affee4b2STejun Heo 			process_one_work(worker, work);
1918affee4b2STejun Heo 			if (unlikely(!list_empty(&worker->scheduled)))
1919affee4b2STejun Heo 				process_scheduled_works(worker);
1920affee4b2STejun Heo 		} else {
1921c8e55f36STejun Heo 			move_linked_works(work, &worker->scheduled, NULL);
1922affee4b2STejun Heo 			process_scheduled_works(worker);
1923affee4b2STejun Heo 		}
1924e22bee78STejun Heo 	} while (keep_working(gcwq));
1925affee4b2STejun Heo 
1926e22bee78STejun Heo 	worker_set_flags(worker, WORKER_PREP, false);
1927d313dd85STejun Heo sleep:
1928e22bee78STejun Heo 	if (unlikely(need_to_manage_workers(gcwq)) && manage_workers(worker))
1929e22bee78STejun Heo 		goto recheck;
1930d313dd85STejun Heo 
1931c8e55f36STejun Heo 	/*
1932e22bee78STejun Heo 	 * gcwq->lock is held, there's no work to process and no need
1933e22bee78STejun Heo 	 * to manage; sleep.  Workers are woken up only while
1934e22bee78STejun Heo 	 * holding gcwq->lock or from the local cpu, so setting the
1935e22bee78STejun Heo 	 * current state before releasing gcwq->lock is enough to
1936e22bee78STejun Heo 	 * prevent losing any event.
1937c8e55f36STejun Heo 	 */
1938c8e55f36STejun Heo 	worker_enter_idle(worker);
1939c8e55f36STejun Heo 	__set_current_state(TASK_INTERRUPTIBLE);
19408b03ae3cSTejun Heo 	spin_unlock_irq(&gcwq->lock);
19411da177e4SLinus Torvalds 	schedule();
1942c8e55f36STejun Heo 	goto woke_up;
19431da177e4SLinus Torvalds }
19441da177e4SLinus Torvalds 
1945e22bee78STejun Heo /**
1946e22bee78STejun Heo  * rescuer_thread - the rescuer thread function
1947e22bee78STejun Heo  * @__wq: the associated workqueue
1948e22bee78STejun Heo  *
1949e22bee78STejun Heo  * Workqueue rescuer thread function.  There's one rescuer for each
1950e22bee78STejun Heo  * workqueue which has WQ_RESCUER set.
1951e22bee78STejun Heo  *
1952e22bee78STejun Heo  * Regular work processing on a gcwq may block trying to create a new
1953e22bee78STejun Heo  * worker, which uses a GFP_KERNEL allocation; this has a slight chance
1954e22bee78STejun Heo  * of developing into a deadlock if some works currently on the same
1955e22bee78STejun Heo  * queue need to be processed to satisfy the GFP_KERNEL allocation.
1956e22bee78STejun Heo  * This is the problem the rescuer solves.
1957e22bee78STejun Heo  *
1958e22bee78STejun Heo  * When such a condition is possible, the gcwq summons the rescuers of
1959e22bee78STejun Heo  * all workqueues which have works queued on the gcwq and lets them
1960e22bee78STejun Heo  * process those works so that forward progress can be guaranteed.
1961e22bee78STejun Heo  *
1962e22bee78STejun Heo  * This should happen rarely.
1963e22bee78STejun Heo  */
1964e22bee78STejun Heo static int rescuer_thread(void *__wq)
1965e22bee78STejun Heo {
1966e22bee78STejun Heo 	struct workqueue_struct *wq = __wq;
1967e22bee78STejun Heo 	struct worker *rescuer = wq->rescuer;
1968e22bee78STejun Heo 	struct list_head *scheduled = &rescuer->scheduled;
1969f3421797STejun Heo 	bool is_unbound = wq->flags & WQ_UNBOUND;
1970e22bee78STejun Heo 	unsigned int cpu;
1971e22bee78STejun Heo 
1972e22bee78STejun Heo 	set_user_nice(current, RESCUER_NICE_LEVEL);
1973e22bee78STejun Heo repeat:
1974e22bee78STejun Heo 	set_current_state(TASK_INTERRUPTIBLE);
19751da177e4SLinus Torvalds 
19761da177e4SLinus Torvalds 	if (kthread_should_stop())
1977e22bee78STejun Heo 		return 0;
19781da177e4SLinus Torvalds 
1979f3421797STejun Heo 	/*
1980f3421797STejun Heo 	 * See whether any cpu is asking for help.  Unbound
1981f3421797STejun Heo 	 * workqueues use cpu 0 in mayday_mask for WORK_CPU_UNBOUND.
1982f3421797STejun Heo 	 */
1983f2e005aaSTejun Heo 	for_each_mayday_cpu(cpu, wq->mayday_mask) {
1984f3421797STejun Heo 		unsigned int tcpu = is_unbound ? WORK_CPU_UNBOUND : cpu;
1985f3421797STejun Heo 		struct cpu_workqueue_struct *cwq = get_cwq(tcpu, wq);
1986e22bee78STejun Heo 		struct global_cwq *gcwq = cwq->gcwq;
1987e22bee78STejun Heo 		struct work_struct *work, *n;
1988e22bee78STejun Heo 
1989e22bee78STejun Heo 		__set_current_state(TASK_RUNNING);
1990f2e005aaSTejun Heo 		mayday_clear_cpu(cpu, wq->mayday_mask);
1991e22bee78STejun Heo 
1992e22bee78STejun Heo 		/* migrate to the target cpu if possible */
1993e22bee78STejun Heo 		rescuer->gcwq = gcwq;
1994e22bee78STejun Heo 		worker_maybe_bind_and_lock(rescuer);
1995e22bee78STejun Heo 
1996e22bee78STejun Heo 		/*
1997e22bee78STejun Heo 		 * Slurp in all works issued via this workqueue and
1998e22bee78STejun Heo 		 * process'em.
1999e22bee78STejun Heo 		 */
2000e22bee78STejun Heo 		BUG_ON(!list_empty(&rescuer->scheduled));
2001e22bee78STejun Heo 		list_for_each_entry_safe(work, n, &gcwq->worklist, entry)
2002e22bee78STejun Heo 			if (get_work_cwq(work) == cwq)
2003e22bee78STejun Heo 				move_linked_works(work, scheduled, &n);
2004e22bee78STejun Heo 
2005e22bee78STejun Heo 		process_scheduled_works(rescuer);
2006e22bee78STejun Heo 		spin_unlock_irq(&gcwq->lock);
20071da177e4SLinus Torvalds 	}
20081da177e4SLinus Torvalds 
2009e22bee78STejun Heo 	schedule();
2010e22bee78STejun Heo 	goto repeat;
20111da177e4SLinus Torvalds }
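/*
 * Illustrative sketch, not part of the original file: a workqueue
 * that can sit in the memory-reclaim path should be created with
 * WQ_RESCUER so the rescuer thread above guarantees forward progress.
 * "myfs_reclaim" and reclaim_wq are hypothetical names;
 * alloc_workqueue()'s third argument is max_active.
 */
#if 0	/* example only */
static struct workqueue_struct *reclaim_wq;

static int __init myfs_init_wq(void)
{
	reclaim_wq = alloc_workqueue("myfs_reclaim", WQ_RESCUER, 1);
	if (!reclaim_wq)
		return -ENOMEM;
	return 0;
}
#endif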
20121da177e4SLinus Torvalds 
2013fc2e4d70SOleg Nesterov struct wq_barrier {
2014fc2e4d70SOleg Nesterov 	struct work_struct	work;
2015fc2e4d70SOleg Nesterov 	struct completion	done;
2016fc2e4d70SOleg Nesterov };
2017fc2e4d70SOleg Nesterov 
2018fc2e4d70SOleg Nesterov static void wq_barrier_func(struct work_struct *work)
2019fc2e4d70SOleg Nesterov {
2020fc2e4d70SOleg Nesterov 	struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
2021fc2e4d70SOleg Nesterov 	complete(&barr->done);
2022fc2e4d70SOleg Nesterov }
2023fc2e4d70SOleg Nesterov 
20244690c4abSTejun Heo /**
20254690c4abSTejun Heo  * insert_wq_barrier - insert a barrier work
20264690c4abSTejun Heo  * @cwq: cwq to insert barrier into
20274690c4abSTejun Heo  * @barr: wq_barrier to insert
2028affee4b2STejun Heo  * @target: target work to attach @barr to
2029affee4b2STejun Heo  * @worker: worker currently executing @target, NULL if @target is not executing
20304690c4abSTejun Heo  *
2031affee4b2STejun Heo  * @barr is linked to @target such that @barr is completed only after
2032affee4b2STejun Heo  * @target finishes execution.  Please note that the ordering
2033affee4b2STejun Heo  * guarantee is observed only with respect to @target and on the local
2034affee4b2STejun Heo  * cpu.
2035affee4b2STejun Heo  *
2036affee4b2STejun Heo  * Currently, a queued barrier can't be canceled.  This is because
2037affee4b2STejun Heo  * try_to_grab_pending() can't determine whether the work to be
2038affee4b2STejun Heo  * grabbed is at the head of the queue and thus can't clear LINKED
2039affee4b2STejun Heo  * flag of the previous work while there must be a valid next work
2040affee4b2STejun Heo  * after a work with LINKED flag set.
2041affee4b2STejun Heo  *
2042affee4b2STejun Heo  * Note that when @worker is non-NULL, @target may be modified
2043affee4b2STejun Heo  * underneath us, so we can't reliably determine cwq from @target.
20444690c4abSTejun Heo  *
20454690c4abSTejun Heo  * CONTEXT:
20468b03ae3cSTejun Heo  * spin_lock_irq(gcwq->lock).
20474690c4abSTejun Heo  */
204883c22520SOleg Nesterov static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
2049affee4b2STejun Heo 			      struct wq_barrier *barr,
2050affee4b2STejun Heo 			      struct work_struct *target, struct worker *worker)
2051fc2e4d70SOleg Nesterov {
2052affee4b2STejun Heo 	struct list_head *head;
2053affee4b2STejun Heo 	unsigned int linked = 0;
2054affee4b2STejun Heo 
2055dc186ad7SThomas Gleixner 	/*
20568b03ae3cSTejun Heo 	 * debugobject calls are safe here even with gcwq->lock locked
2057dc186ad7SThomas Gleixner 	 * as we know for sure that this will not trigger any of the
2058dc186ad7SThomas Gleixner 	 * checks and call back into the fixup functions where we
2059dc186ad7SThomas Gleixner 	 * might deadlock.
2060dc186ad7SThomas Gleixner 	 */
2061dc186ad7SThomas Gleixner 	INIT_WORK_ON_STACK(&barr->work, wq_barrier_func);
206222df02bbSTejun Heo 	__set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work));
2063fc2e4d70SOleg Nesterov 	init_completion(&barr->done);
206483c22520SOleg Nesterov 
2065affee4b2STejun Heo 	/*
2066affee4b2STejun Heo 	 * If @target is currently being executed, schedule the
2067affee4b2STejun Heo 	 * barrier to the worker; otherwise, put it after @target.
2068affee4b2STejun Heo 	 */
2069affee4b2STejun Heo 	if (worker)
2070affee4b2STejun Heo 		head = worker->scheduled.next;
2071affee4b2STejun Heo 	else {
2072affee4b2STejun Heo 		unsigned long *bits = work_data_bits(target);
2073affee4b2STejun Heo 
2074affee4b2STejun Heo 		head = target->entry.next;
2075affee4b2STejun Heo 		/* there can already be other linked works, inherit and set */
2076affee4b2STejun Heo 		linked = *bits & WORK_STRUCT_LINKED;
2077affee4b2STejun Heo 		__set_bit(WORK_STRUCT_LINKED_BIT, bits);
2078affee4b2STejun Heo 	}
2079affee4b2STejun Heo 
2080dc186ad7SThomas Gleixner 	debug_work_activate(&barr->work);
2081affee4b2STejun Heo 	insert_work(cwq, &barr->work, head,
2082affee4b2STejun Heo 		    work_color_to_flags(WORK_NO_COLOR) | linked);
2083fc2e4d70SOleg Nesterov }
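/*
 * Illustrative sketch, not part of the original file: how a flush
 * primitive uses the barrier above.  Queue a wq_barrier behind the
 * target work under gcwq->lock, drop the lock, then wait for the
 * barrier's completion.  This mirrors what flush_work() does
 * elsewhere in this file; gcwq, cwq, work and worker are assumed to
 * have been looked up already.
 */
#if 0	/* example only */
	struct wq_barrier barr;

	spin_lock_irq(&gcwq->lock);
	insert_wq_barrier(cwq, &barr, work, worker);
	spin_unlock_irq(&gcwq->lock);

	wait_for_completion(&barr.done);
	destroy_work_on_stack(&barr.work);
#endif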
2084fc2e4d70SOleg Nesterov 
208573f53c4aSTejun Heo /**
208673f53c4aSTejun Heo  * flush_workqueue_prep_cwqs - prepare cwqs for workqueue flushing
208773f53c4aSTejun Heo  * @wq: workqueue being flushed
208873f53c4aSTejun Heo  * @flush_color: new flush color, < 0 for no-op
208973f53c4aSTejun Heo  * @work_color: new work color, < 0 for no-op
209073f53c4aSTejun Heo  *
209173f53c4aSTejun Heo  * Prepare cwqs for workqueue flushing.
209273f53c4aSTejun Heo  *
209373f53c4aSTejun Heo  * If @flush_color is non-negative, flush_color on all cwqs should be
209473f53c4aSTejun Heo  * -1.  If no cwq has in-flight commands at the specified color, all
209573f53c4aSTejun Heo  * cwq->flush_color's stay at -1 and %false is returned.  If any cwq
209673f53c4aSTejun Heo  * has in flight commands, its cwq->flush_color is set to
209773f53c4aSTejun Heo  * @flush_color, @wq->nr_cwqs_to_flush is updated accordingly, cwq
209873f53c4aSTejun Heo  * wakeup logic is armed and %true is returned.
209973f53c4aSTejun Heo  *
210073f53c4aSTejun Heo  * The caller should have initialized @wq->first_flusher prior to
210173f53c4aSTejun Heo  * calling this function with non-negative @flush_color.  If
210273f53c4aSTejun Heo  * @flush_color is negative, no flush color update is done and %false
210373f53c4aSTejun Heo  * is returned.
210473f53c4aSTejun Heo  *
210573f53c4aSTejun Heo  * If @work_color is non-negative, all cwqs should have the same
210673f53c4aSTejun Heo  * work_color which is previous to @work_color and all will be
210773f53c4aSTejun Heo  * advanced to @work_color.
210873f53c4aSTejun Heo  *
210973f53c4aSTejun Heo  * CONTEXT:
211073f53c4aSTejun Heo  * mutex_lock(wq->flush_mutex).
211173f53c4aSTejun Heo  *
211273f53c4aSTejun Heo  * RETURNS:
211373f53c4aSTejun Heo  * %true if @flush_color >= 0 and there's something to flush.  %false
211473f53c4aSTejun Heo  * otherwise.
211573f53c4aSTejun Heo  */
211673f53c4aSTejun Heo static bool flush_workqueue_prep_cwqs(struct workqueue_struct *wq,
211773f53c4aSTejun Heo 				      int flush_color, int work_color)
21181da177e4SLinus Torvalds {
211973f53c4aSTejun Heo 	bool wait = false;
212073f53c4aSTejun Heo 	unsigned int cpu;
21211da177e4SLinus Torvalds 
212273f53c4aSTejun Heo 	if (flush_color >= 0) {
212373f53c4aSTejun Heo 		BUG_ON(atomic_read(&wq->nr_cwqs_to_flush));
212473f53c4aSTejun Heo 		atomic_set(&wq->nr_cwqs_to_flush, 1);
2125dc186ad7SThomas Gleixner 	}
212614441960SOleg Nesterov 
2127f3421797STejun Heo 	for_each_cwq_cpu(cpu, wq) {
212873f53c4aSTejun Heo 		struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
21298b03ae3cSTejun Heo 		struct global_cwq *gcwq = cwq->gcwq;
21301da177e4SLinus Torvalds 
21318b03ae3cSTejun Heo 		spin_lock_irq(&gcwq->lock);
213273f53c4aSTejun Heo 
213373f53c4aSTejun Heo 		if (flush_color >= 0) {
213473f53c4aSTejun Heo 			BUG_ON(cwq->flush_color != -1);
213573f53c4aSTejun Heo 
213673f53c4aSTejun Heo 			if (cwq->nr_in_flight[flush_color]) {
213773f53c4aSTejun Heo 				cwq->flush_color = flush_color;
213873f53c4aSTejun Heo 				atomic_inc(&wq->nr_cwqs_to_flush);
213973f53c4aSTejun Heo 				wait = true;
21401da177e4SLinus Torvalds 			}
214173f53c4aSTejun Heo 		}
214273f53c4aSTejun Heo 
214373f53c4aSTejun Heo 		if (work_color >= 0) {
214473f53c4aSTejun Heo 			BUG_ON(work_color != work_next_color(cwq->work_color));
214573f53c4aSTejun Heo 			cwq->work_color = work_color;
214673f53c4aSTejun Heo 		}
214773f53c4aSTejun Heo 
21488b03ae3cSTejun Heo 		spin_unlock_irq(&gcwq->lock);
21491da177e4SLinus Torvalds 	}
21501da177e4SLinus Torvalds 
215173f53c4aSTejun Heo 	if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_cwqs_to_flush))
215273f53c4aSTejun Heo 		complete(&wq->first_flusher->done);
215373f53c4aSTejun Heo 
215473f53c4aSTejun Heo 	return wait;
215583c22520SOleg Nesterov }
21561da177e4SLinus Torvalds 
21570fcb78c2SRolf Eike Beer /**
21581da177e4SLinus Torvalds  * flush_workqueue - ensure that any scheduled work has run to completion.
21590fcb78c2SRolf Eike Beer  * @wq: workqueue to flush
21601da177e4SLinus Torvalds  *
21611da177e4SLinus Torvalds  * Forces execution of the workqueue and blocks until its completion.
21621da177e4SLinus Torvalds  * This is typically used in driver shutdown handlers.
21631da177e4SLinus Torvalds  *
2164fc2e4d70SOleg Nesterov  * We sleep until all works which were queued on entry have been handled,
2165fc2e4d70SOleg Nesterov  * but we are not livelocked by new incoming ones.
21661da177e4SLinus Torvalds  */
21677ad5b3a5SHarvey Harrison void flush_workqueue(struct workqueue_struct *wq)
21681da177e4SLinus Torvalds {
216973f53c4aSTejun Heo 	struct wq_flusher this_flusher = {
217073f53c4aSTejun Heo 		.list = LIST_HEAD_INIT(this_flusher.list),
217173f53c4aSTejun Heo 		.flush_color = -1,
217273f53c4aSTejun Heo 		.done = COMPLETION_INITIALIZER_ONSTACK(this_flusher.done),
217373f53c4aSTejun Heo 	};
217473f53c4aSTejun Heo 	int next_color;
2175b1f4ec17SOleg Nesterov 
21763295f0efSIngo Molnar 	lock_map_acquire(&wq->lockdep_map);
21773295f0efSIngo Molnar 	lock_map_release(&wq->lockdep_map);
217873f53c4aSTejun Heo 
217973f53c4aSTejun Heo 	mutex_lock(&wq->flush_mutex);
218073f53c4aSTejun Heo 
218173f53c4aSTejun Heo 	/*
218273f53c4aSTejun Heo 	 * Start-to-wait phase
218373f53c4aSTejun Heo 	 */
218473f53c4aSTejun Heo 	next_color = work_next_color(wq->work_color);
218573f53c4aSTejun Heo 
218673f53c4aSTejun Heo 	if (next_color != wq->flush_color) {
218773f53c4aSTejun Heo 		/*
218873f53c4aSTejun Heo 		 * Color space is not full.  The current work_color
218973f53c4aSTejun Heo 		 * becomes our flush_color and work_color is advanced
219073f53c4aSTejun Heo 		 * by one.
219173f53c4aSTejun Heo 		 */
219273f53c4aSTejun Heo 		BUG_ON(!list_empty(&wq->flusher_overflow));
219373f53c4aSTejun Heo 		this_flusher.flush_color = wq->work_color;
219473f53c4aSTejun Heo 		wq->work_color = next_color;
219573f53c4aSTejun Heo 
219673f53c4aSTejun Heo 		if (!wq->first_flusher) {
219773f53c4aSTejun Heo 			/* no flush in progress, become the first flusher */
219873f53c4aSTejun Heo 			BUG_ON(wq->flush_color != this_flusher.flush_color);
219973f53c4aSTejun Heo 
220073f53c4aSTejun Heo 			wq->first_flusher = &this_flusher;
220173f53c4aSTejun Heo 
220273f53c4aSTejun Heo 			if (!flush_workqueue_prep_cwqs(wq, wq->flush_color,
220373f53c4aSTejun Heo 						       wq->work_color)) {
220473f53c4aSTejun Heo 				/* nothing to flush, done */
220573f53c4aSTejun Heo 				wq->flush_color = next_color;
220673f53c4aSTejun Heo 				wq->first_flusher = NULL;
220773f53c4aSTejun Heo 				goto out_unlock;
220873f53c4aSTejun Heo 			}
220973f53c4aSTejun Heo 		} else {
221073f53c4aSTejun Heo 			/* wait in queue */
221173f53c4aSTejun Heo 			BUG_ON(wq->flush_color == this_flusher.flush_color);
221273f53c4aSTejun Heo 			list_add_tail(&this_flusher.list, &wq->flusher_queue);
221373f53c4aSTejun Heo 			flush_workqueue_prep_cwqs(wq, -1, wq->work_color);
221473f53c4aSTejun Heo 		}
221573f53c4aSTejun Heo 	} else {
221673f53c4aSTejun Heo 		/*
221773f53c4aSTejun Heo 		 * Oops, color space is full, wait on overflow queue.
221873f53c4aSTejun Heo 		 * The next flush completion will assign us a
221973f53c4aSTejun Heo 		 * flush_color and transfer us to flusher_queue.
222073f53c4aSTejun Heo 		 */
222173f53c4aSTejun Heo 		list_add_tail(&this_flusher.list, &wq->flusher_overflow);
222273f53c4aSTejun Heo 	}
222373f53c4aSTejun Heo 
222473f53c4aSTejun Heo 	mutex_unlock(&wq->flush_mutex);
222573f53c4aSTejun Heo 
222673f53c4aSTejun Heo 	wait_for_completion(&this_flusher.done);
222773f53c4aSTejun Heo 
222873f53c4aSTejun Heo 	/*
222973f53c4aSTejun Heo 	 * Wake-up-and-cascade phase
223073f53c4aSTejun Heo 	 *
223173f53c4aSTejun Heo 	 * First flushers are responsible for cascading flushes and
223273f53c4aSTejun Heo 	 * handling overflow.  Non-first flushers can simply return.
223373f53c4aSTejun Heo 	 */
223473f53c4aSTejun Heo 	if (wq->first_flusher != &this_flusher)
223573f53c4aSTejun Heo 		return;
223673f53c4aSTejun Heo 
223773f53c4aSTejun Heo 	mutex_lock(&wq->flush_mutex);
223873f53c4aSTejun Heo 
22394ce48b37STejun Heo 	/* we might have raced, check again with mutex held */
22404ce48b37STejun Heo 	if (wq->first_flusher != &this_flusher)
22414ce48b37STejun Heo 		goto out_unlock;
22424ce48b37STejun Heo 
224373f53c4aSTejun Heo 	wq->first_flusher = NULL;
224473f53c4aSTejun Heo 
224573f53c4aSTejun Heo 	BUG_ON(!list_empty(&this_flusher.list));
224673f53c4aSTejun Heo 	BUG_ON(wq->flush_color != this_flusher.flush_color);
224773f53c4aSTejun Heo 
224873f53c4aSTejun Heo 	while (true) {
224973f53c4aSTejun Heo 		struct wq_flusher *next, *tmp;
225073f53c4aSTejun Heo 
225173f53c4aSTejun Heo 		/* complete all the flushers sharing the current flush color */
225273f53c4aSTejun Heo 		list_for_each_entry_safe(next, tmp, &wq->flusher_queue, list) {
225373f53c4aSTejun Heo 			if (next->flush_color != wq->flush_color)
225473f53c4aSTejun Heo 				break;
225573f53c4aSTejun Heo 			list_del_init(&next->list);
225673f53c4aSTejun Heo 			complete(&next->done);
225773f53c4aSTejun Heo 		}
225873f53c4aSTejun Heo 
225973f53c4aSTejun Heo 		BUG_ON(!list_empty(&wq->flusher_overflow) &&
226073f53c4aSTejun Heo 		       wq->flush_color != work_next_color(wq->work_color));
226173f53c4aSTejun Heo 
226273f53c4aSTejun Heo 		/* this flush_color is finished, advance by one */
226373f53c4aSTejun Heo 		wq->flush_color = work_next_color(wq->flush_color);
226473f53c4aSTejun Heo 
226573f53c4aSTejun Heo 		/* one color has been freed, handle overflow queue */
226673f53c4aSTejun Heo 		if (!list_empty(&wq->flusher_overflow)) {
226773f53c4aSTejun Heo 			/*
226873f53c4aSTejun Heo 			 * Assign the same color to all overflowed
226973f53c4aSTejun Heo 			 * flushers, advance work_color and append to
227073f53c4aSTejun Heo 			 * flusher_queue.  This is the start-to-wait
227173f53c4aSTejun Heo 			 * phase for these overflowed flushers.
227273f53c4aSTejun Heo 			 */
227373f53c4aSTejun Heo 			list_for_each_entry(tmp, &wq->flusher_overflow, list)
227473f53c4aSTejun Heo 				tmp->flush_color = wq->work_color;
227573f53c4aSTejun Heo 
227673f53c4aSTejun Heo 			wq->work_color = work_next_color(wq->work_color);
227773f53c4aSTejun Heo 
227873f53c4aSTejun Heo 			list_splice_tail_init(&wq->flusher_overflow,
227973f53c4aSTejun Heo 					      &wq->flusher_queue);
228073f53c4aSTejun Heo 			flush_workqueue_prep_cwqs(wq, -1, wq->work_color);
228173f53c4aSTejun Heo 		}
228273f53c4aSTejun Heo 
228373f53c4aSTejun Heo 		if (list_empty(&wq->flusher_queue)) {
228473f53c4aSTejun Heo 			BUG_ON(wq->flush_color != wq->work_color);
228573f53c4aSTejun Heo 			break;
228673f53c4aSTejun Heo 		}
228773f53c4aSTejun Heo 
228873f53c4aSTejun Heo 		/*
228973f53c4aSTejun Heo 		 * Need to flush more colors.  Make the next flusher
229073f53c4aSTejun Heo 		 * the new first flusher and arm cwqs.
229173f53c4aSTejun Heo 		 */
229273f53c4aSTejun Heo 		BUG_ON(wq->flush_color == wq->work_color);
229373f53c4aSTejun Heo 		BUG_ON(wq->flush_color != next->flush_color);
229473f53c4aSTejun Heo 
229573f53c4aSTejun Heo 		list_del_init(&next->list);
229673f53c4aSTejun Heo 		wq->first_flusher = next;
229773f53c4aSTejun Heo 
229873f53c4aSTejun Heo 		if (flush_workqueue_prep_cwqs(wq, wq->flush_color, -1))
229973f53c4aSTejun Heo 			break;
230073f53c4aSTejun Heo 
230173f53c4aSTejun Heo 		/*
230273f53c4aSTejun Heo 		 * Meh... this color is already done, clear first
230373f53c4aSTejun Heo 		 * flusher and repeat cascading.
230473f53c4aSTejun Heo 		 */
230573f53c4aSTejun Heo 		wq->first_flusher = NULL;
230673f53c4aSTejun Heo 	}
230773f53c4aSTejun Heo 
230873f53c4aSTejun Heo out_unlock:
230973f53c4aSTejun Heo 	mutex_unlock(&wq->flush_mutex);
23101da177e4SLinus Torvalds }
2311ae90dd5dSDave Jones EXPORT_SYMBOL_GPL(flush_workqueue);
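
/*
 * Editor's illustrative sketch, not part of the original file: a typical
 * driver teardown path built on flush_workqueue().  The names my_wq and
 * my_driver_remove are hypothetical; my_wq is assumed to have been
 * created earlier (e.g. with create_workqueue()).
 */
static struct workqueue_struct *my_wq;

static void my_driver_remove(void)
{
	/* wait for every work item queued so far to finish */
	flush_workqueue(my_wq);
	destroy_workqueue(my_wq);
}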
23121da177e4SLinus Torvalds 
2313db700897SOleg Nesterov /**
2314db700897SOleg Nesterov  * flush_work - block until a work_struct's callback has terminated
2315db700897SOleg Nesterov  * @work: the work which is to be flushed
2316db700897SOleg Nesterov  *
2317a67da70dSOleg Nesterov  * Returns false if @work has already terminated.
2318a67da70dSOleg Nesterov  *
2319db700897SOleg Nesterov  * It is expected that, prior to calling flush_work(), the caller has
2320db700897SOleg Nesterov  * arranged for the work to not be requeued, otherwise it doesn't make
2321db700897SOleg Nesterov  * sense to use this function.
2322db700897SOleg Nesterov  */
2323db700897SOleg Nesterov int flush_work(struct work_struct *work)
2324db700897SOleg Nesterov {
2325affee4b2STejun Heo 	struct worker *worker = NULL;
23268b03ae3cSTejun Heo 	struct global_cwq *gcwq;
2327db700897SOleg Nesterov 	struct cpu_workqueue_struct *cwq;
2328db700897SOleg Nesterov 	struct wq_barrier barr;
2329db700897SOleg Nesterov 
2330db700897SOleg Nesterov 	might_sleep();
23317a22ad75STejun Heo 	gcwq = get_work_gcwq(work);
23327a22ad75STejun Heo 	if (!gcwq)
2333db700897SOleg Nesterov 		return 0;
2334db700897SOleg Nesterov 
23358b03ae3cSTejun Heo 	spin_lock_irq(&gcwq->lock);
2336db700897SOleg Nesterov 	if (!list_empty(&work->entry)) {
2337db700897SOleg Nesterov 		/*
2338db700897SOleg Nesterov 		 * See the comment near try_to_grab_pending()->smp_rmb().
23397a22ad75STejun Heo 		 * If it was re-queued to a different gcwq under us, we
23407a22ad75STejun Heo 		 * are not going to wait.
2341db700897SOleg Nesterov 		 */
2342db700897SOleg Nesterov 		smp_rmb();
23437a22ad75STejun Heo 		cwq = get_work_cwq(work);
23447a22ad75STejun Heo 		if (unlikely(!cwq || gcwq != cwq->gcwq))
23454690c4abSTejun Heo 			goto already_gone;
2346db700897SOleg Nesterov 	} else {
23477a22ad75STejun Heo 		worker = find_worker_executing_work(gcwq, work);
2348affee4b2STejun Heo 		if (!worker)
23494690c4abSTejun Heo 			goto already_gone;
23507a22ad75STejun Heo 		cwq = worker->current_cwq;
2351db700897SOleg Nesterov 	}
2352db700897SOleg Nesterov 
2353affee4b2STejun Heo 	insert_wq_barrier(cwq, &barr, work, worker);
23548b03ae3cSTejun Heo 	spin_unlock_irq(&gcwq->lock);
23557a22ad75STejun Heo 
2356db700897SOleg Nesterov 	lock_map_acquire(&cwq->wq->lockdep_map);
2357db700897SOleg Nesterov 	lock_map_release(&cwq->wq->lockdep_map);
2358db700897SOleg Nesterov 
2359db700897SOleg Nesterov 	wait_for_completion(&barr.done);
2360dc186ad7SThomas Gleixner 	destroy_work_on_stack(&barr.work);
2361db700897SOleg Nesterov 	return 1;
23624690c4abSTejun Heo already_gone:
23638b03ae3cSTejun Heo 	spin_unlock_irq(&gcwq->lock);
23644690c4abSTejun Heo 	return 0;
2365db700897SOleg Nesterov }
2366db700897SOleg Nesterov EXPORT_SYMBOL_GPL(flush_work);
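
/*
 * Editor's sketch of flush_work() usage; all names are hypothetical,
 * my_rx_work is assumed initialized with INIT_WORK() and
 * <linux/interrupt.h> included for disable_irq().  Per the comment
 * above, the caller first stops the source of requeueing, then flushes
 * the single item.
 */
struct my_dev {
	int irq;
};

static struct work_struct my_rx_work;

static void my_stop_rx(struct my_dev *dev)
{
	disable_irq(dev->irq);		/* no new queueing after this point */
	flush_work(&my_rx_work);	/* wait out an already running callback */
}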
2367db700897SOleg Nesterov 
23686e84d644SOleg Nesterov /*
23691f1f642eSOleg Nesterov  * Upon a successful return (>= 0), the caller "owns" WORK_STRUCT_PENDING bit,
23706e84d644SOleg Nesterov  * so this work can't be re-armed in any way.
23716e84d644SOleg Nesterov  */
23726e84d644SOleg Nesterov static int try_to_grab_pending(struct work_struct *work)
23736e84d644SOleg Nesterov {
23748b03ae3cSTejun Heo 	struct global_cwq *gcwq;
23751f1f642eSOleg Nesterov 	int ret = -1;
23766e84d644SOleg Nesterov 
237722df02bbSTejun Heo 	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)))
23781f1f642eSOleg Nesterov 		return 0;
23796e84d644SOleg Nesterov 
23806e84d644SOleg Nesterov 	/*
23816e84d644SOleg Nesterov 	 * The queueing is in progress, or it is already queued. Try to
23826e84d644SOleg Nesterov 	 * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
23836e84d644SOleg Nesterov 	 */
23847a22ad75STejun Heo 	gcwq = get_work_gcwq(work);
23857a22ad75STejun Heo 	if (!gcwq)
23866e84d644SOleg Nesterov 		return ret;
23876e84d644SOleg Nesterov 
23888b03ae3cSTejun Heo 	spin_lock_irq(&gcwq->lock);
23896e84d644SOleg Nesterov 	if (!list_empty(&work->entry)) {
23906e84d644SOleg Nesterov 		/*
23917a22ad75STejun Heo 		 * This work is queued, but perhaps we locked the wrong gcwq.
23926e84d644SOleg Nesterov 		 * In that case we must see the new value after rmb(), see
23936e84d644SOleg Nesterov 		 * insert_work()->wmb().
23946e84d644SOleg Nesterov 		 */
23956e84d644SOleg Nesterov 		smp_rmb();
23967a22ad75STejun Heo 		if (gcwq == get_work_gcwq(work)) {
2397dc186ad7SThomas Gleixner 			debug_work_deactivate(work);
23986e84d644SOleg Nesterov 			list_del_init(&work->entry);
23997a22ad75STejun Heo 			cwq_dec_nr_in_flight(get_work_cwq(work),
2400*8a2e8e5dSTejun Heo 				get_work_color(work),
2401*8a2e8e5dSTejun Heo 				*work_data_bits(work) & WORK_STRUCT_DELAYED);
24026e84d644SOleg Nesterov 			ret = 1;
24036e84d644SOleg Nesterov 		}
24046e84d644SOleg Nesterov 	}
24058b03ae3cSTejun Heo 	spin_unlock_irq(&gcwq->lock);
24066e84d644SOleg Nesterov 
24076e84d644SOleg Nesterov 	return ret;
24086e84d644SOleg Nesterov }
24096e84d644SOleg Nesterov 
24107a22ad75STejun Heo static void wait_on_cpu_work(struct global_cwq *gcwq, struct work_struct *work)
2411b89deed3SOleg Nesterov {
2412b89deed3SOleg Nesterov 	struct wq_barrier barr;
2413affee4b2STejun Heo 	struct worker *worker;
2414b89deed3SOleg Nesterov 
24158b03ae3cSTejun Heo 	spin_lock_irq(&gcwq->lock);
2416b89deed3SOleg Nesterov 
24177a22ad75STejun Heo 	worker = find_worker_executing_work(gcwq, work);
24187a22ad75STejun Heo 	if (unlikely(worker))
24197a22ad75STejun Heo 		insert_wq_barrier(worker->current_cwq, &barr, work, worker);
2420affee4b2STejun Heo 
24218b03ae3cSTejun Heo 	spin_unlock_irq(&gcwq->lock);
2422b89deed3SOleg Nesterov 
2423affee4b2STejun Heo 	if (unlikely(worker)) {
2424b89deed3SOleg Nesterov 		wait_for_completion(&barr.done);
2425dc186ad7SThomas Gleixner 		destroy_work_on_stack(&barr.work);
2426dc186ad7SThomas Gleixner 	}
2427b89deed3SOleg Nesterov }
2428b89deed3SOleg Nesterov 
24296e84d644SOleg Nesterov static void wait_on_work(struct work_struct *work)
2430b89deed3SOleg Nesterov {
2431b1f4ec17SOleg Nesterov 	int cpu;
2432b89deed3SOleg Nesterov 
2433f293ea92SOleg Nesterov 	might_sleep();
2434f293ea92SOleg Nesterov 
24353295f0efSIngo Molnar 	lock_map_acquire(&work->lockdep_map);
24363295f0efSIngo Molnar 	lock_map_release(&work->lockdep_map);
24374e6045f1SJohannes Berg 
2438f3421797STejun Heo 	for_each_gcwq_cpu(cpu)
24397a22ad75STejun Heo 		wait_on_cpu_work(get_gcwq(cpu), work);
24406e84d644SOleg Nesterov }
24416e84d644SOleg Nesterov 
24421f1f642eSOleg Nesterov static int __cancel_work_timer(struct work_struct *work,
24431f1f642eSOleg Nesterov 				struct timer_list* timer)
24441f1f642eSOleg Nesterov {
24451f1f642eSOleg Nesterov 	int ret;
24461f1f642eSOleg Nesterov 
24471f1f642eSOleg Nesterov 	do {
24481f1f642eSOleg Nesterov 		ret = (timer && likely(del_timer(timer)));
24491f1f642eSOleg Nesterov 		if (!ret)
24501f1f642eSOleg Nesterov 			ret = try_to_grab_pending(work);
24511f1f642eSOleg Nesterov 		wait_on_work(work);
24521f1f642eSOleg Nesterov 	} while (unlikely(ret < 0));
24531f1f642eSOleg Nesterov 
24547a22ad75STejun Heo 	clear_work_data(work);
24551f1f642eSOleg Nesterov 	return ret;
24561f1f642eSOleg Nesterov }
24571f1f642eSOleg Nesterov 
24586e84d644SOleg Nesterov /**
24596e84d644SOleg Nesterov  * cancel_work_sync - block until a work_struct's callback has terminated
24606e84d644SOleg Nesterov  * @work: the work which is to be flushed
24616e84d644SOleg Nesterov  *
24621f1f642eSOleg Nesterov  * Returns true if @work was pending.
24631f1f642eSOleg Nesterov  *
24646e84d644SOleg Nesterov  * cancel_work_sync() will cancel the work if it is queued. If the work's
24656e84d644SOleg Nesterov  * callback appears to be running, cancel_work_sync() will block until it
24666e84d644SOleg Nesterov  * has completed.
24676e84d644SOleg Nesterov  *
24686e84d644SOleg Nesterov  * It is possible to use this function if the work re-queues itself. It can
24696e84d644SOleg Nesterov  * cancel the work even if it migrates to another workqueue, however in that
24706e84d644SOleg Nesterov  * case it only guarantees that work->func() has completed on the last queued
24716e84d644SOleg Nesterov  * workqueue.
24726e84d644SOleg Nesterov  *
24736e84d644SOleg Nesterov  * cancel_work_sync(&delayed_work->work) should be used only if ->timer is not
24746e84d644SOleg Nesterov  * pending, otherwise it goes into a busy-wait loop until the timer expires.
24756e84d644SOleg Nesterov  *
24766e84d644SOleg Nesterov  * The caller must ensure that workqueue_struct on which this work was last
24776e84d644SOleg Nesterov  * queued can't be destroyed before this function returns.
24786e84d644SOleg Nesterov  */
24791f1f642eSOleg Nesterov int cancel_work_sync(struct work_struct *work)
24806e84d644SOleg Nesterov {
24811f1f642eSOleg Nesterov 	return __cancel_work_timer(work, NULL);
2482b89deed3SOleg Nesterov }
248328e53bddSOleg Nesterov EXPORT_SYMBOL_GPL(cancel_work_sync);
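
/*
 * Editor's sketch, hypothetical names: tearing down an object whose work
 * item may requeue itself.  cancel_work_sync() both unqueues a pending
 * instance and waits for a running one, so freeing afterwards is safe.
 */
struct my_obj {
	struct work_struct work;
	/* ... payload ... */
};

static void my_obj_destroy(struct my_obj *obj)
{
	cancel_work_sync(&obj->work);	/* obj->work cannot run after this */
	kfree(obj);
}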
2484b89deed3SOleg Nesterov 
24856e84d644SOleg Nesterov /**
2486f5a421a4SOleg Nesterov  * cancel_delayed_work_sync - reliably kill off a delayed work.
24876e84d644SOleg Nesterov  * @dwork: the delayed work struct
24886e84d644SOleg Nesterov  *
24891f1f642eSOleg Nesterov  * Returns true if @dwork was pending.
24901f1f642eSOleg Nesterov  *
24916e84d644SOleg Nesterov  * It is possible to use this function if @dwork rearms itself via queue_work()
24926e84d644SOleg Nesterov  * or queue_delayed_work(). See also the comment for cancel_work_sync().
24936e84d644SOleg Nesterov  */
24941f1f642eSOleg Nesterov int cancel_delayed_work_sync(struct delayed_work *dwork)
24956e84d644SOleg Nesterov {
24961f1f642eSOleg Nesterov 	return __cancel_work_timer(&dwork->work, &dwork->timer);
24976e84d644SOleg Nesterov }
2498f5a421a4SOleg Nesterov EXPORT_SYMBOL(cancel_delayed_work_sync);
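
/*
 * Editor's sketch, hypothetical names: stopping a polling loop that
 * rearms itself from its own callback via schedule_delayed_work().
 */
static struct delayed_work my_poll_dwork;

static void my_stop_polling(void)
{
	/* kills a pending timer and waits for a running callback */
	cancel_delayed_work_sync(&my_poll_dwork);
}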
24991da177e4SLinus Torvalds 
25000fcb78c2SRolf Eike Beer /**
25010fcb78c2SRolf Eike Beer  * schedule_work - put work task in global workqueue
25020fcb78c2SRolf Eike Beer  * @work: job to be done
25030fcb78c2SRolf Eike Beer  *
25045b0f437dSBart Van Assche  * Returns zero if @work was already on the kernel-global workqueue and
25055b0f437dSBart Van Assche  * non-zero otherwise.
25065b0f437dSBart Van Assche  *
25075b0f437dSBart Van Assche  * This puts a job in the kernel-global workqueue if it was not already
25085b0f437dSBart Van Assche  * queued and leaves it in the same position on the kernel-global
25095b0f437dSBart Van Assche  * workqueue otherwise.
25100fcb78c2SRolf Eike Beer  */
25117ad5b3a5SHarvey Harrison int schedule_work(struct work_struct *work)
25121da177e4SLinus Torvalds {
2513d320c038STejun Heo 	return queue_work(system_wq, work);
25141da177e4SLinus Torvalds }
2515ae90dd5dSDave Jones EXPORT_SYMBOL(schedule_work);
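
/*
 * Editor's sketch of the classic bottom-half pattern, hypothetical
 * names; assumes <linux/interrupt.h> and that my_event_work was set up
 * with INIT_WORK(&my_event_work, my_event_fn).  The interrupt handler
 * defers the sleeping part of its processing via schedule_work().
 */
static struct work_struct my_event_work;

static void my_event_fn(struct work_struct *work)
{
	/* process context: sleeping, allocation etc. are allowed here */
}

static irqreturn_t my_irq_handler(int irq, void *dev_id)
{
	schedule_work(&my_event_work);	/* cheap and safe from IRQ context */
	return IRQ_HANDLED;
}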
25161da177e4SLinus Torvalds 
2517c1a220e7SZhang Rui /**
2518c1a220e7SZhang Rui  * schedule_work_on - put work task on a specific cpu
2519c1a220e7SZhang Rui  * @cpu: cpu to put the work task on
2520c1a220e7SZhang Rui  * @work: job to be done
2521c1a220e7SZhang Rui  *
2522c1a220e7SZhang Rui  * This puts a job on a specific cpu.
2523c1a220e7SZhang Rui  */
2524c1a220e7SZhang Rui int schedule_work_on(int cpu, struct work_struct *work)
2525c1a220e7SZhang Rui {
2526d320c038STejun Heo 	return queue_work_on(cpu, system_wq, work);
2527c1a220e7SZhang Rui }
2528c1a220e7SZhang Rui EXPORT_SYMBOL(schedule_work_on);
2529c1a220e7SZhang Rui 
25300fcb78c2SRolf Eike Beer /**
25310fcb78c2SRolf Eike Beer  * schedule_delayed_work - put work task in global workqueue after delay
253252bad64dSDavid Howells  * @dwork: job to be done
253352bad64dSDavid Howells  * @delay: number of jiffies to wait or 0 for immediate execution
25340fcb78c2SRolf Eike Beer  *
25350fcb78c2SRolf Eike Beer  * After waiting for a given time this puts a job in the kernel-global
25360fcb78c2SRolf Eike Beer  * workqueue.
25370fcb78c2SRolf Eike Beer  */
25387ad5b3a5SHarvey Harrison int schedule_delayed_work(struct delayed_work *dwork,
253982f67cd9SIngo Molnar 					unsigned long delay)
25401da177e4SLinus Torvalds {
2541d320c038STejun Heo 	return queue_delayed_work(system_wq, dwork, delay);
25421da177e4SLinus Torvalds }
2543ae90dd5dSDave Jones EXPORT_SYMBOL(schedule_delayed_work);
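
/*
 * Editor's sketch, hypothetical names: periodic polling on the
 * kernel-global workqueue.  The callback rearms itself on each pass.
 */
static void my_poll_fn(struct work_struct *work);
static DECLARE_DELAYED_WORK(my_poll_work, my_poll_fn);

static void my_poll_fn(struct work_struct *work)
{
	/* ... sample the hardware ... */
	schedule_delayed_work(&my_poll_work, HZ);	/* run again in ~1s */
}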
25441da177e4SLinus Torvalds 
25450fcb78c2SRolf Eike Beer /**
25468c53e463SLinus Torvalds  * flush_delayed_work - block until a delayed_work's callback has terminated
25478c53e463SLinus Torvalds  * @dwork: the delayed work which is to be flushed
25488c53e463SLinus Torvalds  *
25498c53e463SLinus Torvalds  * Any timeout is cancelled, and any pending work is run immediately.
25508c53e463SLinus Torvalds  */
25518c53e463SLinus Torvalds void flush_delayed_work(struct delayed_work *dwork)
25528c53e463SLinus Torvalds {
25538c53e463SLinus Torvalds 	if (del_timer_sync(&dwork->timer)) {
25547a22ad75STejun Heo 		__queue_work(get_cpu(), get_work_cwq(&dwork->work)->wq,
25554690c4abSTejun Heo 			     &dwork->work);
25568c53e463SLinus Torvalds 		put_cpu();
25578c53e463SLinus Torvalds 	}
25588c53e463SLinus Torvalds 	flush_work(&dwork->work);
25598c53e463SLinus Torvalds }
25608c53e463SLinus Torvalds EXPORT_SYMBOL(flush_delayed_work);
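
/*
 * Editor's sketch, hypothetical names: a suspend path that wants any
 * pending delayed writeback to happen now rather than after its timer.
 */
static struct delayed_work my_writeback_dwork;

static int my_suspend(void)
{
	flush_delayed_work(&my_writeback_dwork);	/* run it immediately */
	return 0;
}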
25618c53e463SLinus Torvalds 
25628c53e463SLinus Torvalds /**
25630fcb78c2SRolf Eike Beer  * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
25640fcb78c2SRolf Eike Beer  * @cpu: cpu to use
256552bad64dSDavid Howells  * @dwork: job to be done
25660fcb78c2SRolf Eike Beer  * @delay: number of jiffies to wait
25670fcb78c2SRolf Eike Beer  *
25680fcb78c2SRolf Eike Beer  * After waiting for a given time this puts a job in the kernel-global
25690fcb78c2SRolf Eike Beer  * workqueue on the specified CPU.
25700fcb78c2SRolf Eike Beer  */
25711da177e4SLinus Torvalds int schedule_delayed_work_on(int cpu,
257252bad64dSDavid Howells 			struct delayed_work *dwork, unsigned long delay)
25731da177e4SLinus Torvalds {
2574d320c038STejun Heo 	return queue_delayed_work_on(cpu, system_wq, dwork, delay);
25751da177e4SLinus Torvalds }
2576ae90dd5dSDave Jones EXPORT_SYMBOL(schedule_delayed_work_on);
25771da177e4SLinus Torvalds 
2578b6136773SAndrew Morton /**
2579b6136773SAndrew Morton  * schedule_on_each_cpu - call a function on each online CPU from keventd
2580b6136773SAndrew Morton  * @func: the function to call
2581b6136773SAndrew Morton  *
2582b6136773SAndrew Morton  * Returns zero on success.
2583b6136773SAndrew Morton  * Returns a negative errno on failure.
2584b6136773SAndrew Morton  *
2585b6136773SAndrew Morton  * schedule_on_each_cpu() is very slow.
2586b6136773SAndrew Morton  */
258765f27f38SDavid Howells int schedule_on_each_cpu(work_func_t func)
258815316ba8SChristoph Lameter {
258915316ba8SChristoph Lameter 	int cpu;
259038f51568SNamhyung Kim 	struct work_struct __percpu *works;
259115316ba8SChristoph Lameter 
2592b6136773SAndrew Morton 	works = alloc_percpu(struct work_struct);
2593b6136773SAndrew Morton 	if (!works)
259415316ba8SChristoph Lameter 		return -ENOMEM;
2595b6136773SAndrew Morton 
259695402b38SGautham R Shenoy 	get_online_cpus();
259793981800STejun Heo 
259815316ba8SChristoph Lameter 	for_each_online_cpu(cpu) {
25999bfb1839SIngo Molnar 		struct work_struct *work = per_cpu_ptr(works, cpu);
26009bfb1839SIngo Molnar 
26019bfb1839SIngo Molnar 		INIT_WORK(work, func);
26028de6d308SOleg Nesterov 		schedule_work_on(cpu, work);
260315316ba8SChristoph Lameter 	}
260493981800STejun Heo 
260593981800STejun Heo 	for_each_online_cpu(cpu)
26068616a89aSOleg Nesterov 		flush_work(per_cpu_ptr(works, cpu));
260793981800STejun Heo 
260895402b38SGautham R Shenoy 	put_online_cpus();
2609b6136773SAndrew Morton 	free_percpu(works);
261015316ba8SChristoph Lameter 	return 0;
261115316ba8SChristoph Lameter }
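
/*
 * Editor's sketch, hypothetical names: draining per-cpu state on every
 * online CPU.  The callback runs once on each CPU in turn and
 * schedule_on_each_cpu() returns only after all of them have finished.
 */
static void my_drain_fn(struct work_struct *work)
{
	/* runs on the CPU it was queued on; drain that CPU's cache here */
}

static int my_drain_all(void)
{
	return schedule_on_each_cpu(my_drain_fn);	/* 0 or -ENOMEM */
}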
261215316ba8SChristoph Lameter 
2613eef6a7d5SAlan Stern /**
2614eef6a7d5SAlan Stern  * flush_scheduled_work - ensure that any scheduled work has run to completion.
2615eef6a7d5SAlan Stern  *
2616eef6a7d5SAlan Stern  * Forces execution of the kernel-global workqueue and blocks until its
2617eef6a7d5SAlan Stern  * completion.
2618eef6a7d5SAlan Stern  *
2619eef6a7d5SAlan Stern  * Think twice before calling this function!  It's very easy to get into
2620eef6a7d5SAlan Stern  * trouble if you don't take great care.  Either of the following situations
2621eef6a7d5SAlan Stern  * will lead to deadlock:
2622eef6a7d5SAlan Stern  *
2623eef6a7d5SAlan Stern  *	One of the work items currently on the workqueue needs to acquire
2624eef6a7d5SAlan Stern  *	a lock held by your code or its caller.
2625eef6a7d5SAlan Stern  *
2626eef6a7d5SAlan Stern  *	Your code is running in the context of a work routine.
2627eef6a7d5SAlan Stern  *
2628eef6a7d5SAlan Stern  * They will be detected by lockdep when they occur, but the first might not
2629eef6a7d5SAlan Stern  * occur very often.  It depends on what work items are on the workqueue and
2630eef6a7d5SAlan Stern  * what locks they need, which you have no control over.
2631eef6a7d5SAlan Stern  *
2632eef6a7d5SAlan Stern  * In most situations flushing the entire workqueue is overkill; you merely
2633eef6a7d5SAlan Stern  * need to know that a particular work item isn't queued and isn't running.
2634eef6a7d5SAlan Stern  * In such cases you should use cancel_delayed_work_sync() or
2635eef6a7d5SAlan Stern  * cancel_work_sync() instead.
2636eef6a7d5SAlan Stern  */
26371da177e4SLinus Torvalds void flush_scheduled_work(void)
26381da177e4SLinus Torvalds {
2639d320c038STejun Heo 	flush_workqueue(system_wq);
26401da177e4SLinus Torvalds }
2641ae90dd5dSDave Jones EXPORT_SYMBOL(flush_scheduled_work);
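
/*
 * Editor's sketch of the guidance above, reusing the hypothetical
 * my_event_work from the schedule_work() sketch: at module exit, cancel
 * the specific item instead of flushing the whole global workqueue,
 * which could deadlock on locks held for unrelated work items.
 */
static void my_module_exit(void)
{
	cancel_work_sync(&my_event_work);	/* targeted, deadlock-free */
	/* not flush_scheduled_work() */
}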
26421da177e4SLinus Torvalds 
26431da177e4SLinus Torvalds /**
26441fa44ecaSJames Bottomley  * execute_in_process_context - reliably execute the routine with user context
26451fa44ecaSJames Bottomley  * @fn:		the function to execute
26461fa44ecaSJames Bottomley  * @ew:		guaranteed storage for the execute work structure (must
26471fa44ecaSJames Bottomley  *		be available when the work executes)
26481fa44ecaSJames Bottomley  *
26491fa44ecaSJames Bottomley  * Executes the function immediately if process context is available,
26501fa44ecaSJames Bottomley  * otherwise schedules the function for delayed execution.
26511fa44ecaSJames Bottomley  *
26521fa44ecaSJames Bottomley  * Returns:	0 - function was executed
26531fa44ecaSJames Bottomley  *		1 - function was scheduled for execution
26541fa44ecaSJames Bottomley  */
265565f27f38SDavid Howells int execute_in_process_context(work_func_t fn, struct execute_work *ew)
26561fa44ecaSJames Bottomley {
26571fa44ecaSJames Bottomley 	if (!in_interrupt()) {
265865f27f38SDavid Howells 		fn(&ew->work);
26591fa44ecaSJames Bottomley 		return 0;
26601fa44ecaSJames Bottomley 	}
26611fa44ecaSJames Bottomley 
266265f27f38SDavid Howells 	INIT_WORK(&ew->work, fn);
26631fa44ecaSJames Bottomley 	schedule_work(&ew->work);
26641fa44ecaSJames Bottomley 
26651fa44ecaSJames Bottomley 	return 1;
26661fa44ecaSJames Bottomley }
26671fa44ecaSJames Bottomley EXPORT_SYMBOL_GPL(execute_in_process_context);
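
/*
 * Editor's sketch, hypothetical names: releasing an object from a path
 * that may run in interrupt context.  The execute_work is embedded in
 * the object so its storage outlives the deferred call.
 */
struct my_res {
	struct execute_work ew;
	/* ... */
};

static void my_res_release(struct work_struct *work)
{
	struct my_res *res = container_of(work, struct my_res, ew.work);

	kfree(res);	/* guaranteed process context here */
}

static void my_res_put(struct my_res *res)
{
	/* runs my_res_release() now, or defers it if in_interrupt() */
	execute_in_process_context(my_res_release, &res->ew);
}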
26681fa44ecaSJames Bottomley 
26691da177e4SLinus Torvalds int keventd_up(void)
26701da177e4SLinus Torvalds {
2671d320c038STejun Heo 	return system_wq != NULL;
26721da177e4SLinus Torvalds }
26731da177e4SLinus Torvalds 
2674bdbc5dd7STejun Heo static int alloc_cwqs(struct workqueue_struct *wq)
26751da177e4SLinus Torvalds {
26763af24433SOleg Nesterov 	/*
26770f900049STejun Heo 	 * cwqs are force-aligned according to WORK_STRUCT_FLAG_BITS.
26780f900049STejun Heo 	 * Make sure that the alignment isn't lower than that of
26790f900049STejun Heo 	 * unsigned long long.
26803af24433SOleg Nesterov 	 */
26810f900049STejun Heo 	const size_t size = sizeof(struct cpu_workqueue_struct);
26820f900049STejun Heo 	const size_t align = max_t(size_t, 1 << WORK_STRUCT_FLAG_BITS,
26830f900049STejun Heo 				   __alignof__(unsigned long long));
2684931ac77eSTejun Heo #ifdef CONFIG_SMP
2685931ac77eSTejun Heo 	bool percpu = !(wq->flags & WQ_UNBOUND);
2686931ac77eSTejun Heo #else
2687931ac77eSTejun Heo 	bool percpu = false;
2688931ac77eSTejun Heo #endif
26893af24433SOleg Nesterov 
2690931ac77eSTejun Heo 	if (percpu)
2691f3421797STejun Heo 		wq->cpu_wq.pcpu = __alloc_percpu(size, align);
2692931ac77eSTejun Heo 	else {
26930f900049STejun Heo 		void *ptr;
2694e1d8aa9fSFrederic Weisbecker 
26950f900049STejun Heo 		/*
2696f3421797STejun Heo 		 * Allocate enough room to align cwq and put an extra
2697f3421797STejun Heo 		 * pointer at the end pointing back to the originally
2698f3421797STejun Heo 		 * allocated pointer, which is used when freeing.
26990f900049STejun Heo 		 */
2700bdbc5dd7STejun Heo 		ptr = kzalloc(size + align + sizeof(void *), GFP_KERNEL);
2701bdbc5dd7STejun Heo 		if (ptr) {
2702bdbc5dd7STejun Heo 			wq->cpu_wq.single = PTR_ALIGN(ptr, align);
2703bdbc5dd7STejun Heo 			*(void **)(wq->cpu_wq.single + 1) = ptr;
2704bdbc5dd7STejun Heo 		}
27053af24433SOleg Nesterov 	}
27063af24433SOleg Nesterov 
27070f900049STejun Heo 	/* just in case, make sure it's actually aligned */
2708bdbc5dd7STejun Heo 	BUG_ON(!IS_ALIGNED(wq->cpu_wq.v, align));
2709bdbc5dd7STejun Heo 	return wq->cpu_wq.v ? 0 : -ENOMEM;
27100f900049STejun Heo }
27110f900049STejun Heo 
2712bdbc5dd7STejun Heo static void free_cwqs(struct workqueue_struct *wq)
271306ba38a9SOleg Nesterov {
2714931ac77eSTejun Heo #ifdef CONFIG_SMP
2715931ac77eSTejun Heo 	bool percpu = !(wq->flags & WQ_UNBOUND);
2716931ac77eSTejun Heo #else
2717931ac77eSTejun Heo 	bool percpu = false;
2718931ac77eSTejun Heo #endif
271906ba38a9SOleg Nesterov 
2720931ac77eSTejun Heo 	if (percpu)
2721bdbc5dd7STejun Heo 		free_percpu(wq->cpu_wq.pcpu);
2722f3421797STejun Heo 	else if (wq->cpu_wq.single) {
2723f3421797STejun Heo 		/* the pointer to free is stored right after the cwq */
2724f3421797STejun Heo 		kfree(*(void **)(wq->cpu_wq.single + 1));
272506ba38a9SOleg Nesterov 	}
272606ba38a9SOleg Nesterov }
272706ba38a9SOleg Nesterov 
2728f3421797STejun Heo static int wq_clamp_max_active(int max_active, unsigned int flags,
2729f3421797STejun Heo 			       const char *name)
2730b71ab8c2STejun Heo {
2731f3421797STejun Heo 	int lim = flags & WQ_UNBOUND ? WQ_UNBOUND_MAX_ACTIVE : WQ_MAX_ACTIVE;
2732f3421797STejun Heo 
2733f3421797STejun Heo 	if (max_active < 1 || max_active > lim)
2734b71ab8c2STejun Heo 		printk(KERN_WARNING "workqueue: max_active %d requested for %s "
2735b71ab8c2STejun Heo 		       "is out of range, clamping between %d and %d\n",
2736f3421797STejun Heo 		       max_active, name, 1, lim);
2737b71ab8c2STejun Heo 
2738f3421797STejun Heo 	return clamp_val(max_active, 1, lim);
2739b71ab8c2STejun Heo }
2740b71ab8c2STejun Heo 
2741d320c038STejun Heo struct workqueue_struct *__alloc_workqueue_key(const char *name,
274297e37d7bSTejun Heo 					       unsigned int flags,
27431e19ffc6STejun Heo 					       int max_active,
2744eb13ba87SJohannes Berg 					       struct lock_class_key *key,
2745eb13ba87SJohannes Berg 					       const char *lock_name)
27463af24433SOleg Nesterov {
27473af24433SOleg Nesterov 	struct workqueue_struct *wq;
2748c34056a3STejun Heo 	unsigned int cpu;
27493af24433SOleg Nesterov 
2750f3421797STejun Heo 	/*
2751f3421797STejun Heo 	 * Unbound workqueues aren't concurrency managed and should be
2752f3421797STejun Heo 	 * dispatched to workers immediately.
2753f3421797STejun Heo 	 */
2754f3421797STejun Heo 	if (flags & WQ_UNBOUND)
2755f3421797STejun Heo 		flags |= WQ_HIGHPRI;
2756f3421797STejun Heo 
2757d320c038STejun Heo 	max_active = max_active ?: WQ_DFL_ACTIVE;
2758f3421797STejun Heo 	max_active = wq_clamp_max_active(max_active, flags, name);
27593af24433SOleg Nesterov 
27603af24433SOleg Nesterov 	wq = kzalloc(sizeof(*wq), GFP_KERNEL);
27613af24433SOleg Nesterov 	if (!wq)
27624690c4abSTejun Heo 		goto err;
27633af24433SOleg Nesterov 
276497e37d7bSTejun Heo 	wq->flags = flags;
2765a0a1a5fdSTejun Heo 	wq->saved_max_active = max_active;
276673f53c4aSTejun Heo 	mutex_init(&wq->flush_mutex);
276773f53c4aSTejun Heo 	atomic_set(&wq->nr_cwqs_to_flush, 0);
276873f53c4aSTejun Heo 	INIT_LIST_HEAD(&wq->flusher_queue);
276973f53c4aSTejun Heo 	INIT_LIST_HEAD(&wq->flusher_overflow);
27703af24433SOleg Nesterov 
27713af24433SOleg Nesterov 	wq->name = name;
2772eb13ba87SJohannes Berg 	lockdep_init_map(&wq->lockdep_map, lock_name, key, 0);
2773cce1a165SOleg Nesterov 	INIT_LIST_HEAD(&wq->list);
27743af24433SOleg Nesterov 
2775bdbc5dd7STejun Heo 	if (alloc_cwqs(wq) < 0)
2776bdbc5dd7STejun Heo 		goto err;
2777bdbc5dd7STejun Heo 
2778f3421797STejun Heo 	for_each_cwq_cpu(cpu, wq) {
27791537663fSTejun Heo 		struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
27808b03ae3cSTejun Heo 		struct global_cwq *gcwq = get_gcwq(cpu);
27811537663fSTejun Heo 
27820f900049STejun Heo 		BUG_ON((unsigned long)cwq & WORK_STRUCT_FLAG_MASK);
27838b03ae3cSTejun Heo 		cwq->gcwq = gcwq;
2784c34056a3STejun Heo 		cwq->wq = wq;
278573f53c4aSTejun Heo 		cwq->flush_color = -1;
27861e19ffc6STejun Heo 		cwq->max_active = max_active;
27871e19ffc6STejun Heo 		INIT_LIST_HEAD(&cwq->delayed_works);
2788e22bee78STejun Heo 	}
27891537663fSTejun Heo 
2790e22bee78STejun Heo 	if (flags & WQ_RESCUER) {
2791e22bee78STejun Heo 		struct worker *rescuer;
2792e22bee78STejun Heo 
2793f2e005aaSTejun Heo 		if (!alloc_mayday_mask(&wq->mayday_mask, GFP_KERNEL))
2794e22bee78STejun Heo 			goto err;
2795e22bee78STejun Heo 
2796e22bee78STejun Heo 		wq->rescuer = rescuer = alloc_worker();
2797e22bee78STejun Heo 		if (!rescuer)
2798e22bee78STejun Heo 			goto err;
2799e22bee78STejun Heo 
2800e22bee78STejun Heo 		rescuer->task = kthread_create(rescuer_thread, wq, "%s", name);
2801e22bee78STejun Heo 		if (IS_ERR(rescuer->task))
2802e22bee78STejun Heo 			goto err;
2803e22bee78STejun Heo 
2804e22bee78STejun Heo 		rescuer->task->flags |= PF_THREAD_BOUND;
2805e22bee78STejun Heo 		wake_up_process(rescuer->task);
28063af24433SOleg Nesterov 	}
28071537663fSTejun Heo 
28083af24433SOleg Nesterov 	/*
2809a0a1a5fdSTejun Heo 	 * workqueue_lock protects global freeze state and workqueues
2810a0a1a5fdSTejun Heo 	 * list.  Grab it, set max_active accordingly and add the new
2811a0a1a5fdSTejun Heo 	 * workqueue to workqueues list.
28123af24433SOleg Nesterov 	 */
28133af24433SOleg Nesterov 	spin_lock(&workqueue_lock);
2814a0a1a5fdSTejun Heo 
2815a0a1a5fdSTejun Heo 	if (workqueue_freezing && wq->flags & WQ_FREEZEABLE)
2816f3421797STejun Heo 		for_each_cwq_cpu(cpu, wq)
2817a0a1a5fdSTejun Heo 			get_cwq(cpu, wq)->max_active = 0;
2818a0a1a5fdSTejun Heo 
28193af24433SOleg Nesterov 	list_add(&wq->list, &workqueues);
2820a0a1a5fdSTejun Heo 
28213af24433SOleg Nesterov 	spin_unlock(&workqueue_lock);
28223af24433SOleg Nesterov 
28233af24433SOleg Nesterov 	return wq;
28244690c4abSTejun Heo err:
28254690c4abSTejun Heo 	if (wq) {
2826bdbc5dd7STejun Heo 		free_cwqs(wq);
2827f2e005aaSTejun Heo 		free_mayday_mask(wq->mayday_mask);
2828e22bee78STejun Heo 		kfree(wq->rescuer);
28294690c4abSTejun Heo 		kfree(wq);
28303af24433SOleg Nesterov 	}
28314690c4abSTejun Heo 	return NULL;
28321da177e4SLinus Torvalds }
2833d320c038STejun Heo EXPORT_SYMBOL_GPL(__alloc_workqueue_key);
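
/*
 * Editor's sketch, hypothetical names: the usual entry point is the
 * alloc_workqueue() wrapper from <linux/workqueue.h>, which expands to
 * __alloc_workqueue_key().  WQ_RESCUER gives the queue a rescuer thread
 * so it can make forward progress under memory pressure; a max_active
 * of 0 selects the default (WQ_DFL_ACTIVE).
 */
static struct workqueue_struct *my_io_wq;

static int __init my_wq_init(void)
{
	my_io_wq = alloc_workqueue("my_io", WQ_RESCUER, 0);
	if (!my_io_wq)
		return -ENOMEM;
	return 0;
}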
28341da177e4SLinus Torvalds 
28353af24433SOleg Nesterov /**
28363af24433SOleg Nesterov  * destroy_workqueue - safely terminate a workqueue
28373af24433SOleg Nesterov  * @wq: target workqueue
28383af24433SOleg Nesterov  *
28393af24433SOleg Nesterov  * Safely destroy a workqueue. All work currently pending will be done first.
28403af24433SOleg Nesterov  */
28413af24433SOleg Nesterov void destroy_workqueue(struct workqueue_struct *wq)
28423af24433SOleg Nesterov {
2843c8e55f36STejun Heo 	unsigned int cpu;
28443af24433SOleg Nesterov 
2845e41e704bSTejun Heo 	wq->flags |= WQ_DYING;
2846a0a1a5fdSTejun Heo 	flush_workqueue(wq);
2847a0a1a5fdSTejun Heo 
2848a0a1a5fdSTejun Heo 	/*
2849a0a1a5fdSTejun Heo 	 * The wq list is used for freezing.  Remove @wq from the list
2850a0a1a5fdSTejun Heo 	 * only after flushing is complete, in case a freeze races us.
2851a0a1a5fdSTejun Heo 	 */
285295402b38SGautham R Shenoy 	spin_lock(&workqueue_lock);
28533af24433SOleg Nesterov 	list_del(&wq->list);
285495402b38SGautham R Shenoy 	spin_unlock(&workqueue_lock);
28553af24433SOleg Nesterov 
2856e22bee78STejun Heo 	/* sanity check */
2857f3421797STejun Heo 	for_each_cwq_cpu(cpu, wq) {
285873f53c4aSTejun Heo 		struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
285973f53c4aSTejun Heo 		int i;
28603af24433SOleg Nesterov 
286173f53c4aSTejun Heo 		for (i = 0; i < WORK_NR_COLORS; i++)
286273f53c4aSTejun Heo 			BUG_ON(cwq->nr_in_flight[i]);
28631e19ffc6STejun Heo 		BUG_ON(cwq->nr_active);
28641e19ffc6STejun Heo 		BUG_ON(!list_empty(&cwq->delayed_works));
286573f53c4aSTejun Heo 	}
28661537663fSTejun Heo 
2867e22bee78STejun Heo 	if (wq->flags & WQ_RESCUER) {
2868e22bee78STejun Heo 		kthread_stop(wq->rescuer->task);
2869f2e005aaSTejun Heo 		free_mayday_mask(wq->mayday_mask);
28708d9df9f0SXiaotian Feng 		kfree(wq->rescuer);
2871e22bee78STejun Heo 	}
2872e22bee78STejun Heo 
2873bdbc5dd7STejun Heo 	free_cwqs(wq);
28743af24433SOleg Nesterov 	kfree(wq);
28753af24433SOleg Nesterov }
28763af24433SOleg Nesterov EXPORT_SYMBOL_GPL(destroy_workqueue);
28773af24433SOleg Nesterov 
2878dcd989cbSTejun Heo /**
2879dcd989cbSTejun Heo  * workqueue_set_max_active - adjust max_active of a workqueue
2880dcd989cbSTejun Heo  * @wq: target workqueue
2881dcd989cbSTejun Heo  * @max_active: new max_active value.
2882dcd989cbSTejun Heo  *
2883dcd989cbSTejun Heo  * Set max_active of @wq to @max_active.
2884dcd989cbSTejun Heo  *
2885dcd989cbSTejun Heo  * CONTEXT:
2886dcd989cbSTejun Heo  * Don't call from IRQ context.
2887dcd989cbSTejun Heo  */
2888dcd989cbSTejun Heo void workqueue_set_max_active(struct workqueue_struct *wq, int max_active)
2889dcd989cbSTejun Heo {
2890dcd989cbSTejun Heo 	unsigned int cpu;
2891dcd989cbSTejun Heo 
2892f3421797STejun Heo 	max_active = wq_clamp_max_active(max_active, wq->flags, wq->name);
2893dcd989cbSTejun Heo 
2894dcd989cbSTejun Heo 	spin_lock(&workqueue_lock);
2895dcd989cbSTejun Heo 
2896dcd989cbSTejun Heo 	wq->saved_max_active = max_active;
2897dcd989cbSTejun Heo 
2898f3421797STejun Heo 	for_each_cwq_cpu(cpu, wq) {
2899dcd989cbSTejun Heo 		struct global_cwq *gcwq = get_gcwq(cpu);
2900dcd989cbSTejun Heo 
2901dcd989cbSTejun Heo 		spin_lock_irq(&gcwq->lock);
2902dcd989cbSTejun Heo 
2903dcd989cbSTejun Heo 		if (!(wq->flags & WQ_FREEZEABLE) ||
2904dcd989cbSTejun Heo 		    !(gcwq->flags & GCWQ_FREEZING))
2905dcd989cbSTejun Heo 			get_cwq(gcwq->cpu, wq)->max_active = max_active;
2906dcd989cbSTejun Heo 
2907dcd989cbSTejun Heo 		spin_unlock_irq(&gcwq->lock);
2908dcd989cbSTejun Heo 	}
2909dcd989cbSTejun Heo 
2910dcd989cbSTejun Heo 	spin_unlock(&workqueue_lock);
2911dcd989cbSTejun Heo }
2912dcd989cbSTejun Heo EXPORT_SYMBOL_GPL(workqueue_set_max_active);
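
/*
 * Editor's sketch, reusing the hypothetical my_io_wq from the
 * alloc_workqueue() sketch above: raising the concurrency limit at
 * runtime, e.g. once probing shows the device can take more requests.
 */
static void my_bump_concurrency(void)
{
	workqueue_set_max_active(my_io_wq, 16);	/* clamped to a valid range */
}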
2913dcd989cbSTejun Heo 
2914dcd989cbSTejun Heo /**
2915dcd989cbSTejun Heo  * workqueue_congested - test whether a workqueue is congested
2916dcd989cbSTejun Heo  * @cpu: CPU in question
2917dcd989cbSTejun Heo  * @wq: target workqueue
2918dcd989cbSTejun Heo  *
2919dcd989cbSTejun Heo  * Test whether @wq's cpu workqueue for @cpu is congested.  There is
2920dcd989cbSTejun Heo  * no synchronization around this function and the test result is
2921dcd989cbSTejun Heo  * unreliable and only useful as advisory hints or for debugging.
2922dcd989cbSTejun Heo  *
2923dcd989cbSTejun Heo  * RETURNS:
2924dcd989cbSTejun Heo  * %true if congested, %false otherwise.
2925dcd989cbSTejun Heo  */
2926dcd989cbSTejun Heo bool workqueue_congested(unsigned int cpu, struct workqueue_struct *wq)
2927dcd989cbSTejun Heo {
2928dcd989cbSTejun Heo 	struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
2929dcd989cbSTejun Heo 
2930dcd989cbSTejun Heo 	return !list_empty(&cwq->delayed_works);
2931dcd989cbSTejun Heo }
2932dcd989cbSTejun Heo EXPORT_SYMBOL_GPL(workqueue_congested);
2933dcd989cbSTejun Heo 
2934dcd989cbSTejun Heo /**
2935dcd989cbSTejun Heo  * work_cpu - return the last known associated cpu for @work
2936dcd989cbSTejun Heo  * @work: the work of interest
2937dcd989cbSTejun Heo  *
2938dcd989cbSTejun Heo  * RETURNS:
2939bdbc5dd7STejun Heo  * CPU number if @work was ever queued.  WORK_CPU_NONE otherwise.
2940dcd989cbSTejun Heo  */
2941dcd989cbSTejun Heo unsigned int work_cpu(struct work_struct *work)
2942dcd989cbSTejun Heo {
2943dcd989cbSTejun Heo 	struct global_cwq *gcwq = get_work_gcwq(work);
2944dcd989cbSTejun Heo 
2945bdbc5dd7STejun Heo 	return gcwq ? gcwq->cpu : WORK_CPU_NONE;
2946dcd989cbSTejun Heo }
2947dcd989cbSTejun Heo EXPORT_SYMBOL_GPL(work_cpu);
2948dcd989cbSTejun Heo 
2949dcd989cbSTejun Heo /**
2950dcd989cbSTejun Heo  * work_busy - test whether a work is currently pending or running
2951dcd989cbSTejun Heo  * @work: the work to be tested
2952dcd989cbSTejun Heo  *
2953dcd989cbSTejun Heo  * Test whether @work is currently pending or running.  There is no
2954dcd989cbSTejun Heo  * synchronization around this function and the test result is
2955dcd989cbSTejun Heo  * unreliable and only useful as advisory hints or for debugging.
2956dcd989cbSTejun Heo  * Especially for reentrant wqs, the pending state might hide the
2957dcd989cbSTejun Heo  * running state.
2958dcd989cbSTejun Heo  *
2959dcd989cbSTejun Heo  * RETURNS:
2960dcd989cbSTejun Heo  * OR'd bitmask of WORK_BUSY_* bits.
2961dcd989cbSTejun Heo  */
2962dcd989cbSTejun Heo unsigned int work_busy(struct work_struct *work)
2963dcd989cbSTejun Heo {
2964dcd989cbSTejun Heo 	struct global_cwq *gcwq = get_work_gcwq(work);
2965dcd989cbSTejun Heo 	unsigned long flags;
2966dcd989cbSTejun Heo 	unsigned int ret = 0;
2967dcd989cbSTejun Heo 
2968dcd989cbSTejun Heo 	if (!gcwq)
2969dcd989cbSTejun Heo 		return 0;
2970dcd989cbSTejun Heo 
2971dcd989cbSTejun Heo 	spin_lock_irqsave(&gcwq->lock, flags);
2972dcd989cbSTejun Heo 
2973dcd989cbSTejun Heo 	if (work_pending(work))
2974dcd989cbSTejun Heo 		ret |= WORK_BUSY_PENDING;
2975dcd989cbSTejun Heo 	if (find_worker_executing_work(gcwq, work))
2976dcd989cbSTejun Heo 		ret |= WORK_BUSY_RUNNING;
2977dcd989cbSTejun Heo 
2978dcd989cbSTejun Heo 	spin_unlock_irqrestore(&gcwq->lock, flags);
2979dcd989cbSTejun Heo 
2980dcd989cbSTejun Heo 	return ret;
2981dcd989cbSTejun Heo }
2982dcd989cbSTejun Heo EXPORT_SYMBOL_GPL(work_busy);
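
/*
 * Editor's sketch, reusing the hypothetical struct my_obj from the
 * cancel_work_sync() sketch: work_busy() used purely as a debugging
 * hint, since the comment above warns the result is unreliable.
 */
static void my_dump_work_state(struct my_obj *obj)
{
	unsigned int busy = work_busy(&obj->work);

	printk(KERN_DEBUG "my_obj %p: work%s%s\n", obj,
	       busy & WORK_BUSY_PENDING ? " pending" : "",
	       busy & WORK_BUSY_RUNNING ? " running" : "");
}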
2983dcd989cbSTejun Heo 
2984db7bccf4STejun Heo /*
2985db7bccf4STejun Heo  * CPU hotplug.
2986db7bccf4STejun Heo  *
2987e22bee78STejun Heo  * There are two challenges in supporting CPU hotplug.  Firstly, there
2988e22bee78STejun Heo  * are a lot of assumptions on strong associations among work, cwq and
2989e22bee78STejun Heo  * gcwq which make migrating pending and scheduled works very
2990e22bee78STejun Heo  * difficult to implement without impacting hot paths.  Secondly,
2991e22bee78STejun Heo  * gcwqs serve a mix of short, long and very long running works, making
2992e22bee78STejun Heo  * blocked draining impractical.
2993e22bee78STejun Heo  *
2994e22bee78STejun Heo  * This is solved by allowing a gcwq to be detached from CPU, running
2995e22bee78STejun Heo  * it with unbound (rogue) workers and allowing it to be reattached
2996e22bee78STejun Heo  * later if the cpu comes back online.  A separate thread is created
2997e22bee78STejun Heo  * to govern a gcwq in such state and is called the trustee of the
2998e22bee78STejun Heo  * gcwq.
2999db7bccf4STejun Heo  *
3000db7bccf4STejun Heo  * Trustee states and their descriptions.
3001db7bccf4STejun Heo  *
3002db7bccf4STejun Heo  * START	Command state used on startup.  On CPU_DOWN_PREPARE, a
3003db7bccf4STejun Heo  *		new trustee is started with this state.
3004db7bccf4STejun Heo  *
3005db7bccf4STejun Heo  * IN_CHARGE	Once started, trustee will enter this state after
3006e22bee78STejun Heo  *		assuming the manager role and making all existing
3007e22bee78STejun Heo  *		workers rogue.  DOWN_PREPARE waits for trustee to
3008e22bee78STejun Heo  *		enter this state.  After reaching IN_CHARGE, trustee
3009e22bee78STejun Heo  *		tries to execute the pending worklist until it's empty
3010e22bee78STejun Heo  *		and the state is set to BUTCHER, or the state is set
3011e22bee78STejun Heo  *		to RELEASE.
3012db7bccf4STejun Heo  *
3013db7bccf4STejun Heo  * BUTCHER	Command state which is set by the cpu callback after
3014db7bccf4STejun Heo  *		the cpu has gone down.  Once this state is set, the trustee
3015db7bccf4STejun Heo  *		knows that there will be no new works on the worklist
3016db7bccf4STejun Heo  *		and once the worklist is empty it can proceed to
3017db7bccf4STejun Heo  *		killing idle workers.
3018db7bccf4STejun Heo  *
3019db7bccf4STejun Heo  * RELEASE	Command state which is set by the cpu callback if the
3020db7bccf4STejun Heo  *		cpu down has been canceled or it has come online
3021db7bccf4STejun Heo  *		again.  After recognizing this state, trustee stops
3022e22bee78STejun Heo  *		trying to drain or butcher and clears ROGUE, rebinds
3023e22bee78STejun Heo  *		all remaining workers back to the cpu and releases
3024e22bee78STejun Heo  *		manager role.
3025db7bccf4STejun Heo  *
3026db7bccf4STejun Heo  * DONE		Trustee will enter this state after BUTCHER or RELEASE
3027db7bccf4STejun Heo  *		is complete.
3028db7bccf4STejun Heo  *
3029db7bccf4STejun Heo  *          trustee                 CPU                draining
3030db7bccf4STejun Heo  *         took over                down               complete
3031db7bccf4STejun Heo  * START -----------> IN_CHARGE -----------> BUTCHER -----------> DONE
3032db7bccf4STejun Heo  *                        |                     |                  ^
3033db7bccf4STejun Heo  *                        | CPU is back online  v   return workers |
3034db7bccf4STejun Heo  *                         ----------------> RELEASE --------------
3035db7bccf4STejun Heo  */
3036db7bccf4STejun Heo 
3037db7bccf4STejun Heo /**
3038db7bccf4STejun Heo  * trustee_wait_event_timeout - timed event wait for trustee
3039db7bccf4STejun Heo  * @cond: condition to wait for
3040db7bccf4STejun Heo  * @timeout: timeout in jiffies
3041db7bccf4STejun Heo  *
3042db7bccf4STejun Heo  * wait_event_timeout() for trustee to use.  Handles locking and
3043db7bccf4STejun Heo  * checks for RELEASE request.
3044db7bccf4STejun Heo  *
3045db7bccf4STejun Heo  * CONTEXT:
3046db7bccf4STejun Heo  * spin_lock_irq(gcwq->lock) which may be released and regrabbed
3047db7bccf4STejun Heo  * multiple times.  To be used by trustee.
3048db7bccf4STejun Heo  *
3049db7bccf4STejun Heo  * RETURNS:
3050db7bccf4STejun Heo  * Positive indicating left time if @cond is satisfied, 0 if timed
3051db7bccf4STejun Heo  * out, -1 if canceled.
3052db7bccf4STejun Heo  */
3053db7bccf4STejun Heo #define trustee_wait_event_timeout(cond, timeout) ({			\
3054db7bccf4STejun Heo 	long __ret = (timeout);						\
3055db7bccf4STejun Heo 	while (!((cond) || (gcwq->trustee_state == TRUSTEE_RELEASE)) &&	\
3056db7bccf4STejun Heo 	       __ret) {							\
3057db7bccf4STejun Heo 		spin_unlock_irq(&gcwq->lock);				\
3058db7bccf4STejun Heo 		__wait_event_timeout(gcwq->trustee_wait, (cond) ||	\
3059db7bccf4STejun Heo 			(gcwq->trustee_state == TRUSTEE_RELEASE),	\
3060db7bccf4STejun Heo 			__ret);						\
3061db7bccf4STejun Heo 		spin_lock_irq(&gcwq->lock);				\
3062db7bccf4STejun Heo 	}								\
3063db7bccf4STejun Heo 	gcwq->trustee_state == TRUSTEE_RELEASE ? -1 : (__ret);		\
3064db7bccf4STejun Heo })
3065db7bccf4STejun Heo 
3066db7bccf4STejun Heo /**
3067db7bccf4STejun Heo  * trustee_wait_event - event wait for trustee
3068db7bccf4STejun Heo  * @cond: condition to wait for
3069db7bccf4STejun Heo  *
3070db7bccf4STejun Heo  * wait_event() for trustee to use.  Automatically handles locking and
3071db7bccf4STejun Heo  * checks for RELEASE request.
3072db7bccf4STejun Heo  *
3073db7bccf4STejun Heo  * CONTEXT:
3074db7bccf4STejun Heo  * spin_lock_irq(gcwq->lock) which may be released and regrabbed
3075db7bccf4STejun Heo  * multiple times.  To be used by trustee.
3076db7bccf4STejun Heo  *
3077db7bccf4STejun Heo  * RETURNS:
3078db7bccf4STejun Heo  * 0 if @cond is satisfied, -1 if canceled.
3079db7bccf4STejun Heo  */
3080db7bccf4STejun Heo #define trustee_wait_event(cond) ({					\
3081db7bccf4STejun Heo 	long __ret1;							\
3082db7bccf4STejun Heo 	__ret1 = trustee_wait_event_timeout(cond, MAX_SCHEDULE_TIMEOUT);\
3083db7bccf4STejun Heo 	__ret1 < 0 ? -1 : 0;						\
3084db7bccf4STejun Heo })
3085db7bccf4STejun Heo 
3086db7bccf4STejun Heo static int __cpuinit trustee_thread(void *__gcwq)
3087db7bccf4STejun Heo {
3088db7bccf4STejun Heo 	struct global_cwq *gcwq = __gcwq;
3089db7bccf4STejun Heo 	struct worker *worker;
3090e22bee78STejun Heo 	struct work_struct *work;
3091db7bccf4STejun Heo 	struct hlist_node *pos;
3092e22bee78STejun Heo 	long rc;
3093db7bccf4STejun Heo 	int i;
3094db7bccf4STejun Heo 
3095db7bccf4STejun Heo 	BUG_ON(gcwq->cpu != smp_processor_id());
3096db7bccf4STejun Heo 
3097db7bccf4STejun Heo 	spin_lock_irq(&gcwq->lock);
3098db7bccf4STejun Heo 	/*
3099e22bee78STejun Heo 	 * Claim the manager position and make all workers rogue.
3100e22bee78STejun Heo 	 * Trustee must be bound to the target cpu and can't be
3101e22bee78STejun Heo 	 * cancelled.
3102db7bccf4STejun Heo 	 */
3103db7bccf4STejun Heo 	BUG_ON(gcwq->cpu != smp_processor_id());
3104e22bee78STejun Heo 	rc = trustee_wait_event(!(gcwq->flags & GCWQ_MANAGING_WORKERS));
3105e22bee78STejun Heo 	BUG_ON(rc < 0);
3106e22bee78STejun Heo 
3107e22bee78STejun Heo 	gcwq->flags |= GCWQ_MANAGING_WORKERS;
3108db7bccf4STejun Heo 
3109db7bccf4STejun Heo 	list_for_each_entry(worker, &gcwq->idle_list, entry)
3110cb444766STejun Heo 		worker->flags |= WORKER_ROGUE;
3111db7bccf4STejun Heo 
3112db7bccf4STejun Heo 	for_each_busy_worker(worker, i, pos, gcwq)
3113cb444766STejun Heo 		worker->flags |= WORKER_ROGUE;
3114db7bccf4STejun Heo 
3115db7bccf4STejun Heo 	/*
3116e22bee78STejun Heo 	 * Call schedule() so that we cross rq->lock and thus can
3117e22bee78STejun Heo 	 * guarantee sched callbacks see the rogue flag.  This is
3118e22bee78STejun Heo 	 * necessary as scheduler callbacks may be invoked from other
3119e22bee78STejun Heo 	 * cpus.
3120e22bee78STejun Heo 	 */
3121e22bee78STejun Heo 	spin_unlock_irq(&gcwq->lock);
3122e22bee78STejun Heo 	schedule();
3123e22bee78STejun Heo 	spin_lock_irq(&gcwq->lock);
3124e22bee78STejun Heo 
3125e22bee78STejun Heo 	/*
3126cb444766STejun Heo 	 * Sched callbacks are disabled now.  Zap nr_running.  After
3127cb444766STejun Heo 	 * this, nr_running stays zero and need_more_worker() and
3128cb444766STejun Heo 	 * keep_working() are always true as long as the worklist is
3129cb444766STejun Heo 	 * not empty.
3130e22bee78STejun Heo 	 */
3131cb444766STejun Heo 	atomic_set(get_gcwq_nr_running(gcwq->cpu), 0);
3132e22bee78STejun Heo 
3133e22bee78STejun Heo 	spin_unlock_irq(&gcwq->lock);
3134e22bee78STejun Heo 	del_timer_sync(&gcwq->idle_timer);
3135e22bee78STejun Heo 	spin_lock_irq(&gcwq->lock);
3136e22bee78STejun Heo 
3137e22bee78STejun Heo 	/*
3138db7bccf4STejun Heo 	 * We're now in charge.  Notify and proceed to drain.  We need
3139db7bccf4STejun Heo 	 * to keep the gcwq running during the whole CPU down
3140db7bccf4STejun Heo 	 * procedure as other cpu hotunplug callbacks may need to
3141db7bccf4STejun Heo 	 * flush currently running tasks.
3142db7bccf4STejun Heo 	 */
3143db7bccf4STejun Heo 	gcwq->trustee_state = TRUSTEE_IN_CHARGE;
3144db7bccf4STejun Heo 	wake_up_all(&gcwq->trustee_wait);
3145db7bccf4STejun Heo 
3146db7bccf4STejun Heo 	/*
3147db7bccf4STejun Heo 	 * The original cpu is in the process of dying and may go away
3148db7bccf4STejun Heo 	 * anytime now.  When that happens, we and all workers would
3149e22bee78STejun Heo 	 * be migrated to other cpus.  Try draining any remaining work.  We
3150e22bee78STejun Heo 	 * want to get it over with ASAP - spam rescuers, wake up as
3151e22bee78STejun Heo 	 * many idlers as necessary and create new ones till the
3152e22bee78STejun Heo 	 * worklist is empty.  Note that if the gcwq is frozen, there
3153e22bee78STejun Heo 	 * may be frozen works in freezeable cwqs.  Don't declare
3154e22bee78STejun Heo 	 * completion while frozen.
3155db7bccf4STejun Heo 	 */
3156db7bccf4STejun Heo 	while (gcwq->nr_workers != gcwq->nr_idle ||
3157db7bccf4STejun Heo 	       gcwq->flags & GCWQ_FREEZING ||
3158db7bccf4STejun Heo 	       gcwq->trustee_state == TRUSTEE_IN_CHARGE) {
3159e22bee78STejun Heo 		int nr_works = 0;
3160e22bee78STejun Heo 
3161e22bee78STejun Heo 		list_for_each_entry(work, &gcwq->worklist, entry) {
3162e22bee78STejun Heo 			send_mayday(work);
3163e22bee78STejun Heo 			nr_works++;
3164e22bee78STejun Heo 		}
3165e22bee78STejun Heo 
3166e22bee78STejun Heo 		list_for_each_entry(worker, &gcwq->idle_list, entry) {
3167e22bee78STejun Heo 			if (!nr_works--)
3168e22bee78STejun Heo 				break;
3169e22bee78STejun Heo 			wake_up_process(worker->task);
3170e22bee78STejun Heo 		}
3171e22bee78STejun Heo 
3172e22bee78STejun Heo 		if (need_to_create_worker(gcwq)) {
3173e22bee78STejun Heo 			spin_unlock_irq(&gcwq->lock);
3174e22bee78STejun Heo 			worker = create_worker(gcwq, false);
3175e22bee78STejun Heo 			spin_lock_irq(&gcwq->lock);
3176e22bee78STejun Heo 			if (worker) {
3177cb444766STejun Heo 				worker->flags |= WORKER_ROGUE;
3178e22bee78STejun Heo 				start_worker(worker);
3179e22bee78STejun Heo 			}
3180e22bee78STejun Heo 		}
3181e22bee78STejun Heo 
3182db7bccf4STejun Heo 		/* give a breather */
3183db7bccf4STejun Heo 		if (trustee_wait_event_timeout(false, TRUSTEE_COOLDOWN) < 0)
3184db7bccf4STejun Heo 			break;
3185db7bccf4STejun Heo 	}
3186db7bccf4STejun Heo 
3187e22bee78STejun Heo 	/*
3188e22bee78STejun Heo 	 * Either all works have been scheduled and cpu is down, or
3189e22bee78STejun Heo 	 * cpu down has already been canceled.  Wait for and butcher
3190e22bee78STejun Heo 	 * all workers till we're canceled.
3191e22bee78STejun Heo 	 */
3192e22bee78STejun Heo 	do {
3193e22bee78STejun Heo 		rc = trustee_wait_event(!list_empty(&gcwq->idle_list));
3194e22bee78STejun Heo 		while (!list_empty(&gcwq->idle_list))
3195e22bee78STejun Heo 			destroy_worker(list_first_entry(&gcwq->idle_list,
3196e22bee78STejun Heo 							struct worker, entry));
3197e22bee78STejun Heo 	} while (gcwq->nr_workers && rc >= 0);
3198e22bee78STejun Heo 
3199e22bee78STejun Heo 	/*
3200e22bee78STejun Heo 	 * At this point, either draining has completed and no worker
3201e22bee78STejun Heo 	 * is left, or cpu down has been canceled or the cpu is being
3202e22bee78STejun Heo 	 * brought back up.  There shouldn't be any idle workers left.
3203e22bee78STejun Heo 	 * Tell the remaining busy ones to rebind once they finish their
3204e22bee78STejun Heo 	 * currently scheduled works, by scheduling rebind_work on each.
3205e22bee78STejun Heo 	 */
3206e22bee78STejun Heo 	WARN_ON(!list_empty(&gcwq->idle_list));
3207e22bee78STejun Heo 
3208e22bee78STejun Heo 	for_each_busy_worker(worker, i, pos, gcwq) {
3209e22bee78STejun Heo 		struct work_struct *rebind_work = &worker->rebind_work;
3210e22bee78STejun Heo 
3211e22bee78STejun Heo 		/*
3212e22bee78STejun Heo 		 * Rebind_work may race with future cpu hotplug
3213e22bee78STejun Heo 		 * operations.  Use a separate flag to mark that
3214e22bee78STejun Heo 		 * rebinding is scheduled.
3215e22bee78STejun Heo 		 */
3216cb444766STejun Heo 		worker->flags |= WORKER_REBIND;
3217cb444766STejun Heo 		worker->flags &= ~WORKER_ROGUE;
3218e22bee78STejun Heo 
3219e22bee78STejun Heo 		/* queue rebind_work, wq doesn't matter, use the default one */
3220e22bee78STejun Heo 		if (test_and_set_bit(WORK_STRUCT_PENDING_BIT,
3221e22bee78STejun Heo 				     work_data_bits(rebind_work)))
3222e22bee78STejun Heo 			continue;
3223e22bee78STejun Heo 
3224e22bee78STejun Heo 		debug_work_activate(rebind_work);
3225d320c038STejun Heo 		insert_work(get_cwq(gcwq->cpu, system_wq), rebind_work,
3226e22bee78STejun Heo 			    worker->scheduled.next,
3227e22bee78STejun Heo 			    work_color_to_flags(WORK_NO_COLOR));
3228e22bee78STejun Heo 	}
3229e22bee78STejun Heo 
3230e22bee78STejun Heo 	/* relinquish manager role */
3231e22bee78STejun Heo 	gcwq->flags &= ~GCWQ_MANAGING_WORKERS;
3232e22bee78STejun Heo 
3233db7bccf4STejun Heo 	/* notify completion */
3234db7bccf4STejun Heo 	gcwq->trustee = NULL;
3235db7bccf4STejun Heo 	gcwq->trustee_state = TRUSTEE_DONE;
3236db7bccf4STejun Heo 	wake_up_all(&gcwq->trustee_wait);
3237db7bccf4STejun Heo 	spin_unlock_irq(&gcwq->lock);
3238db7bccf4STejun Heo 	return 0;
3239db7bccf4STejun Heo }
3240db7bccf4STejun Heo 
3241db7bccf4STejun Heo /**
3242db7bccf4STejun Heo  * wait_trustee_state - wait for trustee to enter the specified state
3243db7bccf4STejun Heo  * @gcwq: gcwq the trustee of interest belongs to
3244db7bccf4STejun Heo  * @state: target state to wait for
3245db7bccf4STejun Heo  *
3246db7bccf4STejun Heo  * Wait for the trustee to reach @state.  DONE is already matched.
3247db7bccf4STejun Heo  *
3248db7bccf4STejun Heo  * CONTEXT:
3249db7bccf4STejun Heo  * spin_lock_irq(gcwq->lock) which may be released and regrabbed
3250db7bccf4STejun Heo  * multiple times.  To be used by cpu_callback.
3251db7bccf4STejun Heo  */
3252db7bccf4STejun Heo static void __cpuinit wait_trustee_state(struct global_cwq *gcwq, int state)
325306bd6ebfSNamhyung Kim __releases(&gcwq->lock)
325406bd6ebfSNamhyung Kim __acquires(&gcwq->lock)
3255db7bccf4STejun Heo {
3256db7bccf4STejun Heo 	if (!(gcwq->trustee_state == state ||
3257db7bccf4STejun Heo 	      gcwq->trustee_state == TRUSTEE_DONE)) {
3258db7bccf4STejun Heo 		spin_unlock_irq(&gcwq->lock);
3259db7bccf4STejun Heo 		__wait_event(gcwq->trustee_wait,
3260db7bccf4STejun Heo 			     gcwq->trustee_state == state ||
3261db7bccf4STejun Heo 			     gcwq->trustee_state == TRUSTEE_DONE);
3262db7bccf4STejun Heo 		spin_lock_irq(&gcwq->lock);
3263db7bccf4STejun Heo 	}
3264db7bccf4STejun Heo }
3265db7bccf4STejun Heo 
32669c7b216dSChandra Seetharaman static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
32671da177e4SLinus Torvalds 						unsigned long action,
32681da177e4SLinus Torvalds 						void *hcpu)
32691da177e4SLinus Torvalds {
32703af24433SOleg Nesterov 	unsigned int cpu = (unsigned long)hcpu;
3271db7bccf4STejun Heo 	struct global_cwq *gcwq = get_gcwq(cpu);
3272db7bccf4STejun Heo 	struct task_struct *new_trustee = NULL;
3273e22bee78STejun Heo 	struct worker *uninitialized_var(new_worker);
3274db7bccf4STejun Heo 	unsigned long flags;
32751da177e4SLinus Torvalds 
32768bb78442SRafael J. Wysocki 	action &= ~CPU_TASKS_FROZEN;
32778bb78442SRafael J. Wysocki 
32781da177e4SLinus Torvalds 	switch (action) {
3279db7bccf4STejun Heo 	case CPU_DOWN_PREPARE:
3280db7bccf4STejun Heo 		new_trustee = kthread_create(trustee_thread, gcwq,
3281db7bccf4STejun Heo 					     "workqueue_trustee/%d\n", cpu);
3281db7bccf4STejun Heo 					     "workqueue_trustee/%d", cpu);
3283db7bccf4STejun Heo 			return notifier_from_errno(PTR_ERR(new_trustee));
3284db7bccf4STejun Heo 		kthread_bind(new_trustee, cpu);
3285e22bee78STejun Heo 		/* fall through */
32863af24433SOleg Nesterov 	case CPU_UP_PREPARE:
3287e22bee78STejun Heo 		BUG_ON(gcwq->first_idle);
3288e22bee78STejun Heo 		new_worker = create_worker(gcwq, false);
3289e22bee78STejun Heo 		if (!new_worker) {
3290e22bee78STejun Heo 			if (new_trustee)
3291e22bee78STejun Heo 				kthread_stop(new_trustee);
3292e22bee78STejun Heo 			return NOTIFY_BAD;
32933af24433SOleg Nesterov 		}
3294db7bccf4STejun Heo 	}
32951537663fSTejun Heo 
3296db7bccf4STejun Heo 	/* some are called w/ irq disabled, don't disturb irq status */
3297db7bccf4STejun Heo 	spin_lock_irqsave(&gcwq->lock, flags);
32983af24433SOleg Nesterov 
32993af24433SOleg Nesterov 	switch (action) {
3300db7bccf4STejun Heo 	case CPU_DOWN_PREPARE:
3301db7bccf4STejun Heo 		/* initialize trustee and tell it to acquire the gcwq */
3302db7bccf4STejun Heo 		BUG_ON(gcwq->trustee || gcwq->trustee_state != TRUSTEE_DONE);
3303db7bccf4STejun Heo 		gcwq->trustee = new_trustee;
3304db7bccf4STejun Heo 		gcwq->trustee_state = TRUSTEE_START;
3305db7bccf4STejun Heo 		wake_up_process(gcwq->trustee);
3306db7bccf4STejun Heo 		wait_trustee_state(gcwq, TRUSTEE_IN_CHARGE);
3307e22bee78STejun Heo 		/* fall through */
33083af24433SOleg Nesterov 	case CPU_UP_PREPARE:
3309e22bee78STejun Heo 		BUG_ON(gcwq->first_idle);
3310e22bee78STejun Heo 		gcwq->first_idle = new_worker;
33111da177e4SLinus Torvalds 		break;
33121da177e4SLinus Torvalds 
3313e22bee78STejun Heo 	case CPU_DYING:
3314e22bee78STejun Heo 		/*
3315e22bee78STejun Heo 		 * Before this, the trustee and all workers except for
3316e22bee78STejun Heo 		 * the ones which are still executing works from
3317e22bee78STejun Heo 		 * before the last CPU down must be on the cpu.  After
3318e22bee78STejun Heo 		 * this, they'll all be diasporas.
3319e22bee78STejun Heo 		 */
3320e22bee78STejun Heo 		gcwq->flags |= GCWQ_DISASSOCIATED;
3321db7bccf4STejun Heo 		break;
3322db7bccf4STejun Heo 
33233da1c84cSOleg Nesterov 	case CPU_POST_DEAD:
3324db7bccf4STejun Heo 		gcwq->trustee_state = TRUSTEE_BUTCHER;
3325e22bee78STejun Heo 		/* fall through */
3326e22bee78STejun Heo 	case CPU_UP_CANCELED:
3327e22bee78STejun Heo 		destroy_worker(gcwq->first_idle);
3328e22bee78STejun Heo 		gcwq->first_idle = NULL;
3329db7bccf4STejun Heo 		break;
3330db7bccf4STejun Heo 
3331db7bccf4STejun Heo 	case CPU_DOWN_FAILED:
33321da177e4SLinus Torvalds 	case CPU_ONLINE:
3333e22bee78STejun Heo 		gcwq->flags &= ~GCWQ_DISASSOCIATED;
3334db7bccf4STejun Heo 		if (gcwq->trustee_state != TRUSTEE_DONE) {
3335db7bccf4STejun Heo 			gcwq->trustee_state = TRUSTEE_RELEASE;
3336db7bccf4STejun Heo 			wake_up_process(gcwq->trustee);
3337db7bccf4STejun Heo 			wait_trustee_state(gcwq, TRUSTEE_DONE);
3338db7bccf4STejun Heo 		}
33391da177e4SLinus Torvalds 
3340e22bee78STejun Heo 		/*
3341e22bee78STejun Heo 		 * Trustee is done and there might be no worker left.
3342e22bee78STejun Heo 		 * Put the first_idle in and request a real manager to
3343e22bee78STejun Heo 		 * take a look.
3344e22bee78STejun Heo 		 */
3345e22bee78STejun Heo 		spin_unlock_irq(&gcwq->lock);
3346e22bee78STejun Heo 		kthread_bind(gcwq->first_idle->task, cpu);
3347e22bee78STejun Heo 		spin_lock_irq(&gcwq->lock);
3348e22bee78STejun Heo 		gcwq->flags |= GCWQ_MANAGE_WORKERS;
3349e22bee78STejun Heo 		start_worker(gcwq->first_idle);
3350e22bee78STejun Heo 		gcwq->first_idle = NULL;
33511da177e4SLinus Torvalds 		break;
33521da177e4SLinus Torvalds 	}
33531da177e4SLinus Torvalds 
3354db7bccf4STejun Heo 	spin_unlock_irqrestore(&gcwq->lock, flags);
335500dfcaf7SOleg Nesterov 
33561537663fSTejun Heo 	return notifier_from_errno(0);
33571da177e4SLinus Torvalds }
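/*
 * Illustrative sketch (hypothetical; example_* names are invented): the
 * minimal shape of a CPU hotplug callback following the conventions
 * above -- strip CPU_TASKS_FROZEN so frozen and normal transitions are
 * handled alike, allocate in *_PREPARE where failure can still abort
 * the transition, and report errors via notifier_from_errno().
 */
static DEFINE_PER_CPU(void *, example_state);

static int __cpuinit example_cpu_callback(struct notifier_block *nfb,
					  unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	void *p;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_UP_PREPARE:
		/* allocate per-cpu state; failure aborts the bringup */
		p = kzalloc(sizeof(unsigned long), GFP_KERNEL);
		if (!p)
			return notifier_from_errno(-ENOMEM);
		per_cpu(example_state, cpu) = p;
		break;
	case CPU_UP_CANCELED:
	case CPU_DEAD:
		kfree(per_cpu(example_state, cpu));
		per_cpu(example_state, cpu) = NULL;
		break;
	}
	return notifier_from_errno(0);
}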
33581da177e4SLinus Torvalds 
33592d3854a3SRusty Russell #ifdef CONFIG_SMP
33608ccad40dSRusty Russell 
33612d3854a3SRusty Russell struct work_for_cpu {
33626b44003eSAndrew Morton 	struct completion completion;
33632d3854a3SRusty Russell 	long (*fn)(void *);
33642d3854a3SRusty Russell 	void *arg;
33652d3854a3SRusty Russell 	long ret;
33662d3854a3SRusty Russell };
33672d3854a3SRusty Russell 
33686b44003eSAndrew Morton static int do_work_for_cpu(void *_wfc)
33692d3854a3SRusty Russell {
33706b44003eSAndrew Morton 	struct work_for_cpu *wfc = _wfc;
33712d3854a3SRusty Russell 	wfc->ret = wfc->fn(wfc->arg);
33726b44003eSAndrew Morton 	complete(&wfc->completion);
33736b44003eSAndrew Morton 	return 0;
33742d3854a3SRusty Russell }
33752d3854a3SRusty Russell 
33762d3854a3SRusty Russell /**
33772d3854a3SRusty Russell  * work_on_cpu - run a function in user context on a particular cpu
33782d3854a3SRusty Russell  * @cpu: the cpu to run on
33792d3854a3SRusty Russell  * @fn: the function to run
33802d3854a3SRusty Russell  * @arg: the function arg
33812d3854a3SRusty Russell  *
338231ad9081SRusty Russell  * This will return the value @fn returns.
338331ad9081SRusty Russell  * It is up to the caller to ensure that the cpu doesn't go offline.
33846b44003eSAndrew Morton  * The caller must not hold any locks which would prevent @fn from completing.
33852d3854a3SRusty Russell  */
33862d3854a3SRusty Russell long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
33872d3854a3SRusty Russell {
33886b44003eSAndrew Morton 	struct task_struct *sub_thread;
33896b44003eSAndrew Morton 	struct work_for_cpu wfc = {
33906b44003eSAndrew Morton 		.completion = COMPLETION_INITIALIZER_ONSTACK(wfc.completion),
33916b44003eSAndrew Morton 		.fn = fn,
33926b44003eSAndrew Morton 		.arg = arg,
33936b44003eSAndrew Morton 	};
33942d3854a3SRusty Russell 
33956b44003eSAndrew Morton 	sub_thread = kthread_create(do_work_for_cpu, &wfc, "work_for_cpu");
33966b44003eSAndrew Morton 	if (IS_ERR(sub_thread))
33976b44003eSAndrew Morton 		return PTR_ERR(sub_thread);
33986b44003eSAndrew Morton 	kthread_bind(sub_thread, cpu);
33996b44003eSAndrew Morton 	wake_up_process(sub_thread);
34006b44003eSAndrew Morton 	wait_for_completion(&wfc.completion);
34012d3854a3SRusty Russell 	return wfc.ret;
34022d3854a3SRusty Russell }
34032d3854a3SRusty Russell EXPORT_SYMBOL_GPL(work_on_cpu);
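/*
 * Illustrative usage sketch (hypothetical; example_* names are
 * invented): work_on_cpu() runs @fn in a freshly created kthread bound
 * to @cpu and sleeps on the completion until it finishes.
 */
static long example_on_cpu_fn(void *arg)
{
	/* executes in process context on the CPU passed to work_on_cpu() */
	return (long)smp_processor_id();
}

static long example_run_on(unsigned int cpu)
{
	/* the caller is responsible for keeping @cpu online, see above */
	return work_on_cpu(cpu, example_on_cpu_fn, NULL);
}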
34042d3854a3SRusty Russell #endif /* CONFIG_SMP */
34052d3854a3SRusty Russell 
3406a0a1a5fdSTejun Heo #ifdef CONFIG_FREEZER
3407e7577c50SRusty Russell 
3408a0a1a5fdSTejun Heo /**
3409a0a1a5fdSTejun Heo  * freeze_workqueues_begin - begin freezing workqueues
3410a0a1a5fdSTejun Heo  *
3411a0a1a5fdSTejun Heo  * Start freezing workqueues.  After this function returns, all
3412a0a1a5fdSTejun Heo  * freezeable workqueues will queue new works to their delayed_works
34137e11629dSTejun Heo  * list instead of gcwq->worklist.
3414a0a1a5fdSTejun Heo  *
3415a0a1a5fdSTejun Heo  * CONTEXT:
34168b03ae3cSTejun Heo  * Grabs and releases workqueue_lock and gcwq->lock's.
3417a0a1a5fdSTejun Heo  */
3418a0a1a5fdSTejun Heo void freeze_workqueues_begin(void)
3419a0a1a5fdSTejun Heo {
3420a0a1a5fdSTejun Heo 	unsigned int cpu;
3421a0a1a5fdSTejun Heo 
3422a0a1a5fdSTejun Heo 	spin_lock(&workqueue_lock);
3423a0a1a5fdSTejun Heo 
3424a0a1a5fdSTejun Heo 	BUG_ON(workqueue_freezing);
3425a0a1a5fdSTejun Heo 	workqueue_freezing = true;
3426a0a1a5fdSTejun Heo 
3427f3421797STejun Heo 	for_each_gcwq_cpu(cpu) {
34288b03ae3cSTejun Heo 		struct global_cwq *gcwq = get_gcwq(cpu);
3429bdbc5dd7STejun Heo 		struct workqueue_struct *wq;
34308b03ae3cSTejun Heo 
34318b03ae3cSTejun Heo 		spin_lock_irq(&gcwq->lock);
34328b03ae3cSTejun Heo 
3433db7bccf4STejun Heo 		BUG_ON(gcwq->flags & GCWQ_FREEZING);
3434db7bccf4STejun Heo 		gcwq->flags |= GCWQ_FREEZING;
3435db7bccf4STejun Heo 
3436a0a1a5fdSTejun Heo 		list_for_each_entry(wq, &workqueues, list) {
3437a0a1a5fdSTejun Heo 			struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
3438a0a1a5fdSTejun Heo 
3439f3421797STejun Heo 			if (cwq && wq->flags & WQ_FREEZEABLE)
3440a0a1a5fdSTejun Heo 				cwq->max_active = 0;
34411da177e4SLinus Torvalds 		}
34428b03ae3cSTejun Heo 
34438b03ae3cSTejun Heo 		spin_unlock_irq(&gcwq->lock);
3444a0a1a5fdSTejun Heo 	}
3445a0a1a5fdSTejun Heo 
3446a0a1a5fdSTejun Heo 	spin_unlock(&workqueue_lock);
3447a0a1a5fdSTejun Heo }
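/*
 * Illustrative sketch (hypothetical; example_* names are invented):
 * only workqueues allocated with WQ_FREEZEABLE are affected by the
 * freezer hooks in this section; everything else keeps running across
 * freeze_workqueues_begin().
 */
static struct workqueue_struct *example_fs_wq;

static int __init example_fs_init(void)
{
	/* max_active = 1 also serializes this workqueue's works per cpu */
	example_fs_wq = alloc_workqueue("example_fs", WQ_FREEZEABLE, 1);
	return example_fs_wq ? 0 : -ENOMEM;
}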
3448a0a1a5fdSTejun Heo 
3449a0a1a5fdSTejun Heo /**
3450a0a1a5fdSTejun Heo  * freeze_workqueues_busy - are freezeable workqueues still busy?
3451a0a1a5fdSTejun Heo  *
3452a0a1a5fdSTejun Heo  * Check whether freezing is complete.  This function must be called
3453a0a1a5fdSTejun Heo  * between freeze_workqueues_begin() and thaw_workqueues().
3454a0a1a5fdSTejun Heo  *
3455a0a1a5fdSTejun Heo  * CONTEXT:
3456a0a1a5fdSTejun Heo  * Grabs and releases workqueue_lock.
3457a0a1a5fdSTejun Heo  *
3458a0a1a5fdSTejun Heo  * RETURNS:
3459a0a1a5fdSTejun Heo  * %true if some freezeable workqueues are still busy.  %false if
3460a0a1a5fdSTejun Heo  * freezing is complete.
3461a0a1a5fdSTejun Heo  */
3462a0a1a5fdSTejun Heo bool freeze_workqueues_busy(void)
3463a0a1a5fdSTejun Heo {
3464a0a1a5fdSTejun Heo 	unsigned int cpu;
3465a0a1a5fdSTejun Heo 	bool busy = false;
3466a0a1a5fdSTejun Heo 
3467a0a1a5fdSTejun Heo 	spin_lock(&workqueue_lock);
3468a0a1a5fdSTejun Heo 
3469a0a1a5fdSTejun Heo 	BUG_ON(!workqueue_freezing);
3470a0a1a5fdSTejun Heo 
3471f3421797STejun Heo 	for_each_gcwq_cpu(cpu) {
3472bdbc5dd7STejun Heo 		struct workqueue_struct *wq;
3473a0a1a5fdSTejun Heo 		/*
3474a0a1a5fdSTejun Heo 		 * nr_active is monotonically decreasing.  It's safe
3475a0a1a5fdSTejun Heo 		 * to peek without lock.
3476a0a1a5fdSTejun Heo 		 */
3477a0a1a5fdSTejun Heo 		list_for_each_entry(wq, &workqueues, list) {
3478a0a1a5fdSTejun Heo 			struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
3479a0a1a5fdSTejun Heo 
3480f3421797STejun Heo 			if (!cwq || !(wq->flags & WQ_FREEZEABLE))
3481a0a1a5fdSTejun Heo 				continue;
3482a0a1a5fdSTejun Heo 
3483a0a1a5fdSTejun Heo 			BUG_ON(cwq->nr_active < 0);
3484a0a1a5fdSTejun Heo 			if (cwq->nr_active) {
3485a0a1a5fdSTejun Heo 				busy = true;
3486a0a1a5fdSTejun Heo 				goto out_unlock;
3487a0a1a5fdSTejun Heo 			}
3488a0a1a5fdSTejun Heo 		}
3489a0a1a5fdSTejun Heo 	}
3490a0a1a5fdSTejun Heo out_unlock:
3491a0a1a5fdSTejun Heo 	spin_unlock(&workqueue_lock);
3492a0a1a5fdSTejun Heo 	return busy;
3493a0a1a5fdSTejun Heo }
3494a0a1a5fdSTejun Heo 
3495a0a1a5fdSTejun Heo /**
3496a0a1a5fdSTejun Heo  * thaw_workqueues - thaw workqueues
3497a0a1a5fdSTejun Heo  *
3498a0a1a5fdSTejun Heo  * Thaw workqueues.  Normal queueing is restored and all collected
34997e11629dSTejun Heo  * frozen works are transferred to their respective gcwq worklists.
3500a0a1a5fdSTejun Heo  *
3501a0a1a5fdSTejun Heo  * CONTEXT:
35028b03ae3cSTejun Heo  * Grabs and releases workqueue_lock and gcwq->lock's.
3503a0a1a5fdSTejun Heo  */
3504a0a1a5fdSTejun Heo void thaw_workqueues(void)
3505a0a1a5fdSTejun Heo {
3506a0a1a5fdSTejun Heo 	unsigned int cpu;
3507a0a1a5fdSTejun Heo 
3508a0a1a5fdSTejun Heo 	spin_lock(&workqueue_lock);
3509a0a1a5fdSTejun Heo 
3510a0a1a5fdSTejun Heo 	if (!workqueue_freezing)
3511a0a1a5fdSTejun Heo 		goto out_unlock;
3512a0a1a5fdSTejun Heo 
3513f3421797STejun Heo 	for_each_gcwq_cpu(cpu) {
35148b03ae3cSTejun Heo 		struct global_cwq *gcwq = get_gcwq(cpu);
3515bdbc5dd7STejun Heo 		struct workqueue_struct *wq;
35168b03ae3cSTejun Heo 
35178b03ae3cSTejun Heo 		spin_lock_irq(&gcwq->lock);
35188b03ae3cSTejun Heo 
3519db7bccf4STejun Heo 		BUG_ON(!(gcwq->flags & GCWQ_FREEZING));
3520db7bccf4STejun Heo 		gcwq->flags &= ~GCWQ_FREEZING;
3521db7bccf4STejun Heo 
3522a0a1a5fdSTejun Heo 		list_for_each_entry(wq, &workqueues, list) {
3523a0a1a5fdSTejun Heo 			struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
3524a0a1a5fdSTejun Heo 
3525f3421797STejun Heo 			if (!cwq || !(wq->flags & WQ_FREEZEABLE))
3526a0a1a5fdSTejun Heo 				continue;
3527a0a1a5fdSTejun Heo 
3528a0a1a5fdSTejun Heo 			/* restore max_active and repopulate worklist */
3529a0a1a5fdSTejun Heo 			cwq->max_active = wq->saved_max_active;
3530a0a1a5fdSTejun Heo 
3531a0a1a5fdSTejun Heo 			while (!list_empty(&cwq->delayed_works) &&
3532a0a1a5fdSTejun Heo 			       cwq->nr_active < cwq->max_active)
3533a0a1a5fdSTejun Heo 				cwq_activate_first_delayed(cwq);
3534a0a1a5fdSTejun Heo 		}
35358b03ae3cSTejun Heo 
3536e22bee78STejun Heo 		wake_up_worker(gcwq);
3537e22bee78STejun Heo 
35388b03ae3cSTejun Heo 		spin_unlock_irq(&gcwq->lock);
3539a0a1a5fdSTejun Heo 	}
3540a0a1a5fdSTejun Heo 
3541a0a1a5fdSTejun Heo 	workqueue_freezing = false;
3542a0a1a5fdSTejun Heo out_unlock:
3543a0a1a5fdSTejun Heo 	spin_unlock(&workqueue_lock);
3544a0a1a5fdSTejun Heo }
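/*
 * Illustrative sketch (hypothetical): the call sequence the PM core
 * follows around the three hooks above, reduced to its skeleton.  The
 * polling interval and the use of msleep() (which needs
 * <linux/delay.h>) are invented details.
 */
static int example_freeze_thaw(void)
{
	/* stop freezeable workqueues from starting new works */
	freeze_workqueues_begin();

	/* wait for already-running freezeable works to drain */
	while (freeze_workqueues_busy())
		msleep(10);

	/* ... suspend/hibernate activity would happen here ... */

	/* restore max_active and release the accumulated delayed works */
	thaw_workqueues();
	return 0;
}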
3545a0a1a5fdSTejun Heo #endif /* CONFIG_FREEZER */
3546a0a1a5fdSTejun Heo 
35476ee0578bSSuresh Siddha static int __init init_workqueues(void)
35481da177e4SLinus Torvalds {
3549c34056a3STejun Heo 	unsigned int cpu;
3550c8e55f36STejun Heo 	int i;
3551c34056a3STejun Heo 
3552f6500947STejun Heo 	cpu_notifier(workqueue_cpu_callback, CPU_PRI_WORKQUEUE);
35538b03ae3cSTejun Heo 
35548b03ae3cSTejun Heo 	/* initialize gcwqs */
3555f3421797STejun Heo 	for_each_gcwq_cpu(cpu) {
35568b03ae3cSTejun Heo 		struct global_cwq *gcwq = get_gcwq(cpu);
35578b03ae3cSTejun Heo 
35588b03ae3cSTejun Heo 		spin_lock_init(&gcwq->lock);
35597e11629dSTejun Heo 		INIT_LIST_HEAD(&gcwq->worklist);
35608b03ae3cSTejun Heo 		gcwq->cpu = cpu;
3561f3421797STejun Heo 		if (cpu == WORK_CPU_UNBOUND)
3562f3421797STejun Heo 			gcwq->flags |= GCWQ_DISASSOCIATED;
35638b03ae3cSTejun Heo 
3564c8e55f36STejun Heo 		INIT_LIST_HEAD(&gcwq->idle_list);
3565c8e55f36STejun Heo 		for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++)
3566c8e55f36STejun Heo 			INIT_HLIST_HEAD(&gcwq->busy_hash[i]);
3567c8e55f36STejun Heo 
3568e22bee78STejun Heo 		init_timer_deferrable(&gcwq->idle_timer);
3569e22bee78STejun Heo 		gcwq->idle_timer.function = idle_worker_timeout;
3570e22bee78STejun Heo 		gcwq->idle_timer.data = (unsigned long)gcwq;
3571e22bee78STejun Heo 
3572e22bee78STejun Heo 		setup_timer(&gcwq->mayday_timer, gcwq_mayday_timeout,
3573e22bee78STejun Heo 			    (unsigned long)gcwq);
3574e22bee78STejun Heo 
35758b03ae3cSTejun Heo 		ida_init(&gcwq->worker_ida);
3576db7bccf4STejun Heo 
3577db7bccf4STejun Heo 		gcwq->trustee_state = TRUSTEE_DONE;
3578db7bccf4STejun Heo 		init_waitqueue_head(&gcwq->trustee_wait);
35798b03ae3cSTejun Heo 	}
35808b03ae3cSTejun Heo 
3581e22bee78STejun Heo 	/* create the initial worker */
3582f3421797STejun Heo 	for_each_online_gcwq_cpu(cpu) {
3583e22bee78STejun Heo 		struct global_cwq *gcwq = get_gcwq(cpu);
3584e22bee78STejun Heo 		struct worker *worker;
3585e22bee78STejun Heo 
3586e22bee78STejun Heo 		worker = create_worker(gcwq, true);
3587e22bee78STejun Heo 		BUG_ON(!worker);
3588e22bee78STejun Heo 		spin_lock_irq(&gcwq->lock);
3589e22bee78STejun Heo 		start_worker(worker);
3590e22bee78STejun Heo 		spin_unlock_irq(&gcwq->lock);
3591e22bee78STejun Heo 	}
3592e22bee78STejun Heo 
3593d320c038STejun Heo 	system_wq = alloc_workqueue("events", 0, 0);
3594d320c038STejun Heo 	system_long_wq = alloc_workqueue("events_long", 0, 0);
3595d320c038STejun Heo 	system_nrt_wq = alloc_workqueue("events_nrt", WQ_NON_REENTRANT, 0);
3596f3421797STejun Heo 	system_unbound_wq = alloc_workqueue("events_unbound", WQ_UNBOUND,
3597f3421797STejun Heo 					    WQ_UNBOUND_MAX_ACTIVE);
3598d320c038STejun Heo 	BUG_ON(!system_wq || !system_long_wq || !system_nrt_wq || !system_unbound_wq);
35996ee0578bSSuresh Siddha 	return 0;
36001da177e4SLinus Torvalds }
36016ee0578bSSuresh Siddha early_initcall(init_workqueues);
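/*
 * Illustrative sketch (hypothetical; example_* names are invented):
 * queueing to the system workqueues created in init_workqueues().
 */
static void example_work_fn(struct work_struct *work)
{
	/* runs in process context on a concurrency-managed worker */
}

static DECLARE_WORK(example_work, example_work_fn);

static void example_submit(void)
{
	/*
	 * system_wq binds the work to the submitting cpu's gcwq;
	 * system_unbound_wq trades that locality for immediate execution
	 * on the unbound gcwq.
	 */
	queue_work(system_unbound_wq, &example_work);
}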