xref: /linux-6.15/kernel/workqueue.c (revision c7fc77f7)
/*
 * linux/kernel/workqueue.c
 *
 * Generic mechanism for defining kernel helper threads for running
 * arbitrary tasks in process context.
 *
 * Started by Ingo Molnar, Copyright (C) 2002
 *
 * Derived from the taskqueue/keventd code by:
 *
 *   David Woodhouse <dwmw2@infradead.org>
 *   Andrew Morton
 *   Kai Petzke <wpp@marie.physik.tu-berlin.de>
 *   Theodore Ts'o <tytso@mit.edu>
 *
 * Made to use alloc_percpu by Christoph Lameter.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/mempolicy.h>
#include <linux/freezer.h>
#include <linux/kallsyms.h>
#include <linux/debug_locks.h>
#include <linux/lockdep.h>
#include <linux/idr.h>

#include "workqueue_sched.h"

enum {
	/* global_cwq flags */
	GCWQ_MANAGE_WORKERS	= 1 << 0,	/* need to manage workers */
	GCWQ_MANAGING_WORKERS	= 1 << 1,	/* managing workers */
	GCWQ_DISASSOCIATED	= 1 << 2,	/* cpu can't serve workers */
	GCWQ_FREEZING		= 1 << 3,	/* freeze in progress */
	GCWQ_HIGHPRI_PENDING	= 1 << 4,	/* highpri works on queue */

	/* worker flags */
	WORKER_STARTED		= 1 << 0,	/* started */
	WORKER_DIE		= 1 << 1,	/* die die die */
	WORKER_IDLE		= 1 << 2,	/* is idle */
	WORKER_PREP		= 1 << 3,	/* preparing to run works */
	WORKER_ROGUE		= 1 << 4,	/* not bound to any cpu */
	WORKER_REBIND		= 1 << 5,	/* mom is home, come back */
	WORKER_CPU_INTENSIVE	= 1 << 6,	/* cpu intensive */
	WORKER_UNBOUND		= 1 << 7,	/* worker is unbound */

	WORKER_NOT_RUNNING	= WORKER_PREP | WORKER_ROGUE | WORKER_REBIND |
				  WORKER_CPU_INTENSIVE | WORKER_UNBOUND,

	/* gcwq->trustee_state */
	TRUSTEE_START		= 0,		/* start */
	TRUSTEE_IN_CHARGE	= 1,		/* trustee in charge of gcwq */
	TRUSTEE_BUTCHER		= 2,		/* butcher workers */
	TRUSTEE_RELEASE		= 3,		/* release workers */
	TRUSTEE_DONE		= 4,		/* trustee is done */

	BUSY_WORKER_HASH_ORDER	= 6,		/* 64 pointers */
	BUSY_WORKER_HASH_SIZE	= 1 << BUSY_WORKER_HASH_ORDER,
	BUSY_WORKER_HASH_MASK	= BUSY_WORKER_HASH_SIZE - 1,

	MAX_IDLE_WORKERS_RATIO	= 4,		/* 1/4 of busy can be idle */
	IDLE_WORKER_TIMEOUT	= 300 * HZ,	/* keep idle ones for 5 mins */

	MAYDAY_INITIAL_TIMEOUT	= HZ / 100,	/* call for help after 10ms */
	MAYDAY_INTERVAL		= HZ / 10,	/* and then every 100ms */
	CREATE_COOLDOWN		= HZ,		/* time to breathe after a failure */
	TRUSTEE_COOLDOWN	= HZ / 10,	/* for trustee draining */

	/*
	 * Rescue workers are used only in emergencies and shared by
	 * all cpus.  Give -20.
	 */
	RESCUER_NICE_LEVEL	= -20,
};

/*
 * Structure fields follow one of the following exclusion rules.
 *
 * I: Set during initialization and read-only afterwards.
 *
 * P: Preemption protected.  Disabling preemption is enough; such
 *    fields should only be modified and accessed from the local cpu.
 *
 * L: gcwq->lock protected.  Access with gcwq->lock held.
 *
 * X: During normal operation, modification requires gcwq->lock and
 *    should be done only from local cpu.  Either disabling preemption
 *    on local cpu or grabbing gcwq->lock is enough for read access.
 *    If GCWQ_DISASSOCIATED is set, it's identical to L.
 *
 * F: wq->flush_mutex protected.
 *
 * W: workqueue_lock protected.
 */

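/*
 * For example, worker->flags below is marked X: it may be modified
 * only from the local cpu with gcwq->lock held, while a reader needs
 * just one of the two (local preemption disabled or gcwq->lock held).
 */
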
struct global_cwq;

/*
 * The poor guys doing the actual heavy lifting.  All on-duty workers
 * are either serving the manager role, on the idle list or on the
 * busy hash.
 */
struct worker {
	/* on idle list while idle, on busy hash table while busy */
	union {
		struct list_head	entry;	/* L: while idle */
		struct hlist_node	hentry;	/* L: while busy */
	};

	struct work_struct	*current_work;	/* L: work being processed */
	struct cpu_workqueue_struct *current_cwq; /* L: current_work's cwq */
	struct list_head	scheduled;	/* L: scheduled works */
	struct task_struct	*task;		/* I: worker task */
	struct global_cwq	*gcwq;		/* I: the associated gcwq */
	/* 64 bytes boundary on 64bit, 32 on 32bit */
	unsigned long		last_active;	/* L: last active timestamp */
	unsigned int		flags;		/* X: flags */
	int			id;		/* I: worker id */
	struct work_struct	rebind_work;	/* L: rebind worker to cpu */
};

/*
 * Global per-cpu workqueue.  There's one and only one for each cpu
 * and all works are queued and processed here regardless of their
 * target workqueues.
 */
struct global_cwq {
	spinlock_t		lock;		/* the gcwq lock */
	struct list_head	worklist;	/* L: list of pending works */
	unsigned int		cpu;		/* I: the associated cpu */
	unsigned int		flags;		/* L: GCWQ_* flags */

	int			nr_workers;	/* L: total number of workers */
	int			nr_idle;	/* L: currently idle ones */

	/* workers are chained either in the idle_list or busy_hash */
	struct list_head	idle_list;	/* X: list of idle workers */
	struct hlist_head	busy_hash[BUSY_WORKER_HASH_SIZE];
						/* L: hash of busy workers */

	struct timer_list	idle_timer;	/* L: worker idle timeout */
	struct timer_list	mayday_timer;	/* L: SOS timer for workers */

	struct ida		worker_ida;	/* L: for worker IDs */

	struct task_struct	*trustee;	/* L: for gcwq shutdown */
	unsigned int		trustee_state;	/* L: trustee state */
	wait_queue_head_t	trustee_wait;	/* trustee wait */
	struct worker		*first_idle;	/* L: first idle worker */
} ____cacheline_aligned_in_smp;

/*
 * The per-CPU workqueue.  The lower WORK_STRUCT_FLAG_BITS of
 * work_struct->data are used for flags, so cwqs need to be aligned
 * to at least 1 << WORK_STRUCT_FLAG_BITS bytes.
 */
struct cpu_workqueue_struct {
	struct global_cwq	*gcwq;		/* I: the associated gcwq */
	struct workqueue_struct *wq;		/* I: the owning workqueue */
	int			work_color;	/* L: current color */
	int			flush_color;	/* L: flushing color */
	int			nr_in_flight[WORK_NR_COLORS];
						/* L: nr of in_flight works */
	int			nr_active;	/* L: nr of active works */
	int			max_active;	/* L: max active works */
	struct list_head	delayed_works;	/* L: delayed works */
};

/*
 * Structure used to wait for workqueue flush.
 */
struct wq_flusher {
	struct list_head	list;		/* F: list of flushers */
	int			flush_color;	/* F: flush color waiting for */
	struct completion	done;		/* flush completion */
};

/*
 * The externally visible workqueue abstraction is an array of
 * per-CPU workqueues:
 */
struct workqueue_struct {
	unsigned int		flags;		/* I: WQ_* flags */
	union {
		struct cpu_workqueue_struct __percpu	*pcpu;
		struct cpu_workqueue_struct		*single;
		unsigned long				v;
	} cpu_wq;				/* I: cwq's */
	struct list_head	list;		/* W: list of all workqueues */

	struct mutex		flush_mutex;	/* protects wq flushing */
	int			work_color;	/* F: current work color */
	int			flush_color;	/* F: current flush color */
	atomic_t		nr_cwqs_to_flush; /* flush in progress */
	struct wq_flusher	*first_flusher;	/* F: first flusher */
	struct list_head	flusher_queue;	/* F: flush waiters */
	struct list_head	flusher_overflow; /* F: flush overflow list */

	cpumask_var_t		mayday_mask;	/* cpus requesting rescue */
	struct worker		*rescuer;	/* I: rescue worker */

	int			saved_max_active; /* W: saved cwq max_active */
	const char		*name;		/* I: workqueue name */
#ifdef CONFIG_LOCKDEP
	struct lockdep_map	lockdep_map;
#endif
};

struct workqueue_struct *system_wq __read_mostly;
struct workqueue_struct *system_long_wq __read_mostly;
struct workqueue_struct *system_nrt_wq __read_mostly;
struct workqueue_struct *system_unbound_wq __read_mostly;
EXPORT_SYMBOL_GPL(system_wq);
EXPORT_SYMBOL_GPL(system_long_wq);
EXPORT_SYMBOL_GPL(system_nrt_wq);
EXPORT_SYMBOL_GPL(system_unbound_wq);

#define for_each_busy_worker(worker, i, pos, gcwq)			\
	for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++)			\
		hlist_for_each_entry(worker, pos, &gcwq->busy_hash[i], hentry)

static inline int __next_gcwq_cpu(int cpu, const struct cpumask *mask,
				  unsigned int sw)
{
	if (cpu < nr_cpu_ids) {
		if (sw & 1) {
			cpu = cpumask_next(cpu, mask);
			if (cpu < nr_cpu_ids)
				return cpu;
		}
		if (sw & 2)
			return WORK_CPU_UNBOUND;
	}
	return WORK_CPU_NONE;
}

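/*
 * In __next_gcwq_cpu() above, @sw selects what to walk: bit 0 walks
 * the cpus in @mask and bit 1 appends WORK_CPU_UNBOUND at the end.
 * The gcwq iterators below pass 3 to visit both, while
 * __next_wq_cpu() picks 1 or 2 depending on whether the workqueue is
 * WQ_UNBOUND.
 */
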
static inline int __next_wq_cpu(int cpu, const struct cpumask *mask,
				struct workqueue_struct *wq)
{
	return __next_gcwq_cpu(cpu, mask, !(wq->flags & WQ_UNBOUND) ? 1 : 2);
}

#define for_each_gcwq_cpu(cpu)						\
	for ((cpu) = __next_gcwq_cpu(-1, cpu_possible_mask, 3);		\
	     (cpu) < WORK_CPU_NONE;					\
	     (cpu) = __next_gcwq_cpu((cpu), cpu_possible_mask, 3))

#define for_each_online_gcwq_cpu(cpu)					\
	for ((cpu) = __next_gcwq_cpu(-1, cpu_online_mask, 3);		\
	     (cpu) < WORK_CPU_NONE;					\
	     (cpu) = __next_gcwq_cpu((cpu), cpu_online_mask, 3))

#define for_each_cwq_cpu(cpu, wq)					\
	for ((cpu) = __next_wq_cpu(-1, cpu_possible_mask, (wq));	\
	     (cpu) < WORK_CPU_NONE;					\
	     (cpu) = __next_wq_cpu((cpu), cpu_possible_mask, (wq)))

#ifdef CONFIG_DEBUG_OBJECTS_WORK

static struct debug_obj_descr work_debug_descr;

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static int work_fixup_init(void *addr, enum debug_obj_state state)
{
	struct work_struct *work = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		cancel_work_sync(work);
		debug_object_init(work, &work_debug_descr);
		return 1;
	default:
		return 0;
	}
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown object is activated (might be a statically initialized object)
 */
static int work_fixup_activate(void *addr, enum debug_obj_state state)
{
	struct work_struct *work = addr;

	switch (state) {

	case ODEBUG_STATE_NOTAVAILABLE:
		/*
		 * This is not really a fixup. The work struct was
		 * statically initialized. We just make sure that it
		 * is tracked in the object tracker.
		 */
		if (test_bit(WORK_STRUCT_STATIC_BIT, work_data_bits(work))) {
			debug_object_init(work, &work_debug_descr);
			debug_object_activate(work, &work_debug_descr);
			return 0;
		}
		WARN_ON_ONCE(1);
		return 0;

	case ODEBUG_STATE_ACTIVE:
		WARN_ON(1);

	default:
		return 0;
	}
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static int work_fixup_free(void *addr, enum debug_obj_state state)
{
	struct work_struct *work = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		cancel_work_sync(work);
		debug_object_free(work, &work_debug_descr);
		return 1;
	default:
		return 0;
	}
}

static struct debug_obj_descr work_debug_descr = {
	.name		= "work_struct",
	.fixup_init	= work_fixup_init,
	.fixup_activate	= work_fixup_activate,
	.fixup_free	= work_fixup_free,
};

static inline void debug_work_activate(struct work_struct *work)
{
	debug_object_activate(work, &work_debug_descr);
}

static inline void debug_work_deactivate(struct work_struct *work)
{
	debug_object_deactivate(work, &work_debug_descr);
}

void __init_work(struct work_struct *work, int onstack)
{
	if (onstack)
		debug_object_init_on_stack(work, &work_debug_descr);
	else
		debug_object_init(work, &work_debug_descr);
}
EXPORT_SYMBOL_GPL(__init_work);

void destroy_work_on_stack(struct work_struct *work)
{
	debug_object_free(work, &work_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_work_on_stack);

#else
static inline void debug_work_activate(struct work_struct *work) { }
static inline void debug_work_deactivate(struct work_struct *work) { }
#endif

/* Serializes the accesses to the list of workqueues. */
static DEFINE_SPINLOCK(workqueue_lock);
static LIST_HEAD(workqueues);
static bool workqueue_freezing;		/* W: have wqs started freezing? */

/*
 * The almighty global cpu workqueues.  nr_running is the only field
 * which is expected to be used frequently by other cpus via
 * try_to_wake_up().  Put it in a separate cacheline.
 */
static DEFINE_PER_CPU(struct global_cwq, global_cwq);
static DEFINE_PER_CPU_SHARED_ALIGNED(atomic_t, gcwq_nr_running);

/*
 * Global cpu workqueue and nr_running counter for unbound gcwq.  The
 * gcwq is always online, has GCWQ_DISASSOCIATED set, and all its
 * workers have WORKER_UNBOUND set.
 */
static struct global_cwq unbound_global_cwq;
static atomic_t unbound_gcwq_nr_running = ATOMIC_INIT(0);	/* always 0 */

static int worker_thread(void *__worker);

4001da177e4SLinus Torvalds 
4018b03ae3cSTejun Heo static struct global_cwq *get_gcwq(unsigned int cpu)
4028b03ae3cSTejun Heo {
403f3421797STejun Heo 	if (cpu != WORK_CPU_UNBOUND)
4048b03ae3cSTejun Heo 		return &per_cpu(global_cwq, cpu);
405f3421797STejun Heo 	else
406f3421797STejun Heo 		return &unbound_global_cwq;
4078b03ae3cSTejun Heo }
4088b03ae3cSTejun Heo 
409e22bee78STejun Heo static atomic_t *get_gcwq_nr_running(unsigned int cpu)
410e22bee78STejun Heo {
411f3421797STejun Heo 	if (cpu != WORK_CPU_UNBOUND)
412e22bee78STejun Heo 		return &per_cpu(gcwq_nr_running, cpu);
413f3421797STejun Heo 	else
414f3421797STejun Heo 		return &unbound_gcwq_nr_running;
415e22bee78STejun Heo }
416e22bee78STejun Heo 
4174690c4abSTejun Heo static struct cpu_workqueue_struct *get_cwq(unsigned int cpu,
4184690c4abSTejun Heo 					    struct workqueue_struct *wq)
419a848e3b6SOleg Nesterov {
420f3421797STejun Heo 	if (!(wq->flags & WQ_UNBOUND)) {
421f3421797STejun Heo 		if (likely(cpu < nr_cpu_ids)) {
422f3421797STejun Heo #ifdef CONFIG_SMP
423bdbc5dd7STejun Heo 			return per_cpu_ptr(wq->cpu_wq.pcpu, cpu);
424f3421797STejun Heo #else
425f3421797STejun Heo 			return wq->cpu_wq.single;
426bdbc5dd7STejun Heo #endif
427a848e3b6SOleg Nesterov 		}
428f3421797STejun Heo 	} else if (likely(cpu == WORK_CPU_UNBOUND))
429f3421797STejun Heo 		return wq->cpu_wq.single;
430f3421797STejun Heo 	return NULL;
431f3421797STejun Heo }
432a848e3b6SOleg Nesterov 
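/*
 * For example, on an SMP kernel get_cwq(2, wq) for a bound workqueue
 * returns the cwq in per-cpu slot 2, while an unbound workqueue owns
 * a single cwq which is reachable only via cpu == WORK_CPU_UNBOUND.
 */
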
static unsigned int work_color_to_flags(int color)
{
	return color << WORK_STRUCT_COLOR_SHIFT;
}

static int get_work_color(struct work_struct *work)
{
	return (*work_data_bits(work) >> WORK_STRUCT_COLOR_SHIFT) &
		((1 << WORK_STRUCT_COLOR_BITS) - 1);
}

static int work_next_color(int color)
{
	return (color + 1) % WORK_NR_COLORS;
}

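/*
 * For example, a work queued while the wq is at color 5 carries
 * 5 << WORK_STRUCT_COLOR_SHIFT in its data word, get_work_color()
 * recovers the 5, and work_next_color() cycles the wq color through
 * 0, 1, ..., WORK_NR_COLORS - 1, 0, ...
 */
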
/*
 * Work data points to the cwq while a work is on queue.  Once
 * execution starts, it points to the cpu the work was last on.  This
 * can be distinguished by comparing the data value against
 * PAGE_OFFSET.
 *
 * set_work_{cwq|cpu}() and clear_work_data() can be used to set the
 * cwq, cpu or clear work->data.  These functions should only be
 * called while the work is owned - i.e. while the PENDING bit is set.
 *
 * get_work_[g]cwq() can be used to obtain the gcwq or cwq
 * corresponding to a work.  gcwq is available once the work has been
 * queued anywhere after initialization.  cwq is available only from
 * queueing until execution starts.
 */
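/*
 * For example, while queued, work->data holds the cwq pointer or'd
 * with WORK_STRUCT_PENDING and the color flags; a kernel pointer is
 * always >= PAGE_OFFSET, so get_work_cwq() below can decode it.  Once
 * execution starts, set_work_cpu() stores cpu << WORK_STRUCT_FLAG_BITS
 * instead, which falls below PAGE_OFFSET and decodes as a cpu number.
 */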
static inline void set_work_data(struct work_struct *work, unsigned long data,
				 unsigned long flags)
{
	BUG_ON(!work_pending(work));
	atomic_long_set(&work->data, data | flags | work_static(work));
}

static void set_work_cwq(struct work_struct *work,
			 struct cpu_workqueue_struct *cwq,
			 unsigned long extra_flags)
{
	set_work_data(work, (unsigned long)cwq,
		      WORK_STRUCT_PENDING | extra_flags);
}

static void set_work_cpu(struct work_struct *work, unsigned int cpu)
{
	set_work_data(work, cpu << WORK_STRUCT_FLAG_BITS, WORK_STRUCT_PENDING);
}

static void clear_work_data(struct work_struct *work)
{
	set_work_data(work, WORK_STRUCT_NO_CPU, 0);
}

static inline unsigned long get_work_data(struct work_struct *work)
{
	return atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK;
}

static struct cpu_workqueue_struct *get_work_cwq(struct work_struct *work)
{
	unsigned long data = get_work_data(work);

	return data >= PAGE_OFFSET ? (void *)data : NULL;
}

static struct global_cwq *get_work_gcwq(struct work_struct *work)
{
	unsigned long data = get_work_data(work);
	unsigned int cpu;

	if (data >= PAGE_OFFSET)
		return ((struct cpu_workqueue_struct *)data)->gcwq;

	cpu = data >> WORK_STRUCT_FLAG_BITS;
	if (cpu == WORK_CPU_NONE)
		return NULL;

	BUG_ON(cpu >= nr_cpu_ids && cpu != WORK_CPU_UNBOUND);
	return get_gcwq(cpu);
}

/*
 * Policy functions.  These define the policies on how the global
 * worker pool is managed.  Unless noted otherwise, these functions
 * assume that they're being called with gcwq->lock held.
 */

static bool __need_more_worker(struct global_cwq *gcwq)
{
	return !atomic_read(get_gcwq_nr_running(gcwq->cpu)) ||
		gcwq->flags & GCWQ_HIGHPRI_PENDING;
}

/*
 * Need to wake up a worker?  Called from anything but currently
 * running workers.
 */
static bool need_more_worker(struct global_cwq *gcwq)
{
	return !list_empty(&gcwq->worklist) && __need_more_worker(gcwq);
}

/* Can I start working?  Called from busy but !running workers. */
static bool may_start_working(struct global_cwq *gcwq)
{
	return gcwq->nr_idle;
}

/* Do I need to keep working?  Called from currently running workers. */
static bool keep_working(struct global_cwq *gcwq)
{
	atomic_t *nr_running = get_gcwq_nr_running(gcwq->cpu);

	return !list_empty(&gcwq->worklist) && atomic_read(nr_running) <= 1;
}

/* Do we need a new worker?  Called from manager. */
static bool need_to_create_worker(struct global_cwq *gcwq)
{
	return need_more_worker(gcwq) && !may_start_working(gcwq);
}

/* Do I need to be the manager? */
static bool need_to_manage_workers(struct global_cwq *gcwq)
{
	return need_to_create_worker(gcwq) || gcwq->flags & GCWQ_MANAGE_WORKERS;
}

/* Do we have too many workers and should some go away? */
static bool too_many_workers(struct global_cwq *gcwq)
{
	bool managing = gcwq->flags & GCWQ_MANAGING_WORKERS;
	int nr_idle = gcwq->nr_idle + managing; /* manager is considered idle */
	int nr_busy = gcwq->nr_workers - nr_idle;

	return nr_idle > 2 && (nr_idle - 2) * MAX_IDLE_WORKERS_RATIO >= nr_busy;
}

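/*
 * For example, with MAX_IDLE_WORKERS_RATIO == 4 and 8 busy workers,
 * too_many_workers() is false while nr_idle <= 3 and becomes true at
 * nr_idle == 4, since (4 - 2) * 4 >= 8; the idle timer then starts
 * trimming workers after IDLE_WORKER_TIMEOUT.
 */
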
/*
 * Wake up functions.
 */

/* Return the first worker.  Safe with preemption disabled */
static struct worker *first_worker(struct global_cwq *gcwq)
{
	if (unlikely(list_empty(&gcwq->idle_list)))
		return NULL;

	return list_first_entry(&gcwq->idle_list, struct worker, entry);
}

/**
 * wake_up_worker - wake up an idle worker
 * @gcwq: gcwq to wake worker for
 *
 * Wake up the first idle worker of @gcwq.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 */
static void wake_up_worker(struct global_cwq *gcwq)
{
	struct worker *worker = first_worker(gcwq);

	if (likely(worker))
		wake_up_process(worker->task);
}

/**
 * wq_worker_waking_up - a worker is waking up
 * @task: task waking up
 * @cpu: CPU @task is waking up to
 *
 * This function is called during try_to_wake_up() when a worker is
 * being awoken.
 *
 * CONTEXT:
 * spin_lock_irq(rq->lock)
 */
void wq_worker_waking_up(struct task_struct *task, unsigned int cpu)
{
	struct worker *worker = kthread_data(task);

	if (likely(!(worker->flags & WORKER_NOT_RUNNING)))
		atomic_inc(get_gcwq_nr_running(cpu));
}

/**
 * wq_worker_sleeping - a worker is going to sleep
 * @task: task going to sleep
 * @cpu: CPU in question, must be the current CPU number
 *
 * This function is called during schedule() when a busy worker is
 * going to sleep.  A worker on the same cpu can be woken up by
 * returning a pointer to its task.
 *
 * CONTEXT:
 * spin_lock_irq(rq->lock)
 *
 * RETURNS:
 * Worker task on @cpu to wake up, %NULL if none.
 */
struct task_struct *wq_worker_sleeping(struct task_struct *task,
				       unsigned int cpu)
{
	struct worker *worker = kthread_data(task), *to_wakeup = NULL;
	struct global_cwq *gcwq = get_gcwq(cpu);
	atomic_t *nr_running = get_gcwq_nr_running(cpu);

	if (unlikely(worker->flags & WORKER_NOT_RUNNING))
		return NULL;

	/* this can only happen on the local cpu */
	BUG_ON(cpu != raw_smp_processor_id());

	/*
	 * The counterpart of the following dec_and_test, implied mb,
	 * worklist not empty test sequence is in insert_work().
	 * Please read the comment there.
	 *
	 * NOT_RUNNING is clear.  This means that trustee is not in
	 * charge and we're running on the local cpu w/ rq lock held
	 * and preemption disabled, which in turn means that no one
	 * else could be manipulating idle_list, so dereferencing
	 * idle_list without gcwq lock is safe.
	 */
	if (atomic_dec_and_test(nr_running) && !list_empty(&gcwq->worklist))
		to_wakeup = first_worker(gcwq);
	return to_wakeup ? to_wakeup->task : NULL;
}

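/*
 * Together the two hooks above implement concurrency management: for
 * example, when the last running worker on a cpu blocks,
 * wq_worker_sleeping() drops nr_running to zero and hands the first
 * idle worker back to the scheduler so the pending worklist keeps
 * moving while the original worker sleeps.
 */
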
/**
 * worker_set_flags - set worker flags and adjust nr_running accordingly
 * @worker: self
 * @flags: flags to set
 * @wakeup: wakeup an idle worker if necessary
 *
 * Set @flags in @worker->flags and adjust nr_running accordingly.  If
 * nr_running becomes zero and @wakeup is %true, an idle worker is
 * woken up.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock)
 */
static inline void worker_set_flags(struct worker *worker, unsigned int flags,
				    bool wakeup)
{
	struct global_cwq *gcwq = worker->gcwq;

	WARN_ON_ONCE(worker->task != current);

	/*
	 * If transitioning into NOT_RUNNING, adjust nr_running and
	 * wake up an idle worker as necessary if requested by
	 * @wakeup.
	 */
	if ((flags & WORKER_NOT_RUNNING) &&
	    !(worker->flags & WORKER_NOT_RUNNING)) {
		atomic_t *nr_running = get_gcwq_nr_running(gcwq->cpu);

		if (wakeup) {
			if (atomic_dec_and_test(nr_running) &&
			    !list_empty(&gcwq->worklist))
				wake_up_worker(gcwq);
		} else
			atomic_dec(nr_running);
	}

	worker->flags |= flags;
}

/**
 * worker_clr_flags - clear worker flags and adjust nr_running accordingly
 * @worker: self
 * @flags: flags to clear
 *
 * Clear @flags in @worker->flags and adjust nr_running accordingly.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock)
 */
static inline void worker_clr_flags(struct worker *worker, unsigned int flags)
{
	struct global_cwq *gcwq = worker->gcwq;
	unsigned int oflags = worker->flags;

	WARN_ON_ONCE(worker->task != current);

	worker->flags &= ~flags;

	/* if transitioning out of NOT_RUNNING, increment nr_running */
	if ((flags & WORKER_NOT_RUNNING) && (oflags & WORKER_NOT_RUNNING))
		if (!(worker->flags & WORKER_NOT_RUNNING))
			atomic_inc(get_gcwq_nr_running(gcwq->cpu));
}

/**
 * busy_worker_head - return the busy hash head for a work
 * @gcwq: gcwq of interest
 * @work: work to be hashed
 *
 * Return hash head of @gcwq for @work.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 *
 * RETURNS:
 * Pointer to the hash head.
 */
static struct hlist_head *busy_worker_head(struct global_cwq *gcwq,
					   struct work_struct *work)
{
	const int base_shift = ilog2(sizeof(struct work_struct));
	unsigned long v = (unsigned long)work;

	/* simple shift and fold hash, do we need something better? */
	v >>= base_shift;
	v += v >> BUSY_WORKER_HASH_ORDER;
	v &= BUSY_WORKER_HASH_MASK;

	return &gcwq->busy_hash[v];
}

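/*
 * For example, if v is 0x1234 after the base_shift, the fold adds
 * 0x1234 >> 6 == 0x48 giving 0x127c, and the mask keeps the low six
 * bits: bucket 0x3c of the 64 busy_hash buckets.
 */
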
/**
 * __find_worker_executing_work - find worker which is executing a work
 * @gcwq: gcwq of interest
 * @bwh: hash head as returned by busy_worker_head()
 * @work: work to find worker for
 *
 * Find a worker which is executing @work on @gcwq.  @bwh should be
 * the hash head obtained by calling busy_worker_head() with the same
 * work.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 *
 * RETURNS:
 * Pointer to worker which is executing @work if found, NULL
 * otherwise.
 */
static struct worker *__find_worker_executing_work(struct global_cwq *gcwq,
						   struct hlist_head *bwh,
						   struct work_struct *work)
{
	struct worker *worker;
	struct hlist_node *tmp;

	hlist_for_each_entry(worker, tmp, bwh, hentry)
		if (worker->current_work == work)
			return worker;
	return NULL;
}

/**
 * find_worker_executing_work - find worker which is executing a work
 * @gcwq: gcwq of interest
 * @work: work to find worker for
 *
 * Find a worker which is executing @work on @gcwq.  This function is
 * identical to __find_worker_executing_work() except that this
 * function calculates @bwh itself.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 *
 * RETURNS:
 * Pointer to worker which is executing @work if found, NULL
 * otherwise.
 */
static struct worker *find_worker_executing_work(struct global_cwq *gcwq,
						 struct work_struct *work)
{
	return __find_worker_executing_work(gcwq, busy_worker_head(gcwq, work),
					    work);
}

/**
 * gcwq_determine_ins_pos - find insertion position
 * @gcwq: gcwq of interest
 * @cwq: cwq a work is being queued for
 *
 * A work for @cwq is about to be queued on @gcwq, determine insertion
 * position for the work.  If @cwq is for a HIGHPRI wq, the work is
 * queued at the head of the queue but in FIFO order with respect to
 * other HIGHPRI works; otherwise, at the end of the queue.  This
 * function also sets GCWQ_HIGHPRI_PENDING flag to hint @gcwq that
 * there are HIGHPRI works pending.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 *
 * RETURNS:
 * Pointer to insertion position.
 */
static inline struct list_head *gcwq_determine_ins_pos(struct global_cwq *gcwq,
					       struct cpu_workqueue_struct *cwq)
{
	struct work_struct *twork;

	if (likely(!(cwq->wq->flags & WQ_HIGHPRI)))
		return &gcwq->worklist;

	list_for_each_entry(twork, &gcwq->worklist, entry) {
		struct cpu_workqueue_struct *tcwq = get_work_cwq(twork);

		if (!(tcwq->wq->flags & WQ_HIGHPRI))
			break;
	}

	gcwq->flags |= GCWQ_HIGHPRI_PENDING;
	return &twork->entry;
}

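/*
 * For example, with worklist [H1, H2, N1] where H* come from HIGHPRI
 * wqs, queueing another HIGHPRI work returns &N1->entry, and the
 * list_add_tail() in insert_work() yields [H1, H2, H3, N1]: ahead of
 * normal works but FIFO among HIGHPRI ones.
 */
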
/**
 * insert_work - insert a work into gcwq
 * @cwq: cwq @work belongs to
 * @work: work to insert
 * @head: insertion point
 * @extra_flags: extra WORK_STRUCT_* flags to set
 *
 * Insert @work which belongs to @cwq into @gcwq after @head.
 * @extra_flags is or'd to work_struct flags.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 */
static void insert_work(struct cpu_workqueue_struct *cwq,
			struct work_struct *work, struct list_head *head,
			unsigned int extra_flags)
{
	struct global_cwq *gcwq = cwq->gcwq;

	/* we own @work, set data and link */
	set_work_cwq(work, cwq, extra_flags);

	/*
	 * Ensure that we get the right work->data if we see the
	 * result of list_add() below, see try_to_grab_pending().
	 */
	smp_wmb();

	list_add_tail(&work->entry, head);

	/*
	 * Ensure either wq_worker_sleeping() sees the above
	 * list_add_tail() or we see zero nr_running to avoid workers
	 * lying around lazily while there are works to be processed.
	 */
	smp_mb();

	if (__need_more_worker(gcwq))
		wake_up_worker(gcwq);
}

static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
			 struct work_struct *work)
{
	struct global_cwq *gcwq;
	struct cpu_workqueue_struct *cwq;
	struct list_head *worklist;
	unsigned long flags;

	debug_work_activate(work);

	/* determine gcwq to use */
	if (!(wq->flags & WQ_UNBOUND)) {
		struct global_cwq *last_gcwq;

		if (unlikely(cpu == WORK_CPU_UNBOUND))
			cpu = raw_smp_processor_id();

		/*
		 * It's multi cpu.  If @wq is non-reentrant and @work
		 * was previously on a different cpu, it might still
		 * be running there, in which case the work needs to
		 * be queued on that cpu to guarantee non-reentrance.
		 */
		gcwq = get_gcwq(cpu);
		if (wq->flags & WQ_NON_REENTRANT &&
		    (last_gcwq = get_work_gcwq(work)) && last_gcwq != gcwq) {
			struct worker *worker;

			spin_lock_irqsave(&last_gcwq->lock, flags);

			worker = find_worker_executing_work(last_gcwq, work);

			if (worker && worker->current_cwq->wq == wq)
				gcwq = last_gcwq;
			else {
				/* meh... not running there, queue here */
				spin_unlock_irqrestore(&last_gcwq->lock, flags);
				spin_lock_irqsave(&gcwq->lock, flags);
			}
		} else
			spin_lock_irqsave(&gcwq->lock, flags);
	} else {
		gcwq = get_gcwq(WORK_CPU_UNBOUND);
		spin_lock_irqsave(&gcwq->lock, flags);
	}

	/* gcwq determined, get cwq and queue */
	cwq = get_cwq(gcwq->cpu, wq);

	BUG_ON(!list_empty(&work->entry));

	cwq->nr_in_flight[cwq->work_color]++;

	if (likely(cwq->nr_active < cwq->max_active)) {
		cwq->nr_active++;
		worklist = gcwq_determine_ins_pos(gcwq, cwq);
	} else
		worklist = &cwq->delayed_works;

	insert_work(cwq, work, worklist, work_color_to_flags(cwq->work_color));

	spin_unlock_irqrestore(&gcwq->lock, flags);
}

/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to the CPU on which it was submitted, but if the CPU dies
 * it can be processed by another CPU.
 */
int queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
	int ret;

	ret = queue_work_on(get_cpu(), wq, work);
	put_cpu();

	return ret;
}
EXPORT_SYMBOL_GPL(queue_work);

/**
 * queue_work_on - queue work on specific cpu
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to a specific CPU, the caller must ensure it
 * can't go away.
 */
int
queue_work_on(int cpu, struct workqueue_struct *wq, struct work_struct *work)
{
	int ret = 0;

	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
		__queue_work(cpu, wq, work);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_work_on);

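/*
 * Minimal usage sketch (my_work_fn and my_work are illustrative names,
 * not part of this file):
 *
 *	static void my_work_fn(struct work_struct *work) { ... }
 *	static DECLARE_WORK(my_work, my_work_fn);
 *
 *	queue_work(system_wq, &my_work);	- submitting CPU
 *	queue_work_on(1, system_wq, &my_work);	- pinned to CPU 1
 */
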
static void delayed_work_timer_fn(unsigned long __data)
{
	struct delayed_work *dwork = (struct delayed_work *)__data;
	struct cpu_workqueue_struct *cwq = get_work_cwq(&dwork->work);

	__queue_work(smp_processor_id(), cwq->wq, &dwork->work);
}

/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 */
int queue_delayed_work(struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	if (delay == 0)
		return queue_work(wq, &dwork->work);

	return queue_delayed_work_on(-1, wq, dwork, delay);
}
EXPORT_SYMBOL_GPL(queue_delayed_work);

/**
 * queue_delayed_work_on - queue work on specific CPU after delay
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 */
int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	int ret = 0;
	struct timer_list *timer = &dwork->timer;
	struct work_struct *work = &dwork->work;

	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
		unsigned int lcpu;

		BUG_ON(timer_pending(timer));
		BUG_ON(!list_empty(&work->entry));

		timer_stats_timer_set_start_info(&dwork->timer);

		/*
		 * This stores cwq for the moment, for the timer_fn.
		 * Note that the work's gcwq is preserved to allow
		 * reentrance detection for delayed works.
		 */
		if (!(wq->flags & WQ_UNBOUND)) {
			struct global_cwq *gcwq = get_work_gcwq(work);

			if (gcwq && gcwq->cpu != WORK_CPU_UNBOUND)
				lcpu = gcwq->cpu;
			else
				lcpu = raw_smp_processor_id();
		} else
			lcpu = WORK_CPU_UNBOUND;

		set_work_cwq(work, get_cwq(lcpu, wq), 0);

		timer->expires = jiffies + delay;
		timer->data = (unsigned long)dwork;
		timer->function = delayed_work_timer_fn;

		if (unlikely(cpu >= 0))
			add_timer_on(timer, cpu);
		else
			add_timer(timer);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_delayed_work_on);

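/*
 * Illustrative usage (my_dwork_fn and my_dwork are hypothetical
 * names):
 *
 *	static void my_dwork_fn(struct work_struct *work) { ... }
 *	static DECLARE_DELAYED_WORK(my_dwork, my_dwork_fn);
 *
 *	queue_delayed_work(system_wq, &my_dwork, HZ / 2);
 *
 * fires after roughly half a second; a zero @delay degenerates to
 * plain queue_work().
 */
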
/**
 * worker_enter_idle - enter idle state
 * @worker: worker which is entering idle state
 *
 * @worker is entering idle state.  Update stats and idle timer if
 * necessary.
 *
 * LOCKING:
 * spin_lock_irq(gcwq->lock).
 */
static void worker_enter_idle(struct worker *worker)
{
	struct global_cwq *gcwq = worker->gcwq;

	BUG_ON(worker->flags & WORKER_IDLE);
	BUG_ON(!list_empty(&worker->entry) &&
	       (worker->hentry.next || worker->hentry.pprev));

	/* can't use worker_set_flags(), also called from start_worker() */
	worker->flags |= WORKER_IDLE;
	gcwq->nr_idle++;
	worker->last_active = jiffies;

	/* idle_list is LIFO */
	list_add(&worker->entry, &gcwq->idle_list);

	if (likely(!(worker->flags & WORKER_ROGUE))) {
		if (too_many_workers(gcwq) && !timer_pending(&gcwq->idle_timer))
			mod_timer(&gcwq->idle_timer,
				  jiffies + IDLE_WORKER_TIMEOUT);
	} else
		wake_up_all(&gcwq->trustee_wait);

	/* sanity check nr_running */
	WARN_ON_ONCE(gcwq->nr_workers == gcwq->nr_idle &&
		     atomic_read(get_gcwq_nr_running(gcwq->cpu)));
}

/**
 * worker_leave_idle - leave idle state
 * @worker: worker which is leaving idle state
 *
 * @worker is leaving idle state.  Update stats.
 *
 * LOCKING:
 * spin_lock_irq(gcwq->lock).
 */
static void worker_leave_idle(struct worker *worker)
{
	struct global_cwq *gcwq = worker->gcwq;

	BUG_ON(!(worker->flags & WORKER_IDLE));
	worker_clr_flags(worker, WORKER_IDLE);
	gcwq->nr_idle--;
	list_del_init(&worker->entry);
}

1137e22bee78STejun Heo /**
1138e22bee78STejun Heo  * worker_maybe_bind_and_lock - bind worker to its cpu if possible and lock gcwq
1139e22bee78STejun Heo  * @worker: self
1140e22bee78STejun Heo  *
1141e22bee78STejun Heo  * Works which are scheduled while the cpu is online must at least be
1142e22bee78STejun Heo  * scheduled to a worker which is bound to the cpu so that if they are
1143e22bee78STejun Heo  * flushed from cpu callbacks while the cpu is going down, they are
1144e22bee78STejun Heo  * guaranteed to execute on the cpu.
1145e22bee78STejun Heo  *
1146e22bee78STejun Heo  * This function is to be used by rogue workers and rescuers to bind
1147e22bee78STejun Heo  * themselves to the target cpu and may race with cpu going down or
1148e22bee78STejun Heo  * coming online.  kthread_bind() can't be used because it may put the
1149e22bee78STejun Heo  * worker to already dead cpu and set_cpus_allowed_ptr() can't be used
1150e22bee78STejun Heo  * worker on an already dead cpu and set_cpus_allowed_ptr() can't be used
1151e22bee78STejun Heo  * [dis]associated in the meantime.
1152e22bee78STejun Heo  *
1153e22bee78STejun Heo  * This function tries set_cpus_allowed_ptr() and locks gcwq and verifies
1154e22bee78STejun Heo  * the binding against GCWQ_DISASSOCIATED which is set during
1155e22bee78STejun Heo  * CPU_DYING and cleared during CPU_ONLINE, so if the worker enters
1156e22bee78STejun Heo  * idle state or fetches works without dropping lock, it can guarantee
1157e22bee78STejun Heo  * the scheduling requirement described in the first paragraph.
1158e22bee78STejun Heo  *
1159e22bee78STejun Heo  * CONTEXT:
1160e22bee78STejun Heo  * Might sleep.  Called without any lock but returns with gcwq->lock
1161e22bee78STejun Heo  * held.
1162e22bee78STejun Heo  *
1163e22bee78STejun Heo  * RETURNS:
1164e22bee78STejun Heo  * %true if the associated gcwq is online (@worker is successfully
1165e22bee78STejun Heo  * bound), %false if offline.
1166e22bee78STejun Heo  */
1167e22bee78STejun Heo static bool worker_maybe_bind_and_lock(struct worker *worker)
1168e22bee78STejun Heo {
1169e22bee78STejun Heo 	struct global_cwq *gcwq = worker->gcwq;
1170e22bee78STejun Heo 	struct task_struct *task = worker->task;
1171e22bee78STejun Heo 
1172e22bee78STejun Heo 	while (true) {
1173e22bee78STejun Heo 		/*
1174e22bee78STejun Heo 		 * The following call may fail, succeed or succeed
1175e22bee78STejun Heo 		 * without actually migrating the task to the cpu if
1176e22bee78STejun Heo 		 * it races with cpu hotunplug operation.  Verify
1177e22bee78STejun Heo 		 * against GCWQ_DISASSOCIATED.
1178e22bee78STejun Heo 		 */
1179f3421797STejun Heo 		if (!(gcwq->flags & GCWQ_DISASSOCIATED))
1180e22bee78STejun Heo 			set_cpus_allowed_ptr(task, get_cpu_mask(gcwq->cpu));
1181e22bee78STejun Heo 
1182e22bee78STejun Heo 		spin_lock_irq(&gcwq->lock);
1183e22bee78STejun Heo 		if (gcwq->flags & GCWQ_DISASSOCIATED)
1184e22bee78STejun Heo 			return false;
1185e22bee78STejun Heo 		if (task_cpu(task) == gcwq->cpu &&
1186e22bee78STejun Heo 		    cpumask_equal(&current->cpus_allowed,
1187e22bee78STejun Heo 				  get_cpu_mask(gcwq->cpu)))
1188e22bee78STejun Heo 			return true;
1189e22bee78STejun Heo 		spin_unlock_irq(&gcwq->lock);
1190e22bee78STejun Heo 
1191e22bee78STejun Heo 		/* CPU has come up in between, retry migration */
1192e22bee78STejun Heo 		cpu_relax();
1193e22bee78STejun Heo 	}
1194e22bee78STejun Heo }
1195e22bee78STejun Heo 
1196e22bee78STejun Heo /*
1197e22bee78STejun Heo  * Function for worker->rebind_work used to rebind rogue busy workers
1198e22bee78STejun Heo  * to the associated cpu which is coming back online.  This is
1199e22bee78STejun Heo  * scheduled by cpu up but can race with other cpu hotplug operations
1200e22bee78STejun Heo  * and may be executed twice without intervening cpu down.
1201e22bee78STejun Heo  */
1202e22bee78STejun Heo static void worker_rebind_fn(struct work_struct *work)
1203e22bee78STejun Heo {
1204e22bee78STejun Heo 	struct worker *worker = container_of(work, struct worker, rebind_work);
1205e22bee78STejun Heo 	struct global_cwq *gcwq = worker->gcwq;
1206e22bee78STejun Heo 
1207e22bee78STejun Heo 	if (worker_maybe_bind_and_lock(worker))
1208e22bee78STejun Heo 		worker_clr_flags(worker, WORKER_REBIND);
1209e22bee78STejun Heo 
1210e22bee78STejun Heo 	spin_unlock_irq(&gcwq->lock);
1211e22bee78STejun Heo }
1212e22bee78STejun Heo 
1213c34056a3STejun Heo static struct worker *alloc_worker(void)
1214c34056a3STejun Heo {
1215c34056a3STejun Heo 	struct worker *worker;
1216c34056a3STejun Heo 
1217c34056a3STejun Heo 	worker = kzalloc(sizeof(*worker), GFP_KERNEL);
1218c8e55f36STejun Heo 	if (worker) {
1219c8e55f36STejun Heo 		INIT_LIST_HEAD(&worker->entry);
1220affee4b2STejun Heo 		INIT_LIST_HEAD(&worker->scheduled);
1221e22bee78STejun Heo 		INIT_WORK(&worker->rebind_work, worker_rebind_fn);
1222e22bee78STejun Heo 		/* on creation a worker is in !idle && prep state */
1223e22bee78STejun Heo 		worker->flags = WORKER_PREP;
1224c8e55f36STejun Heo 	}
1225c34056a3STejun Heo 	return worker;
1226c34056a3STejun Heo }
1227c34056a3STejun Heo 
1228c34056a3STejun Heo /**
1229c34056a3STejun Heo  * create_worker - create a new workqueue worker
12307e11629dSTejun Heo  * @gcwq: gcwq the new worker will belong to
1231c34056a3STejun Heo  * @bind: whether to bind the worker to @gcwq's cpu or not
1232c34056a3STejun Heo  *
12337e11629dSTejun Heo  * Create a new worker which is bound to @gcwq.  The returned worker
1234c34056a3STejun Heo  * can be started by calling start_worker() or destroyed using
1235c34056a3STejun Heo  * destroy_worker().
1236c34056a3STejun Heo  *
1237c34056a3STejun Heo  * CONTEXT:
1238c34056a3STejun Heo  * Might sleep.  Does GFP_KERNEL allocations.
1239c34056a3STejun Heo  *
1240c34056a3STejun Heo  * RETURNS:
1241c34056a3STejun Heo  * Pointer to the newly created worker or %NULL on failure.
1242c34056a3STejun Heo  */
12437e11629dSTejun Heo static struct worker *create_worker(struct global_cwq *gcwq, bool bind)
1244c34056a3STejun Heo {
1245f3421797STejun Heo 	bool on_unbound_cpu = gcwq->cpu == WORK_CPU_UNBOUND;
1246c34056a3STejun Heo 	struct worker *worker = NULL;
1247f3421797STejun Heo 	int id = -1;
1248c34056a3STejun Heo 
12498b03ae3cSTejun Heo 	spin_lock_irq(&gcwq->lock);
12508b03ae3cSTejun Heo 	while (ida_get_new(&gcwq->worker_ida, &id)) {
12518b03ae3cSTejun Heo 		spin_unlock_irq(&gcwq->lock);
12528b03ae3cSTejun Heo 		if (!ida_pre_get(&gcwq->worker_ida, GFP_KERNEL))
1253c34056a3STejun Heo 			goto fail;
12548b03ae3cSTejun Heo 		spin_lock_irq(&gcwq->lock);
1255c34056a3STejun Heo 	}
12568b03ae3cSTejun Heo 	spin_unlock_irq(&gcwq->lock);
1257c34056a3STejun Heo 
1258c34056a3STejun Heo 	worker = alloc_worker();
1259c34056a3STejun Heo 	if (!worker)
1260c34056a3STejun Heo 		goto fail;
1261c34056a3STejun Heo 
12628b03ae3cSTejun Heo 	worker->gcwq = gcwq;
1263c34056a3STejun Heo 	worker->id = id;
1264c34056a3STejun Heo 
1265f3421797STejun Heo 	if (!on_unbound_cpu)
1266f3421797STejun Heo 		worker->task = kthread_create(worker_thread, worker,
1267f3421797STejun Heo 					      "kworker/%u:%d", gcwq->cpu, id);
1268f3421797STejun Heo 	else
1269f3421797STejun Heo 		worker->task = kthread_create(worker_thread, worker,
1270f3421797STejun Heo 					      "kworker/u:%d", id);
1271c34056a3STejun Heo 	if (IS_ERR(worker->task))
1272c34056a3STejun Heo 		goto fail;
1273c34056a3STejun Heo 
1274db7bccf4STejun Heo 	/*
1275db7bccf4STejun Heo 	 * A rogue worker will become a regular one if CPU comes
1276db7bccf4STejun Heo 	 * online later on.  Make sure every worker has
1277db7bccf4STejun Heo 	 * PF_THREAD_BOUND set.
1278db7bccf4STejun Heo 	 */
1279f3421797STejun Heo 	if (bind && !on_unbound_cpu)
12808b03ae3cSTejun Heo 		kthread_bind(worker->task, gcwq->cpu);
1281f3421797STejun Heo 	else {
1282db7bccf4STejun Heo 		worker->task->flags |= PF_THREAD_BOUND;
1283f3421797STejun Heo 		if (on_unbound_cpu)
1284f3421797STejun Heo 			worker->flags |= WORKER_UNBOUND;
1285f3421797STejun Heo 	}
1286c34056a3STejun Heo 
1287c34056a3STejun Heo 	return worker;
1288c34056a3STejun Heo fail:
1289c34056a3STejun Heo 	if (id >= 0) {
12908b03ae3cSTejun Heo 		spin_lock_irq(&gcwq->lock);
12918b03ae3cSTejun Heo 		ida_remove(&gcwq->worker_ida, id);
12928b03ae3cSTejun Heo 		spin_unlock_irq(&gcwq->lock);
1293c34056a3STejun Heo 	}
1294c34056a3STejun Heo 	kfree(worker);
1295c34056a3STejun Heo 	return NULL;
1296c34056a3STejun Heo }
1297c34056a3STejun Heo 
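/*
 * Illustrative sketch, not part of this file: how a manager uses the
 * worker lifecycle helpers in this file.  This mirrors what
 * maybe_create_worker() below does: creation may sleep and thus
 * happens without gcwq->lock, while start_worker() requires it.
 *
 *	struct worker *worker;
 *
 *	spin_unlock_irq(&gcwq->lock);
 *	worker = create_worker(gcwq, true);
 *	spin_lock_irq(&gcwq->lock);
 *	if (worker)
 *		start_worker(worker);
 */
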
1298c34056a3STejun Heo /**
1299c34056a3STejun Heo  * start_worker - start a newly created worker
1300c34056a3STejun Heo  * @worker: worker to start
1301c34056a3STejun Heo  *
1302c8e55f36STejun Heo  * Make the gcwq aware of @worker and start it.
1303c34056a3STejun Heo  *
1304c34056a3STejun Heo  * CONTEXT:
13058b03ae3cSTejun Heo  * spin_lock_irq(gcwq->lock).
1306c34056a3STejun Heo  */
1307c34056a3STejun Heo static void start_worker(struct worker *worker)
1308c34056a3STejun Heo {
1309cb444766STejun Heo 	worker->flags |= WORKER_STARTED;
1310c8e55f36STejun Heo 	worker->gcwq->nr_workers++;
1311c8e55f36STejun Heo 	worker_enter_idle(worker);
1312c34056a3STejun Heo 	wake_up_process(worker->task);
1313c34056a3STejun Heo }
1314c34056a3STejun Heo 
1315c34056a3STejun Heo /**
1316c34056a3STejun Heo  * destroy_worker - destroy a workqueue worker
1317c34056a3STejun Heo  * @worker: worker to be destroyed
1318c34056a3STejun Heo  *
1319c8e55f36STejun Heo  * Destroy @worker and adjust the gcwq's stats accordingly.
1320c8e55f36STejun Heo  *
1321c8e55f36STejun Heo  * CONTEXT:
1322c8e55f36STejun Heo  * spin_lock_irq(gcwq->lock) which is released and regrabbed.
1323c34056a3STejun Heo  */
1324c34056a3STejun Heo static void destroy_worker(struct worker *worker)
1325c34056a3STejun Heo {
13268b03ae3cSTejun Heo 	struct global_cwq *gcwq = worker->gcwq;
1327c34056a3STejun Heo 	int id = worker->id;
1328c34056a3STejun Heo 
1329c34056a3STejun Heo 	/* sanity check frenzy */
1330c34056a3STejun Heo 	BUG_ON(worker->current_work);
1331affee4b2STejun Heo 	BUG_ON(!list_empty(&worker->scheduled));
1332c34056a3STejun Heo 
1333c8e55f36STejun Heo 	if (worker->flags & WORKER_STARTED)
1334c8e55f36STejun Heo 		gcwq->nr_workers--;
1335c8e55f36STejun Heo 	if (worker->flags & WORKER_IDLE)
1336c8e55f36STejun Heo 		gcwq->nr_idle--;
1337c8e55f36STejun Heo 
1338c8e55f36STejun Heo 	list_del_init(&worker->entry);
1339cb444766STejun Heo 	worker->flags |= WORKER_DIE;
1340c8e55f36STejun Heo 
1341c8e55f36STejun Heo 	spin_unlock_irq(&gcwq->lock);
1342c8e55f36STejun Heo 
1343c34056a3STejun Heo 	kthread_stop(worker->task);
1344c34056a3STejun Heo 	kfree(worker);
1345c34056a3STejun Heo 
13468b03ae3cSTejun Heo 	spin_lock_irq(&gcwq->lock);
13478b03ae3cSTejun Heo 	ida_remove(&gcwq->worker_ida, id);
1348c34056a3STejun Heo }
1349c34056a3STejun Heo 
1350e22bee78STejun Heo static void idle_worker_timeout(unsigned long __gcwq)
1351e22bee78STejun Heo {
1352e22bee78STejun Heo 	struct global_cwq *gcwq = (void *)__gcwq;
1353e22bee78STejun Heo 
1354e22bee78STejun Heo 	spin_lock_irq(&gcwq->lock);
1355e22bee78STejun Heo 
1356e22bee78STejun Heo 	if (too_many_workers(gcwq)) {
1357e22bee78STejun Heo 		struct worker *worker;
1358e22bee78STejun Heo 		unsigned long expires;
1359e22bee78STejun Heo 
1360e22bee78STejun Heo 		/* idle_list is kept in LIFO order, check the last one */
1361e22bee78STejun Heo 		worker = list_entry(gcwq->idle_list.prev, struct worker, entry);
1362e22bee78STejun Heo 		expires = worker->last_active + IDLE_WORKER_TIMEOUT;
1363e22bee78STejun Heo 
1364e22bee78STejun Heo 		if (time_before(jiffies, expires))
1365e22bee78STejun Heo 			mod_timer(&gcwq->idle_timer, expires);
1366e22bee78STejun Heo 		else {
1367e22bee78STejun Heo 			/* it's been idle for too long, wake up manager */
1368e22bee78STejun Heo 			gcwq->flags |= GCWQ_MANAGE_WORKERS;
1369e22bee78STejun Heo 			wake_up_worker(gcwq);
1370e22bee78STejun Heo 		}
1371e22bee78STejun Heo 	}
1372e22bee78STejun Heo 
1373e22bee78STejun Heo 	spin_unlock_irq(&gcwq->lock);
1374e22bee78STejun Heo }
1375e22bee78STejun Heo 
1376e22bee78STejun Heo static bool send_mayday(struct work_struct *work)
1377e22bee78STejun Heo {
1378e22bee78STejun Heo 	struct cpu_workqueue_struct *cwq = get_work_cwq(work);
1379e22bee78STejun Heo 	struct workqueue_struct *wq = cwq->wq;
1380f3421797STejun Heo 	unsigned int cpu;
1381e22bee78STejun Heo 
1382e22bee78STejun Heo 	if (!(wq->flags & WQ_RESCUER))
1383e22bee78STejun Heo 		return false;
1384e22bee78STejun Heo 
1385e22bee78STejun Heo 	/* mayday mayday mayday */
1386f3421797STejun Heo 	cpu = cwq->gcwq->cpu;
1387f3421797STejun Heo 	/* WORK_CPU_UNBOUND can't be set in cpumask, use cpu 0 instead */
1388f3421797STejun Heo 	if (cpu == WORK_CPU_UNBOUND)
1389f3421797STejun Heo 		cpu = 0;
1390f3421797STejun Heo 	if (!cpumask_test_and_set_cpu(cpu, wq->mayday_mask))
1391e22bee78STejun Heo 		wake_up_process(wq->rescuer->task);
1392e22bee78STejun Heo 	return true;
1393e22bee78STejun Heo }
1394e22bee78STejun Heo 
1395e22bee78STejun Heo static void gcwq_mayday_timeout(unsigned long __gcwq)
1396e22bee78STejun Heo {
1397e22bee78STejun Heo 	struct global_cwq *gcwq = (void *)__gcwq;
1398e22bee78STejun Heo 	struct work_struct *work;
1399e22bee78STejun Heo 
1400e22bee78STejun Heo 	spin_lock_irq(&gcwq->lock);
1401e22bee78STejun Heo 
1402e22bee78STejun Heo 	if (need_to_create_worker(gcwq)) {
1403e22bee78STejun Heo 		/*
1404e22bee78STejun Heo 		 * We've been trying to create a new worker but
1405e22bee78STejun Heo 		 * haven't been successful.  We might be hitting an
1406e22bee78STejun Heo 		 * allocation deadlock.  Send distress signals to
1407e22bee78STejun Heo 		 * rescuers.
1408e22bee78STejun Heo 		 */
1409e22bee78STejun Heo 		list_for_each_entry(work, &gcwq->worklist, entry)
1410e22bee78STejun Heo 			send_mayday(work);
1411e22bee78STejun Heo 	}
1412e22bee78STejun Heo 
1413e22bee78STejun Heo 	spin_unlock_irq(&gcwq->lock);
1414e22bee78STejun Heo 
1415e22bee78STejun Heo 	mod_timer(&gcwq->mayday_timer, jiffies + MAYDAY_INTERVAL);
1416e22bee78STejun Heo }
1417e22bee78STejun Heo 
1418e22bee78STejun Heo /**
1419e22bee78STejun Heo  * maybe_create_worker - create a new worker if necessary
1420e22bee78STejun Heo  * @gcwq: gcwq to create a new worker for
1421e22bee78STejun Heo  *
1422e22bee78STejun Heo  * Create a new worker for @gcwq if necessary.  @gcwq is guaranteed to
1423e22bee78STejun Heo  * have at least one idle worker on return from this function.  If
1424e22bee78STejun Heo  * creating a new worker takes longer than MAYDAY_INTERVAL, mayday is
1425e22bee78STejun Heo  * sent to all rescuers with works scheduled on @gcwq to resolve
1426e22bee78STejun Heo  * possible allocation deadlock.
1427e22bee78STejun Heo  *
1428e22bee78STejun Heo  * On return, need_to_create_worker() is guaranteed to be false and
1429e22bee78STejun Heo  * may_start_working() true.
1430e22bee78STejun Heo  *
1431e22bee78STejun Heo  * LOCKING:
1432e22bee78STejun Heo  * spin_lock_irq(gcwq->lock) which may be released and regrabbed
1433e22bee78STejun Heo  * multiple times.  Does GFP_KERNEL allocations.  Called only from
1434e22bee78STejun Heo  * manager.
1435e22bee78STejun Heo  *
1436e22bee78STejun Heo  * RETURNS:
1437e22bee78STejun Heo  * false if no action was taken and gcwq->lock stayed locked, true
1438e22bee78STejun Heo  * otherwise.
1439e22bee78STejun Heo  */
1440e22bee78STejun Heo static bool maybe_create_worker(struct global_cwq *gcwq)
1441e22bee78STejun Heo {
1442e22bee78STejun Heo 	if (!need_to_create_worker(gcwq))
1443e22bee78STejun Heo 		return false;
1444e22bee78STejun Heo restart:
1445e22bee78STejun Heo 	/* if we don't make progress in MAYDAY_INITIAL_TIMEOUT, call for help */
1446e22bee78STejun Heo 	mod_timer(&gcwq->mayday_timer, jiffies + MAYDAY_INITIAL_TIMEOUT);
1447e22bee78STejun Heo 
1448e22bee78STejun Heo 	while (true) {
1449e22bee78STejun Heo 		struct worker *worker;
1450e22bee78STejun Heo 
1451e22bee78STejun Heo 		spin_unlock_irq(&gcwq->lock);
1452e22bee78STejun Heo 
1453e22bee78STejun Heo 		worker = create_worker(gcwq, true);
1454e22bee78STejun Heo 		if (worker) {
1455e22bee78STejun Heo 			del_timer_sync(&gcwq->mayday_timer);
1456e22bee78STejun Heo 			spin_lock_irq(&gcwq->lock);
1457e22bee78STejun Heo 			start_worker(worker);
1458e22bee78STejun Heo 			BUG_ON(need_to_create_worker(gcwq));
1459e22bee78STejun Heo 			return true;
1460e22bee78STejun Heo 		}
1461e22bee78STejun Heo 
1462e22bee78STejun Heo 		if (!need_to_create_worker(gcwq))
1463e22bee78STejun Heo 			break;
1464e22bee78STejun Heo 
1465e22bee78STejun Heo 		spin_unlock_irq(&gcwq->lock);
1466e22bee78STejun Heo 		__set_current_state(TASK_INTERRUPTIBLE);
1467e22bee78STejun Heo 		schedule_timeout(CREATE_COOLDOWN);
1468e22bee78STejun Heo 		spin_lock_irq(&gcwq->lock);
1469e22bee78STejun Heo 		if (!need_to_create_worker(gcwq))
1470e22bee78STejun Heo 			break;
1471e22bee78STejun Heo 	}
1472e22bee78STejun Heo 
1473e22bee78STejun Heo 	spin_unlock_irq(&gcwq->lock);
1474e22bee78STejun Heo 	del_timer_sync(&gcwq->mayday_timer);
1475e22bee78STejun Heo 	spin_lock_irq(&gcwq->lock);
1476e22bee78STejun Heo 	if (need_to_create_worker(gcwq))
1477e22bee78STejun Heo 		goto restart;
1478e22bee78STejun Heo 	return true;
1479e22bee78STejun Heo }
1480e22bee78STejun Heo 
1481e22bee78STejun Heo /**
1482e22bee78STejun Heo  * maybe_destroy_workers - destroy workers which have been idle for a while
1483e22bee78STejun Heo  * @gcwq: gcwq to destroy workers for
1484e22bee78STejun Heo  *
1485e22bee78STejun Heo  * Destroy @gcwq workers which have been idle for longer than
1486e22bee78STejun Heo  * IDLE_WORKER_TIMEOUT.
1487e22bee78STejun Heo  *
1488e22bee78STejun Heo  * LOCKING:
1489e22bee78STejun Heo  * spin_lock_irq(gcwq->lock) which may be released and regrabbed
1490e22bee78STejun Heo  * multiple times.  Called only from manager.
1491e22bee78STejun Heo  *
1492e22bee78STejun Heo  * RETURNS:
1493e22bee78STejun Heo  * false if no action was taken and gcwq->lock stayed locked, true
1494e22bee78STejun Heo  * otherwise.
1495e22bee78STejun Heo  */
1496e22bee78STejun Heo static bool maybe_destroy_workers(struct global_cwq *gcwq)
1497e22bee78STejun Heo {
1498e22bee78STejun Heo 	bool ret = false;
1499e22bee78STejun Heo 
1500e22bee78STejun Heo 	while (too_many_workers(gcwq)) {
1501e22bee78STejun Heo 		struct worker *worker;
1502e22bee78STejun Heo 		unsigned long expires;
1503e22bee78STejun Heo 
1504e22bee78STejun Heo 		worker = list_entry(gcwq->idle_list.prev, struct worker, entry);
1505e22bee78STejun Heo 		expires = worker->last_active + IDLE_WORKER_TIMEOUT;
1506e22bee78STejun Heo 
1507e22bee78STejun Heo 		if (time_before(jiffies, expires)) {
1508e22bee78STejun Heo 			mod_timer(&gcwq->idle_timer, expires);
1509e22bee78STejun Heo 			break;
1510e22bee78STejun Heo 		}
1511e22bee78STejun Heo 
1512e22bee78STejun Heo 		destroy_worker(worker);
1513e22bee78STejun Heo 		ret = true;
1514e22bee78STejun Heo 	}
1515e22bee78STejun Heo 
1516e22bee78STejun Heo 	return ret;
1517e22bee78STejun Heo }
1518e22bee78STejun Heo 
1519e22bee78STejun Heo /**
1520e22bee78STejun Heo  * manage_workers - manage worker pool
1521e22bee78STejun Heo  * @worker: self
1522e22bee78STejun Heo  *
1523e22bee78STejun Heo  * Assume the manager role and manage gcwq worker pool @worker belongs
1524e22bee78STejun Heo  * to.  At any given time, there can be only zero or one manager per
1525e22bee78STejun Heo  * gcwq.  The exclusion is handled automatically by this function.
1526e22bee78STejun Heo  *
1527e22bee78STejun Heo  * The caller can safely start processing works on false return.  On
1528e22bee78STejun Heo  * true return, it's guaranteed that need_to_create_worker() is false
1529e22bee78STejun Heo  * and may_start_working() is true.
1530e22bee78STejun Heo  *
1531e22bee78STejun Heo  * CONTEXT:
1532e22bee78STejun Heo  * spin_lock_irq(gcwq->lock) which may be released and regrabbed
1533e22bee78STejun Heo  * multiple times.  Does GFP_KERNEL allocations.
1534e22bee78STejun Heo  *
1535e22bee78STejun Heo  * RETURNS:
1536e22bee78STejun Heo  * false if no action was taken and gcwq->lock stayed locked, true if
1537e22bee78STejun Heo  * some action was taken.
1538e22bee78STejun Heo  */
1539e22bee78STejun Heo static bool manage_workers(struct worker *worker)
1540e22bee78STejun Heo {
1541e22bee78STejun Heo 	struct global_cwq *gcwq = worker->gcwq;
1542e22bee78STejun Heo 	bool ret = false;
1543e22bee78STejun Heo 
1544e22bee78STejun Heo 	if (gcwq->flags & GCWQ_MANAGING_WORKERS)
1545e22bee78STejun Heo 		return ret;
1546e22bee78STejun Heo 
1547e22bee78STejun Heo 	gcwq->flags &= ~GCWQ_MANAGE_WORKERS;
1548e22bee78STejun Heo 	gcwq->flags |= GCWQ_MANAGING_WORKERS;
1549e22bee78STejun Heo 
1550e22bee78STejun Heo 	/*
1551e22bee78STejun Heo 	 * Destroy and then create so that may_start_working() is true
1552e22bee78STejun Heo 	 * on return.
1553e22bee78STejun Heo 	 */
1554e22bee78STejun Heo 	ret |= maybe_destroy_workers(gcwq);
1555e22bee78STejun Heo 	ret |= maybe_create_worker(gcwq);
1556e22bee78STejun Heo 
1557e22bee78STejun Heo 	gcwq->flags &= ~GCWQ_MANAGING_WORKERS;
1558e22bee78STejun Heo 
1559e22bee78STejun Heo 	/*
1560e22bee78STejun Heo 	 * The trustee might be waiting to take over the manager
1561e22bee78STejun Heo 	 * position, tell it we're done.
1562e22bee78STejun Heo 	 */
1563e22bee78STejun Heo 	if (unlikely(gcwq->trustee))
1564e22bee78STejun Heo 		wake_up_all(&gcwq->trustee_wait);
1565e22bee78STejun Heo 
1566e22bee78STejun Heo 	return ret;
1567e22bee78STejun Heo }
1568e22bee78STejun Heo 
1569a62428c0STejun Heo /**
1570affee4b2STejun Heo  * move_linked_works - move linked works to a list
1571affee4b2STejun Heo  * @work: start of series of works to be scheduled
1572affee4b2STejun Heo  * @head: target list to append @work to
1573affee4b2STejun Heo  * @nextp: out parameter for nested worklist walking
1574affee4b2STejun Heo  *
1575affee4b2STejun Heo  * Schedule linked works starting from @work to @head.  Work series to
1576affee4b2STejun Heo  * be scheduled starts at @work and includes any consecutive work with
1577affee4b2STejun Heo  * WORK_STRUCT_LINKED set in its predecessor.
1578affee4b2STejun Heo  *
1579affee4b2STejun Heo  * If @nextp is not NULL, it's updated to point to the next work of
1580affee4b2STejun Heo  * the last scheduled work.  This allows move_linked_works() to be
1581affee4b2STejun Heo  * nested inside outer list_for_each_entry_safe().
1582affee4b2STejun Heo  *
1583affee4b2STejun Heo  * CONTEXT:
15848b03ae3cSTejun Heo  * spin_lock_irq(gcwq->lock).
1585affee4b2STejun Heo  */
1586affee4b2STejun Heo static void move_linked_works(struct work_struct *work, struct list_head *head,
1587affee4b2STejun Heo 			      struct work_struct **nextp)
1588affee4b2STejun Heo {
1589affee4b2STejun Heo 	struct work_struct *n;
1590affee4b2STejun Heo 
1591affee4b2STejun Heo 	/*
1592affee4b2STejun Heo 	 * A linked worklist always ends before the end of the list;
1593affee4b2STejun Heo 	 * use NULL for the list head.
1594affee4b2STejun Heo 	 */
1595affee4b2STejun Heo 	list_for_each_entry_safe_from(work, n, NULL, entry) {
1596affee4b2STejun Heo 		list_move_tail(&work->entry, head);
1597affee4b2STejun Heo 		if (!(*work_data_bits(work) & WORK_STRUCT_LINKED))
1598affee4b2STejun Heo 			break;
1599affee4b2STejun Heo 	}
1600affee4b2STejun Heo 
1601affee4b2STejun Heo 	/*
1602affee4b2STejun Heo 	 * If we're already inside safe list traversal and have moved
1603affee4b2STejun Heo 	 * multiple works to the scheduled queue, the next position
1604affee4b2STejun Heo 	 * needs to be updated.
1605affee4b2STejun Heo 	 */
1606affee4b2STejun Heo 	if (nextp)
1607affee4b2STejun Heo 		*nextp = n;
1608affee4b2STejun Heo }
1609affee4b2STejun Heo 
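/*
 * Illustrative sketch, not part of this file: given a worklist
 *
 *	... -> w0 (LINKED) -> w1 -> w2 -> ...
 *
 * move_linked_works(w0, head, &n) moves w0 and w1 to @head (w1 ends
 * the series because its LINKED bit is clear) and leaves n pointing
 * at w2, so an outer list_for_each_entry_safe() can continue from
 * there.
 */
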
16101e19ffc6STejun Heo static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq)
16111e19ffc6STejun Heo {
16121e19ffc6STejun Heo 	struct work_struct *work = list_first_entry(&cwq->delayed_works,
16131e19ffc6STejun Heo 						    struct work_struct, entry);
1614649027d7STejun Heo 	struct list_head *pos = gcwq_determine_ins_pos(cwq->gcwq, cwq);
16151e19ffc6STejun Heo 
1616649027d7STejun Heo 	move_linked_works(work, pos, NULL);
16171e19ffc6STejun Heo 	cwq->nr_active++;
16181e19ffc6STejun Heo }
16191e19ffc6STejun Heo 
1620affee4b2STejun Heo /**
162173f53c4aSTejun Heo  * cwq_dec_nr_in_flight - decrement cwq's nr_in_flight
162273f53c4aSTejun Heo  * @cwq: cwq of interest
162373f53c4aSTejun Heo  * @color: color of work which left the queue
162473f53c4aSTejun Heo  *
162573f53c4aSTejun Heo  * A work either has completed or is removed from pending queue,
162673f53c4aSTejun Heo  * decrement nr_in_flight of its cwq and handle workqueue flushing.
162773f53c4aSTejun Heo  *
162873f53c4aSTejun Heo  * CONTEXT:
16298b03ae3cSTejun Heo  * spin_lock_irq(gcwq->lock).
163073f53c4aSTejun Heo  */
163173f53c4aSTejun Heo static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color)
163273f53c4aSTejun Heo {
163373f53c4aSTejun Heo 	/* ignore uncolored works */
163473f53c4aSTejun Heo 	if (color == WORK_NO_COLOR)
163573f53c4aSTejun Heo 		return;
163673f53c4aSTejun Heo 
163773f53c4aSTejun Heo 	cwq->nr_in_flight[color]--;
16381e19ffc6STejun Heo 	cwq->nr_active--;
16391e19ffc6STejun Heo 
1640502ca9d8STejun Heo 	if (!list_empty(&cwq->delayed_works)) {
16411e19ffc6STejun Heo 		/* one down, submit a delayed one */
1642502ca9d8STejun Heo 		if (cwq->nr_active < cwq->max_active)
16431e19ffc6STejun Heo 			cwq_activate_first_delayed(cwq);
1644502ca9d8STejun Heo 	}
164573f53c4aSTejun Heo 
164673f53c4aSTejun Heo 	/* is flush in progress and are we at the flushing tip? */
164773f53c4aSTejun Heo 	if (likely(cwq->flush_color != color))
164873f53c4aSTejun Heo 		return;
164973f53c4aSTejun Heo 
165073f53c4aSTejun Heo 	/* are there still in-flight works? */
165173f53c4aSTejun Heo 	if (cwq->nr_in_flight[color])
165273f53c4aSTejun Heo 		return;
165373f53c4aSTejun Heo 
165473f53c4aSTejun Heo 	/* this cwq is done, clear flush_color */
165573f53c4aSTejun Heo 	cwq->flush_color = -1;
165673f53c4aSTejun Heo 
165773f53c4aSTejun Heo 	/*
165873f53c4aSTejun Heo 	 * If this was the last cwq, wake up the first flusher.  It
165973f53c4aSTejun Heo 	 * will handle the rest.
166073f53c4aSTejun Heo 	 */
166173f53c4aSTejun Heo 	if (atomic_dec_and_test(&cwq->wq->nr_cwqs_to_flush))
166273f53c4aSTejun Heo 		complete(&cwq->wq->first_flusher->done);
166373f53c4aSTejun Heo }
166473f53c4aSTejun Heo 
166573f53c4aSTejun Heo /**
1666a62428c0STejun Heo  * process_one_work - process single work
1667c34056a3STejun Heo  * @worker: self
1668a62428c0STejun Heo  * @work: work to process
1669a62428c0STejun Heo  *
1670a62428c0STejun Heo  * Process @work.  This function contains all the logic necessary to
1671a62428c0STejun Heo  * process a single work including synchronization against and
1672a62428c0STejun Heo  * interaction with other workers on the same cpu, queueing and
1673a62428c0STejun Heo  * flushing.  As long as the context requirement is met, any worker can
1674a62428c0STejun Heo  * call this function to process a work.
1675a62428c0STejun Heo  *
1676a62428c0STejun Heo  * CONTEXT:
16778b03ae3cSTejun Heo  * spin_lock_irq(gcwq->lock) which is released and regrabbed.
1678a62428c0STejun Heo  */
1679c34056a3STejun Heo static void process_one_work(struct worker *worker, struct work_struct *work)
16801da177e4SLinus Torvalds {
16817e11629dSTejun Heo 	struct cpu_workqueue_struct *cwq = get_work_cwq(work);
16828b03ae3cSTejun Heo 	struct global_cwq *gcwq = cwq->gcwq;
1683c8e55f36STejun Heo 	struct hlist_head *bwh = busy_worker_head(gcwq, work);
1684fb0e7bebSTejun Heo 	bool cpu_intensive = cwq->wq->flags & WQ_CPU_INTENSIVE;
16856bb49e59SDavid Howells 	work_func_t f = work->func;
168673f53c4aSTejun Heo 	int work_color;
16877e11629dSTejun Heo 	struct worker *collision;
16884e6045f1SJohannes Berg #ifdef CONFIG_LOCKDEP
16894e6045f1SJohannes Berg 	/*
1690a62428c0STejun Heo 	 * It is permissible to free the struct work_struct from
1691a62428c0STejun Heo 	 * inside the function that is called from it, this we need to
1692a62428c0STejun Heo 	 * take into account for lockdep too.  To avoid bogus "held
1693a62428c0STejun Heo 	 * lock freed" warnings as well as problems when looking into
1694a62428c0STejun Heo 	 * work->lockdep_map, make a copy and use that here.
16954e6045f1SJohannes Berg 	 */
16964e6045f1SJohannes Berg 	struct lockdep_map lockdep_map = work->lockdep_map;
16974e6045f1SJohannes Berg #endif
16987e11629dSTejun Heo 	/*
16997e11629dSTejun Heo 	 * A single work shouldn't be executed concurrently by
17007e11629dSTejun Heo 	 * multiple workers on a single cpu.  Check whether anyone is
17017e11629dSTejun Heo 	 * already processing the work.  If so, defer the work to the
17027e11629dSTejun Heo 	 * currently executing one.
17037e11629dSTejun Heo 	 */
17047e11629dSTejun Heo 	collision = __find_worker_executing_work(gcwq, bwh, work);
17057e11629dSTejun Heo 	if (unlikely(collision)) {
17067e11629dSTejun Heo 		move_linked_works(work, &collision->scheduled, NULL);
17077e11629dSTejun Heo 		return;
17087e11629dSTejun Heo 	}
17097e11629dSTejun Heo 
1710a62428c0STejun Heo 	/* claim and process */
1711dc186ad7SThomas Gleixner 	debug_work_deactivate(work);
1712c8e55f36STejun Heo 	hlist_add_head(&worker->hentry, bwh);
1713c34056a3STejun Heo 	worker->current_work = work;
17148cca0eeaSTejun Heo 	worker->current_cwq = cwq;
171573f53c4aSTejun Heo 	work_color = get_work_color(work);
17167a22ad75STejun Heo 
17177a22ad75STejun Heo 	/* record the current cpu number in the work data and dequeue */
17187a22ad75STejun Heo 	set_work_cpu(work, gcwq->cpu);
1719a62428c0STejun Heo 	list_del_init(&work->entry);
1720a62428c0STejun Heo 
1721649027d7STejun Heo 	/*
1722649027d7STejun Heo 	 * If HIGHPRI_PENDING, check the next work, and, if HIGHPRI,
1723649027d7STejun Heo 	 * wake up another worker; otherwise, clear HIGHPRI_PENDING.
1724649027d7STejun Heo 	 */
1725649027d7STejun Heo 	if (unlikely(gcwq->flags & GCWQ_HIGHPRI_PENDING)) {
1726649027d7STejun Heo 		struct work_struct *nwork = list_first_entry(&gcwq->worklist,
1727649027d7STejun Heo 						struct work_struct, entry);
1728649027d7STejun Heo 
1729649027d7STejun Heo 		if (!list_empty(&gcwq->worklist) &&
1730649027d7STejun Heo 		    get_work_cwq(nwork)->wq->flags & WQ_HIGHPRI)
1731649027d7STejun Heo 			wake_up_worker(gcwq);
1732649027d7STejun Heo 		else
1733649027d7STejun Heo 			gcwq->flags &= ~GCWQ_HIGHPRI_PENDING;
1734649027d7STejun Heo 	}
1735649027d7STejun Heo 
1736fb0e7bebSTejun Heo 	/*
1737fb0e7bebSTejun Heo 	 * CPU intensive works don't participate in concurrency
1738fb0e7bebSTejun Heo 	 * management.  They're the scheduler's responsibility.
1739fb0e7bebSTejun Heo 	 */
1740fb0e7bebSTejun Heo 	if (unlikely(cpu_intensive))
1741fb0e7bebSTejun Heo 		worker_set_flags(worker, WORKER_CPU_INTENSIVE, true);
1742fb0e7bebSTejun Heo 
17438b03ae3cSTejun Heo 	spin_unlock_irq(&gcwq->lock);
17441da177e4SLinus Torvalds 
174523b2e599SOleg Nesterov 	work_clear_pending(work);
17463295f0efSIngo Molnar 	lock_map_acquire(&cwq->wq->lockdep_map);
17473295f0efSIngo Molnar 	lock_map_acquire(&lockdep_map);
174865f27f38SDavid Howells 	f(work);
17493295f0efSIngo Molnar 	lock_map_release(&lockdep_map);
17503295f0efSIngo Molnar 	lock_map_release(&cwq->wq->lockdep_map);
17511da177e4SLinus Torvalds 
1752d5abe669SPeter Zijlstra 	if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
1753d5abe669SPeter Zijlstra 		printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
1754d5abe669SPeter Zijlstra 		       "%s/0x%08x/%d\n",
1755a62428c0STejun Heo 		       current->comm, preempt_count(), task_pid_nr(current));
1756d5abe669SPeter Zijlstra 		printk(KERN_ERR "    last function: ");
1757d5abe669SPeter Zijlstra 		print_symbol("%s\n", (unsigned long)f);
1758d5abe669SPeter Zijlstra 		debug_show_held_locks(current);
1759d5abe669SPeter Zijlstra 		dump_stack();
1760d5abe669SPeter Zijlstra 	}
1761d5abe669SPeter Zijlstra 
17628b03ae3cSTejun Heo 	spin_lock_irq(&gcwq->lock);
1763a62428c0STejun Heo 
1764fb0e7bebSTejun Heo 	/* clear cpu intensive status */
1765fb0e7bebSTejun Heo 	if (unlikely(cpu_intensive))
1766fb0e7bebSTejun Heo 		worker_clr_flags(worker, WORKER_CPU_INTENSIVE);
1767fb0e7bebSTejun Heo 
1768a62428c0STejun Heo 	/* we're done with it, release */
1769c8e55f36STejun Heo 	hlist_del_init(&worker->hentry);
1770c34056a3STejun Heo 	worker->current_work = NULL;
17718cca0eeaSTejun Heo 	worker->current_cwq = NULL;
177273f53c4aSTejun Heo 	cwq_dec_nr_in_flight(cwq, work_color);
17731da177e4SLinus Torvalds }
1774a62428c0STejun Heo 
1775affee4b2STejun Heo /**
1776affee4b2STejun Heo  * process_scheduled_works - process scheduled works
1777affee4b2STejun Heo  * @worker: self
1778affee4b2STejun Heo  *
1779affee4b2STejun Heo  * Process all scheduled works.  Please note that the scheduled list
1780affee4b2STejun Heo  * may change while processing a work, so this function repeatedly
1781affee4b2STejun Heo  * fetches a work from the top and executes it.
1782affee4b2STejun Heo  *
1783affee4b2STejun Heo  * CONTEXT:
17848b03ae3cSTejun Heo  * spin_lock_irq(gcwq->lock) which may be released and regrabbed
1785affee4b2STejun Heo  * multiple times.
1786affee4b2STejun Heo  */
1787affee4b2STejun Heo static void process_scheduled_works(struct worker *worker)
1788a62428c0STejun Heo {
1789affee4b2STejun Heo 	while (!list_empty(&worker->scheduled)) {
1790affee4b2STejun Heo 		struct work_struct *work = list_first_entry(&worker->scheduled,
1791a62428c0STejun Heo 						struct work_struct, entry);
1792c34056a3STejun Heo 		process_one_work(worker, work);
1793a62428c0STejun Heo 	}
17941da177e4SLinus Torvalds }
17951da177e4SLinus Torvalds 
17964690c4abSTejun Heo /**
17974690c4abSTejun Heo  * worker_thread - the worker thread function
1798c34056a3STejun Heo  * @__worker: self
17994690c4abSTejun Heo  *
1800e22bee78STejun Heo  * The gcwq worker thread function.  There's a single dynamic pool of
1801e22bee78STejun Heo  * these per each cpu.  These workers process all works regardless of
1802e22bee78STejun Heo  * their specific target workqueue.  The only exception is works which
1803e22bee78STejun Heo  * belong to workqueues with a rescuer, which is explained in
1804e22bee78STejun Heo  * rescuer_thread().
18054690c4abSTejun Heo  */
1806c34056a3STejun Heo static int worker_thread(void *__worker)
18071da177e4SLinus Torvalds {
1808c34056a3STejun Heo 	struct worker *worker = __worker;
18098b03ae3cSTejun Heo 	struct global_cwq *gcwq = worker->gcwq;
18101da177e4SLinus Torvalds 
1811e22bee78STejun Heo 	/* tell the scheduler that this is a workqueue worker */
1812e22bee78STejun Heo 	worker->task->flags |= PF_WQ_WORKER;
1813c8e55f36STejun Heo woke_up:
18148b03ae3cSTejun Heo 	spin_lock_irq(&gcwq->lock);
1815affee4b2STejun Heo 
1816c8e55f36STejun Heo 	/* DIE can be set only while we're idle, checking here is enough */
1817c8e55f36STejun Heo 	if (worker->flags & WORKER_DIE) {
1818c8e55f36STejun Heo 		spin_unlock_irq(&gcwq->lock);
1819e22bee78STejun Heo 		worker->task->flags &= ~PF_WQ_WORKER;
1820c8e55f36STejun Heo 		return 0;
1821c8e55f36STejun Heo 	}
1822c8e55f36STejun Heo 
1823c8e55f36STejun Heo 	worker_leave_idle(worker);
1824db7bccf4STejun Heo recheck:
1825e22bee78STejun Heo 	/* no more worker necessary? */
1826e22bee78STejun Heo 	if (!need_more_worker(gcwq))
1827e22bee78STejun Heo 		goto sleep;
1828e22bee78STejun Heo 
1829e22bee78STejun Heo 	/* do we need to manage? */
1830e22bee78STejun Heo 	if (unlikely(!may_start_working(gcwq)) && manage_workers(worker))
1831e22bee78STejun Heo 		goto recheck;
1832e22bee78STejun Heo 
1833c8e55f36STejun Heo 	/*
1834c8e55f36STejun Heo 	 * ->scheduled list can only be filled while a worker is
1835c8e55f36STejun Heo 	 * preparing to process a work or actually processing it.
1836c8e55f36STejun Heo 	 * Make sure nobody diddled with it while I was sleeping.
1837c8e55f36STejun Heo 	 */
1838c8e55f36STejun Heo 	BUG_ON(!list_empty(&worker->scheduled));
1839c8e55f36STejun Heo 
1840e22bee78STejun Heo 	/*
1841e22bee78STejun Heo 	 * When control reaches this point, we're guaranteed to have
1842e22bee78STejun Heo 	 * at least one idle worker or that someone else has already
1843e22bee78STejun Heo 	 * assumed the manager role.
1844e22bee78STejun Heo 	 */
1845e22bee78STejun Heo 	worker_clr_flags(worker, WORKER_PREP);
1846e22bee78STejun Heo 
1847e22bee78STejun Heo 	do {
1848affee4b2STejun Heo 		struct work_struct *work =
18497e11629dSTejun Heo 			list_first_entry(&gcwq->worklist,
1850affee4b2STejun Heo 					 struct work_struct, entry);
1851affee4b2STejun Heo 
1852c8e55f36STejun Heo 		if (likely(!(*work_data_bits(work) & WORK_STRUCT_LINKED))) {
1853affee4b2STejun Heo 			/* optimization path, not strictly necessary */
1854affee4b2STejun Heo 			process_one_work(worker, work);
1855affee4b2STejun Heo 			if (unlikely(!list_empty(&worker->scheduled)))
1856affee4b2STejun Heo 				process_scheduled_works(worker);
1857affee4b2STejun Heo 		} else {
1858c8e55f36STejun Heo 			move_linked_works(work, &worker->scheduled, NULL);
1859affee4b2STejun Heo 			process_scheduled_works(worker);
1860affee4b2STejun Heo 		}
1861e22bee78STejun Heo 	} while (keep_working(gcwq));
1862affee4b2STejun Heo 
1863e22bee78STejun Heo 	worker_set_flags(worker, WORKER_PREP, false);
1864d313dd85STejun Heo sleep:
1865e22bee78STejun Heo 	if (unlikely(need_to_manage_workers(gcwq)) && manage_workers(worker))
1866e22bee78STejun Heo 		goto recheck;
1867d313dd85STejun Heo 
1868c8e55f36STejun Heo 	/*
1869e22bee78STejun Heo 	 * gcwq->lock is held and there's no work to process and no
1870e22bee78STejun Heo 	 * need to manage, sleep.  Workers are woken up only while
1871e22bee78STejun Heo 	 * holding gcwq->lock or from local cpu, so setting the
1872e22bee78STejun Heo 	 * current state before releasing gcwq->lock is enough to
1873e22bee78STejun Heo 	 * prevent losing any event.
1874c8e55f36STejun Heo 	 */
1875c8e55f36STejun Heo 	worker_enter_idle(worker);
1876c8e55f36STejun Heo 	__set_current_state(TASK_INTERRUPTIBLE);
18778b03ae3cSTejun Heo 	spin_unlock_irq(&gcwq->lock);
1878c8e55f36STejun Heo 	schedule();
1879c8e55f36STejun Heo 	goto woke_up;
18801da177e4SLinus Torvalds }
18811da177e4SLinus Torvalds 
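/*
 * Illustrative sketch, not part of this file: worker_thread() above,
 * summarized.
 *
 *	woke_up:
 *		exit if WORKER_DIE is set, otherwise leave idle
 *	recheck:
 *		if no more worker is necessary, go to sleep
 *		if !may_start_working(), manage_workers() and recheck
 *		clear WORKER_PREP and process works while keep_working()
 *	sleep:
 *		if managing is needed, manage_workers() and recheck
 *		enter idle, release gcwq->lock and schedule()
 */
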
1882e22bee78STejun Heo /**
1883e22bee78STejun Heo  * rescuer_thread - the rescuer thread function
1884e22bee78STejun Heo  * @__wq: the associated workqueue
1885e22bee78STejun Heo  *
1886e22bee78STejun Heo  * Workqueue rescuer thread function.  There's one rescuer for each
1887e22bee78STejun Heo  * workqueue which has WQ_RESCUER set.
1888e22bee78STejun Heo  *
1889e22bee78STejun Heo  * Regular work processing on a gcwq may block trying to create a new
1890e22bee78STejun Heo  * worker, which uses a GFP_KERNEL allocation that has a slight chance
1891e22bee78STejun Heo  * of developing into a deadlock if some works currently on the same
1892e22bee78STejun Heo  * queue need to be processed to satisfy the GFP_KERNEL allocation.
1893e22bee78STejun Heo  * This is the problem the rescuer solves.
1894e22bee78STejun Heo  *
1895e22bee78STejun Heo  * When such a condition is possible, the gcwq summons the rescuers of
1896e22bee78STejun Heo  * all workqueues which have works queued on the gcwq and lets them
1897e22bee78STejun Heo  * process those works so that forward progress can be guaranteed.
1898e22bee78STejun Heo  *
1899e22bee78STejun Heo  * This should happen rarely.
1900e22bee78STejun Heo  */
1901e22bee78STejun Heo static int rescuer_thread(void *__wq)
1902e22bee78STejun Heo {
1903e22bee78STejun Heo 	struct workqueue_struct *wq = __wq;
1904e22bee78STejun Heo 	struct worker *rescuer = wq->rescuer;
1905e22bee78STejun Heo 	struct list_head *scheduled = &rescuer->scheduled;
1906f3421797STejun Heo 	bool is_unbound = wq->flags & WQ_UNBOUND;
1907e22bee78STejun Heo 	unsigned int cpu;
1908e22bee78STejun Heo 
1909e22bee78STejun Heo 	set_user_nice(current, RESCUER_NICE_LEVEL);
1910e22bee78STejun Heo repeat:
1911e22bee78STejun Heo 	set_current_state(TASK_INTERRUPTIBLE);
1912e22bee78STejun Heo 
1913e22bee78STejun Heo 	if (kthread_should_stop())
1914e22bee78STejun Heo 		return 0;
1915e22bee78STejun Heo 
1916f3421797STejun Heo 	/*
1917f3421797STejun Heo 	 * See whether any cpu is asking for help.  Unbound
1918f3421797STejun Heo 	 * workqueues use cpu 0 in mayday_mask for WORK_CPU_UNBOUND.
1919f3421797STejun Heo 	 */
1920e22bee78STejun Heo 	for_each_cpu(cpu, wq->mayday_mask) {
1921f3421797STejun Heo 		unsigned int tcpu = is_unbound ? WORK_CPU_UNBOUND : cpu;
1922f3421797STejun Heo 		struct cpu_workqueue_struct *cwq = get_cwq(tcpu, wq);
1923e22bee78STejun Heo 		struct global_cwq *gcwq = cwq->gcwq;
1924e22bee78STejun Heo 		struct work_struct *work, *n;
1925e22bee78STejun Heo 
1926e22bee78STejun Heo 		__set_current_state(TASK_RUNNING);
1927e22bee78STejun Heo 		cpumask_clear_cpu(cpu, wq->mayday_mask);
1928e22bee78STejun Heo 
1929e22bee78STejun Heo 		/* migrate to the target cpu if possible */
1930e22bee78STejun Heo 		rescuer->gcwq = gcwq;
1931e22bee78STejun Heo 		worker_maybe_bind_and_lock(rescuer);
1932e22bee78STejun Heo 
1933e22bee78STejun Heo 		/*
1934e22bee78STejun Heo 		 * Slurp in all works issued via this workqueue and
1935e22bee78STejun Heo 		 * process'em.
1936e22bee78STejun Heo 		 */
1937e22bee78STejun Heo 		BUG_ON(!list_empty(&rescuer->scheduled));
1938e22bee78STejun Heo 		list_for_each_entry_safe(work, n, &gcwq->worklist, entry)
1939e22bee78STejun Heo 			if (get_work_cwq(work) == cwq)
1940e22bee78STejun Heo 				move_linked_works(work, scheduled, &n);
1941e22bee78STejun Heo 
1942e22bee78STejun Heo 		process_scheduled_works(rescuer);
1943e22bee78STejun Heo 		spin_unlock_irq(&gcwq->lock);
1944e22bee78STejun Heo 	}
1945e22bee78STejun Heo 
1946e22bee78STejun Heo 	schedule();
1947e22bee78STejun Heo 	goto repeat;
1948e22bee78STejun Heo }
1949e22bee78STejun Heo 
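/*
 * Illustrative sketch, not part of this file: a workqueue which needs
 * the forward-progress guarantee provided by rescuer_thread() above,
 * e.g. one used in the memory reclaim path, is created with
 * WQ_RESCUER set.  The name and max_active value are hypothetical.
 *
 *	struct workqueue_struct *wq;
 *
 *	wq = alloc_workqueue("my_reclaim_wq", WQ_RESCUER, 1);
 *	if (!wq)
 *		return -ENOMEM;
 */
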
1950fc2e4d70SOleg Nesterov struct wq_barrier {
1951fc2e4d70SOleg Nesterov 	struct work_struct	work;
1952fc2e4d70SOleg Nesterov 	struct completion	done;
1953fc2e4d70SOleg Nesterov };
1954fc2e4d70SOleg Nesterov 
1955fc2e4d70SOleg Nesterov static void wq_barrier_func(struct work_struct *work)
1956fc2e4d70SOleg Nesterov {
1957fc2e4d70SOleg Nesterov 	struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
1958fc2e4d70SOleg Nesterov 	complete(&barr->done);
1959fc2e4d70SOleg Nesterov }
1960fc2e4d70SOleg Nesterov 
19614690c4abSTejun Heo /**
19624690c4abSTejun Heo  * insert_wq_barrier - insert a barrier work
19634690c4abSTejun Heo  * @cwq: cwq to insert barrier into
19644690c4abSTejun Heo  * @barr: wq_barrier to insert
1965affee4b2STejun Heo  * @target: target work to attach @barr to
1966affee4b2STejun Heo  * @worker: worker currently executing @target, NULL if @target is not executing
19674690c4abSTejun Heo  *
1968affee4b2STejun Heo  * @barr is linked to @target such that @barr is completed only after
1969affee4b2STejun Heo  * @target finishes execution.  Please note that the ordering
1970affee4b2STejun Heo  * guarantee is observed only with respect to @target and on the local
1971affee4b2STejun Heo  * cpu.
1972affee4b2STejun Heo  *
1973affee4b2STejun Heo  * Currently, a queued barrier can't be canceled.  This is because
1974affee4b2STejun Heo  * try_to_grab_pending() can't determine whether the work to be
1975affee4b2STejun Heo  * grabbed is at the head of the queue and thus can't clear the LINKED
1976affee4b2STejun Heo  * flag of the previous work, while there must be a valid next work
1977affee4b2STejun Heo  * after a work with the LINKED flag set.
1978affee4b2STejun Heo  *
1979affee4b2STejun Heo  * Note that when @worker is non-NULL, @target may be modified
1980affee4b2STejun Heo  * underneath us, so we can't reliably determine cwq from @target.
19814690c4abSTejun Heo  *
19824690c4abSTejun Heo  * CONTEXT:
19838b03ae3cSTejun Heo  * spin_lock_irq(gcwq->lock).
19844690c4abSTejun Heo  */
198583c22520SOleg Nesterov static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
1986affee4b2STejun Heo 			      struct wq_barrier *barr,
1987affee4b2STejun Heo 			      struct work_struct *target, struct worker *worker)
1988fc2e4d70SOleg Nesterov {
1989affee4b2STejun Heo 	struct list_head *head;
1990affee4b2STejun Heo 	unsigned int linked = 0;
1991affee4b2STejun Heo 
1992dc186ad7SThomas Gleixner 	/*
19938b03ae3cSTejun Heo 	 * debugobject calls are safe here even with gcwq->lock locked
1994dc186ad7SThomas Gleixner 	 * as we know for sure that this will not trigger any of the
1995dc186ad7SThomas Gleixner 	 * checks and call back into the fixup functions where we
1996dc186ad7SThomas Gleixner 	 * might deadlock.
1997dc186ad7SThomas Gleixner 	 */
1998dc186ad7SThomas Gleixner 	INIT_WORK_ON_STACK(&barr->work, wq_barrier_func);
199922df02bbSTejun Heo 	__set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work));
2000fc2e4d70SOleg Nesterov 	init_completion(&barr->done);
200183c22520SOleg Nesterov 
2002affee4b2STejun Heo 	/*
2003affee4b2STejun Heo 	 * If @target is currently being executed, schedule the
2004affee4b2STejun Heo 	 * barrier to the worker; otherwise, put it after @target.
2005affee4b2STejun Heo 	 */
2006affee4b2STejun Heo 	if (worker)
2007affee4b2STejun Heo 		head = worker->scheduled.next;
2008affee4b2STejun Heo 	else {
2009affee4b2STejun Heo 		unsigned long *bits = work_data_bits(target);
2010affee4b2STejun Heo 
2011affee4b2STejun Heo 		head = target->entry.next;
2012affee4b2STejun Heo 		/* there can already be other linked works, inherit and set */
2013affee4b2STejun Heo 		linked = *bits & WORK_STRUCT_LINKED;
2014affee4b2STejun Heo 		__set_bit(WORK_STRUCT_LINKED_BIT, bits);
2015affee4b2STejun Heo 	}
2016affee4b2STejun Heo 
2017dc186ad7SThomas Gleixner 	debug_work_activate(&barr->work);
2018affee4b2STejun Heo 	insert_work(cwq, &barr->work, head,
2019affee4b2STejun Heo 		    work_color_to_flags(WORK_NO_COLOR) | linked);
2020fc2e4d70SOleg Nesterov }
2021fc2e4d70SOleg Nesterov 
202273f53c4aSTejun Heo /**
202373f53c4aSTejun Heo  * flush_workqueue_prep_cwqs - prepare cwqs for workqueue flushing
202473f53c4aSTejun Heo  * @wq: workqueue being flushed
202573f53c4aSTejun Heo  * @flush_color: new flush color, < 0 for no-op
202673f53c4aSTejun Heo  * @work_color: new work color, < 0 for no-op
202773f53c4aSTejun Heo  *
202873f53c4aSTejun Heo  * Prepare cwqs for workqueue flushing.
202973f53c4aSTejun Heo  *
203073f53c4aSTejun Heo  * If @flush_color is non-negative, flush_color on all cwqs should be
203173f53c4aSTejun Heo  * -1.  If no cwq has in-flight works at the specified color, all
203273f53c4aSTejun Heo  * cwq->flush_color's stay at -1 and %false is returned.  If any cwq
203373f53c4aSTejun Heo  * has in-flight works, its cwq->flush_color is set to
203473f53c4aSTejun Heo  * @flush_color, @wq->nr_cwqs_to_flush is updated accordingly, cwq
203573f53c4aSTejun Heo  * wakeup logic is armed and %true is returned.
203673f53c4aSTejun Heo  *
203773f53c4aSTejun Heo  * The caller should have initialized @wq->first_flusher prior to
203873f53c4aSTejun Heo  * calling this function with non-negative @flush_color.  If
203973f53c4aSTejun Heo  * @flush_color is negative, no flush color update is done and %false
204073f53c4aSTejun Heo  * is returned.
204173f53c4aSTejun Heo  *
204273f53c4aSTejun Heo  * If @work_color is non-negative, all cwqs should have the same
204373f53c4aSTejun Heo  * work_color which is previous to @work_color and all will be
204473f53c4aSTejun Heo  * advanced to @work_color.
204573f53c4aSTejun Heo  *
204673f53c4aSTejun Heo  * CONTEXT:
204773f53c4aSTejun Heo  * mutex_lock(wq->flush_mutex).
204873f53c4aSTejun Heo  *
204973f53c4aSTejun Heo  * RETURNS:
205073f53c4aSTejun Heo  * %true if @flush_color >= 0 and there's something to flush.  %false
205173f53c4aSTejun Heo  * otherwise.
205273f53c4aSTejun Heo  */
205373f53c4aSTejun Heo static bool flush_workqueue_prep_cwqs(struct workqueue_struct *wq,
205473f53c4aSTejun Heo 				      int flush_color, int work_color)
20551da177e4SLinus Torvalds {
205673f53c4aSTejun Heo 	bool wait = false;
205773f53c4aSTejun Heo 	unsigned int cpu;
20581da177e4SLinus Torvalds 
205973f53c4aSTejun Heo 	if (flush_color >= 0) {
206073f53c4aSTejun Heo 		BUG_ON(atomic_read(&wq->nr_cwqs_to_flush));
206173f53c4aSTejun Heo 		atomic_set(&wq->nr_cwqs_to_flush, 1);
206273f53c4aSTejun Heo 	}
206373f53c4aSTejun Heo 
2064f3421797STejun Heo 	for_each_cwq_cpu(cpu, wq) {
206573f53c4aSTejun Heo 		struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
20668b03ae3cSTejun Heo 		struct global_cwq *gcwq = cwq->gcwq;
20672355b70fSLai Jiangshan 
20688b03ae3cSTejun Heo 		spin_lock_irq(&gcwq->lock);
206973f53c4aSTejun Heo 
207073f53c4aSTejun Heo 		if (flush_color >= 0) {
207173f53c4aSTejun Heo 			BUG_ON(cwq->flush_color != -1);
207273f53c4aSTejun Heo 
207373f53c4aSTejun Heo 			if (cwq->nr_in_flight[flush_color]) {
207473f53c4aSTejun Heo 				cwq->flush_color = flush_color;
207573f53c4aSTejun Heo 				atomic_inc(&wq->nr_cwqs_to_flush);
207673f53c4aSTejun Heo 				wait = true;
207783c22520SOleg Nesterov 			}
207873f53c4aSTejun Heo 		}
207973f53c4aSTejun Heo 
208073f53c4aSTejun Heo 		if (work_color >= 0) {
208173f53c4aSTejun Heo 			BUG_ON(work_color != work_next_color(cwq->work_color));
208273f53c4aSTejun Heo 			cwq->work_color = work_color;
208373f53c4aSTejun Heo 		}
208473f53c4aSTejun Heo 
20858b03ae3cSTejun Heo 		spin_unlock_irq(&gcwq->lock);
2086dc186ad7SThomas Gleixner 	}
208714441960SOleg Nesterov 
208873f53c4aSTejun Heo 	if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_cwqs_to_flush))
208973f53c4aSTejun Heo 		complete(&wq->first_flusher->done);
209073f53c4aSTejun Heo 
209173f53c4aSTejun Heo 	return wait;
209283c22520SOleg Nesterov }
20931da177e4SLinus Torvalds 
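/*
 * Illustrative sketch, not part of this file: the flush color scheme
 * in action.  Suppose wq->flush_color and wq->work_color are both 3
 * and a flusher arrives: it claims flush color 3, advances work_color
 * to 4 so that newly queued works get color 4, and waits until
 * nr_in_flight[3] reaches zero on every cwq.  A second flusher
 * arriving meanwhile claims color 4, advances work_color to 5 and
 * waits in wq->flusher_queue behind the first.
 */
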
20940fcb78c2SRolf Eike Beer /**
20951da177e4SLinus Torvalds  * flush_workqueue - ensure that any scheduled work has run to completion.
20960fcb78c2SRolf Eike Beer  * @wq: workqueue to flush
20971da177e4SLinus Torvalds  *
20981da177e4SLinus Torvalds  * Forces execution of the workqueue and blocks until its completion.
20991da177e4SLinus Torvalds  * This is typically used in driver shutdown handlers.
21001da177e4SLinus Torvalds  *
2101fc2e4d70SOleg Nesterov  * We sleep until all works which were queued on entry have been handled,
2102fc2e4d70SOleg Nesterov  * but we are not livelocked by new incoming ones.
21031da177e4SLinus Torvalds  */
21047ad5b3a5SHarvey Harrison void flush_workqueue(struct workqueue_struct *wq)
21051da177e4SLinus Torvalds {
210673f53c4aSTejun Heo 	struct wq_flusher this_flusher = {
210773f53c4aSTejun Heo 		.list = LIST_HEAD_INIT(this_flusher.list),
210873f53c4aSTejun Heo 		.flush_color = -1,
210973f53c4aSTejun Heo 		.done = COMPLETION_INITIALIZER_ONSTACK(this_flusher.done),
211073f53c4aSTejun Heo 	};
211173f53c4aSTejun Heo 	int next_color;
2112b1f4ec17SOleg Nesterov 
21133295f0efSIngo Molnar 	lock_map_acquire(&wq->lockdep_map);
21143295f0efSIngo Molnar 	lock_map_release(&wq->lockdep_map);
211573f53c4aSTejun Heo 
211673f53c4aSTejun Heo 	mutex_lock(&wq->flush_mutex);
211773f53c4aSTejun Heo 
211873f53c4aSTejun Heo 	/*
211973f53c4aSTejun Heo 	 * Start-to-wait phase
212073f53c4aSTejun Heo 	 */
212173f53c4aSTejun Heo 	next_color = work_next_color(wq->work_color);
212273f53c4aSTejun Heo 
212373f53c4aSTejun Heo 	if (next_color != wq->flush_color) {
212473f53c4aSTejun Heo 		/*
212573f53c4aSTejun Heo 		 * Color space is not full.  The current work_color
212673f53c4aSTejun Heo 		 * becomes our flush_color and work_color is advanced
212773f53c4aSTejun Heo 		 * by one.
212873f53c4aSTejun Heo 		 */
212973f53c4aSTejun Heo 		BUG_ON(!list_empty(&wq->flusher_overflow));
213073f53c4aSTejun Heo 		this_flusher.flush_color = wq->work_color;
213173f53c4aSTejun Heo 		wq->work_color = next_color;
213273f53c4aSTejun Heo 
213373f53c4aSTejun Heo 		if (!wq->first_flusher) {
213473f53c4aSTejun Heo 			/* no flush in progress, become the first flusher */
213573f53c4aSTejun Heo 			BUG_ON(wq->flush_color != this_flusher.flush_color);
213673f53c4aSTejun Heo 
213773f53c4aSTejun Heo 			wq->first_flusher = &this_flusher;
213873f53c4aSTejun Heo 
213973f53c4aSTejun Heo 			if (!flush_workqueue_prep_cwqs(wq, wq->flush_color,
214073f53c4aSTejun Heo 						       wq->work_color)) {
214173f53c4aSTejun Heo 				/* nothing to flush, done */
214273f53c4aSTejun Heo 				wq->flush_color = next_color;
214373f53c4aSTejun Heo 				wq->first_flusher = NULL;
214473f53c4aSTejun Heo 				goto out_unlock;
214573f53c4aSTejun Heo 			}
214673f53c4aSTejun Heo 		} else {
214773f53c4aSTejun Heo 			/* wait in queue */
214873f53c4aSTejun Heo 			BUG_ON(wq->flush_color == this_flusher.flush_color);
214973f53c4aSTejun Heo 			list_add_tail(&this_flusher.list, &wq->flusher_queue);
215073f53c4aSTejun Heo 			flush_workqueue_prep_cwqs(wq, -1, wq->work_color);
215173f53c4aSTejun Heo 		}
215273f53c4aSTejun Heo 	} else {
215373f53c4aSTejun Heo 		/*
215473f53c4aSTejun Heo 		 * Oops, color space is full, wait on overflow queue.
215573f53c4aSTejun Heo 		 * The next flush completion will assign us
215673f53c4aSTejun Heo 		 * flush_color and transfer to flusher_queue.
215773f53c4aSTejun Heo 		 */
215873f53c4aSTejun Heo 		list_add_tail(&this_flusher.list, &wq->flusher_overflow);
215973f53c4aSTejun Heo 	}
216073f53c4aSTejun Heo 
216173f53c4aSTejun Heo 	mutex_unlock(&wq->flush_mutex);
216273f53c4aSTejun Heo 
216373f53c4aSTejun Heo 	wait_for_completion(&this_flusher.done);
216473f53c4aSTejun Heo 
216573f53c4aSTejun Heo 	/*
216673f53c4aSTejun Heo 	 * Wake-up-and-cascade phase
216773f53c4aSTejun Heo 	 *
216873f53c4aSTejun Heo 	 * First flushers are responsible for cascading flushes and
216973f53c4aSTejun Heo 	 * handling overflow.  Non-first flushers can simply return.
217073f53c4aSTejun Heo 	 */
217173f53c4aSTejun Heo 	if (wq->first_flusher != &this_flusher)
217273f53c4aSTejun Heo 		return;
217373f53c4aSTejun Heo 
217473f53c4aSTejun Heo 	mutex_lock(&wq->flush_mutex);
217573f53c4aSTejun Heo 
21764ce48b37STejun Heo 	/* we might have raced, check again with mutex held */
21774ce48b37STejun Heo 	if (wq->first_flusher != &this_flusher)
21784ce48b37STejun Heo 		goto out_unlock;
21794ce48b37STejun Heo 
218073f53c4aSTejun Heo 	wq->first_flusher = NULL;
218173f53c4aSTejun Heo 
218273f53c4aSTejun Heo 	BUG_ON(!list_empty(&this_flusher.list));
218373f53c4aSTejun Heo 	BUG_ON(wq->flush_color != this_flusher.flush_color);
218473f53c4aSTejun Heo 
218573f53c4aSTejun Heo 	while (true) {
218673f53c4aSTejun Heo 		struct wq_flusher *next, *tmp;
218773f53c4aSTejun Heo 
218873f53c4aSTejun Heo 		/* complete all the flushers sharing the current flush color */
218973f53c4aSTejun Heo 		list_for_each_entry_safe(next, tmp, &wq->flusher_queue, list) {
219073f53c4aSTejun Heo 			if (next->flush_color != wq->flush_color)
219173f53c4aSTejun Heo 				break;
219273f53c4aSTejun Heo 			list_del_init(&next->list);
219373f53c4aSTejun Heo 			complete(&next->done);
219473f53c4aSTejun Heo 		}
219573f53c4aSTejun Heo 
219673f53c4aSTejun Heo 		BUG_ON(!list_empty(&wq->flusher_overflow) &&
219773f53c4aSTejun Heo 		       wq->flush_color != work_next_color(wq->work_color));
219873f53c4aSTejun Heo 
219973f53c4aSTejun Heo 		/* this flush_color is finished, advance by one */
220073f53c4aSTejun Heo 		wq->flush_color = work_next_color(wq->flush_color);
220173f53c4aSTejun Heo 
220273f53c4aSTejun Heo 		/* one color has been freed, handle overflow queue */
220373f53c4aSTejun Heo 		if (!list_empty(&wq->flusher_overflow)) {
220473f53c4aSTejun Heo 			/*
220573f53c4aSTejun Heo 			 * Assign the same color to all overflowed
220673f53c4aSTejun Heo 			 * flushers, advance work_color and append to
220773f53c4aSTejun Heo 			 * flusher_queue.  This is the start-to-wait
220873f53c4aSTejun Heo 			 * phase for these overflowed flushers.
220973f53c4aSTejun Heo 			 */
221073f53c4aSTejun Heo 			list_for_each_entry(tmp, &wq->flusher_overflow, list)
221173f53c4aSTejun Heo 				tmp->flush_color = wq->work_color;
221273f53c4aSTejun Heo 
221373f53c4aSTejun Heo 			wq->work_color = work_next_color(wq->work_color);
221473f53c4aSTejun Heo 
221573f53c4aSTejun Heo 			list_splice_tail_init(&wq->flusher_overflow,
221673f53c4aSTejun Heo 					      &wq->flusher_queue);
221773f53c4aSTejun Heo 			flush_workqueue_prep_cwqs(wq, -1, wq->work_color);
221873f53c4aSTejun Heo 		}
221973f53c4aSTejun Heo 
222073f53c4aSTejun Heo 		if (list_empty(&wq->flusher_queue)) {
222173f53c4aSTejun Heo 			BUG_ON(wq->flush_color != wq->work_color);
222273f53c4aSTejun Heo 			break;
222373f53c4aSTejun Heo 		}
222473f53c4aSTejun Heo 
222573f53c4aSTejun Heo 		/*
222673f53c4aSTejun Heo 		 * Need to flush more colors.  Make the next flusher
222773f53c4aSTejun Heo 		 * the new first flusher and arm cwqs.
222873f53c4aSTejun Heo 		 */
222973f53c4aSTejun Heo 		BUG_ON(wq->flush_color == wq->work_color);
223073f53c4aSTejun Heo 		BUG_ON(wq->flush_color != next->flush_color);
223173f53c4aSTejun Heo 
223273f53c4aSTejun Heo 		list_del_init(&next->list);
223373f53c4aSTejun Heo 		wq->first_flusher = next;
223473f53c4aSTejun Heo 
223573f53c4aSTejun Heo 		if (flush_workqueue_prep_cwqs(wq, wq->flush_color, -1))
223673f53c4aSTejun Heo 			break;
223773f53c4aSTejun Heo 
223873f53c4aSTejun Heo 		/*
223973f53c4aSTejun Heo 		 * Meh... this color is already done, clear first
224073f53c4aSTejun Heo 		 * flusher and repeat cascading.
224173f53c4aSTejun Heo 		 */
224273f53c4aSTejun Heo 		wq->first_flusher = NULL;
224373f53c4aSTejun Heo 	}
224473f53c4aSTejun Heo 
224573f53c4aSTejun Heo out_unlock:
224673f53c4aSTejun Heo 	mutex_unlock(&wq->flush_mutex);
22471da177e4SLinus Torvalds }
2248ae90dd5dSDave Jones EXPORT_SYMBOL_GPL(flush_workqueue);
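
/*
 * Usage sketch (illustrative only; "my_wq" and my_drain() are
 * hypothetical): a driver typically flushes its own workqueue before
 * releasing anything the queued work items may still touch.
 *
 *	static struct workqueue_struct *my_wq;
 *
 *	static void my_drain(void)
 *	{
 *		flush_workqueue(my_wq);
 *	}
 *
 * Note that destroy_workqueue() performs this flush itself; an explicit
 * flush matters when the queue must be drained but kept alive.
 */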
22491da177e4SLinus Torvalds 
2250db700897SOleg Nesterov /**
2251db700897SOleg Nesterov  * flush_work - block until a work_struct's callback has terminated
2252db700897SOleg Nesterov  * @work: the work which is to be flushed
2253db700897SOleg Nesterov  *
2254a67da70dSOleg Nesterov  * Returns false if @work has already terminated.
2255a67da70dSOleg Nesterov  *
2256db700897SOleg Nesterov  * It is expected that, prior to calling flush_work(), the caller has
2257db700897SOleg Nesterov  * arranged for the work to not be requeued, otherwise it doesn't make
2258db700897SOleg Nesterov  * sense to use this function.
2259db700897SOleg Nesterov  */
2260db700897SOleg Nesterov int flush_work(struct work_struct *work)
2261db700897SOleg Nesterov {
2262affee4b2STejun Heo 	struct worker *worker = NULL;
22638b03ae3cSTejun Heo 	struct global_cwq *gcwq;
22647a22ad75STejun Heo 	struct cpu_workqueue_struct *cwq;
2265db700897SOleg Nesterov 	struct wq_barrier barr;
2266db700897SOleg Nesterov 
2267db700897SOleg Nesterov 	might_sleep();
22687a22ad75STejun Heo 	gcwq = get_work_gcwq(work);
22697a22ad75STejun Heo 	if (!gcwq)
2270db700897SOleg Nesterov 		return 0;
2271a67da70dSOleg Nesterov 
22728b03ae3cSTejun Heo 	spin_lock_irq(&gcwq->lock);
2273db700897SOleg Nesterov 	if (!list_empty(&work->entry)) {
2274db700897SOleg Nesterov 		/*
2275db700897SOleg Nesterov 		 * See the comment near try_to_grab_pending()->smp_rmb().
22767a22ad75STejun Heo 		 * If it was re-queued to a different gcwq under us, we
22777a22ad75STejun Heo 		 * are not going to wait.
2278db700897SOleg Nesterov 		 */
2279db700897SOleg Nesterov 		smp_rmb();
22807a22ad75STejun Heo 		cwq = get_work_cwq(work);
22817a22ad75STejun Heo 		if (unlikely(!cwq || gcwq != cwq->gcwq))
22824690c4abSTejun Heo 			goto already_gone;
2283db700897SOleg Nesterov 	} else {
22847a22ad75STejun Heo 		worker = find_worker_executing_work(gcwq, work);
2285affee4b2STejun Heo 		if (!worker)
22864690c4abSTejun Heo 			goto already_gone;
22877a22ad75STejun Heo 		cwq = worker->current_cwq;
2288db700897SOleg Nesterov 	}
2289db700897SOleg Nesterov 
2290affee4b2STejun Heo 	insert_wq_barrier(cwq, &barr, work, worker);
22918b03ae3cSTejun Heo 	spin_unlock_irq(&gcwq->lock);
22927a22ad75STejun Heo 
22937a22ad75STejun Heo 	lock_map_acquire(&cwq->wq->lockdep_map);
22947a22ad75STejun Heo 	lock_map_release(&cwq->wq->lockdep_map);
22957a22ad75STejun Heo 
2296db700897SOleg Nesterov 	wait_for_completion(&barr.done);
2297dc186ad7SThomas Gleixner 	destroy_work_on_stack(&barr.work);
2298db700897SOleg Nesterov 	return 1;
22994690c4abSTejun Heo already_gone:
23008b03ae3cSTejun Heo 	spin_unlock_irq(&gcwq->lock);
23014690c4abSTejun Heo 	return 0;
2302db700897SOleg Nesterov }
2303db700897SOleg Nesterov EXPORT_SYMBOL_GPL(flush_work);
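
/*
 * Usage sketch (illustrative only; my_dev, MY_STOPPING and my_work_fn()
 * are hypothetical): as required above, requeueing must be stopped
 * before the flush.
 *
 *	set_bit(MY_STOPPING, &dev->flags);
 *	flush_work(&dev->work);
 *
 * my_work_fn() is assumed to test MY_STOPPING before rearming itself,
 * so once flush_work() returns the callback cannot run again.
 */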
2304db700897SOleg Nesterov 
23056e84d644SOleg Nesterov /*
23061f1f642eSOleg Nesterov  * Upon a successful return (>= 0), the caller "owns" WORK_STRUCT_PENDING bit,
23076e84d644SOleg Nesterov  * so this work can't be re-armed in any way.
23086e84d644SOleg Nesterov  */
23096e84d644SOleg Nesterov static int try_to_grab_pending(struct work_struct *work)
23106e84d644SOleg Nesterov {
23118b03ae3cSTejun Heo 	struct global_cwq *gcwq;
23121f1f642eSOleg Nesterov 	int ret = -1;
23136e84d644SOleg Nesterov 
231422df02bbSTejun Heo 	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)))
23151f1f642eSOleg Nesterov 		return 0;
23166e84d644SOleg Nesterov 
23176e84d644SOleg Nesterov 	/*
23186e84d644SOleg Nesterov 	 * The queueing is in progress, or it is already queued. Try to
23196e84d644SOleg Nesterov 	 * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
23206e84d644SOleg Nesterov 	 */
23217a22ad75STejun Heo 	gcwq = get_work_gcwq(work);
23227a22ad75STejun Heo 	if (!gcwq)
23236e84d644SOleg Nesterov 		return ret;
23246e84d644SOleg Nesterov 
23258b03ae3cSTejun Heo 	spin_lock_irq(&gcwq->lock);
23266e84d644SOleg Nesterov 	if (!list_empty(&work->entry)) {
23276e84d644SOleg Nesterov 		/*
23287a22ad75STejun Heo 		 * This work is queued, but perhaps we locked the wrong gcwq.
23296e84d644SOleg Nesterov 		 * In that case we must see the new value after rmb(), see
23306e84d644SOleg Nesterov 		 * insert_work()->wmb().
23316e84d644SOleg Nesterov 		 */
23326e84d644SOleg Nesterov 		smp_rmb();
23337a22ad75STejun Heo 		if (gcwq == get_work_gcwq(work)) {
2334dc186ad7SThomas Gleixner 			debug_work_deactivate(work);
23356e84d644SOleg Nesterov 			list_del_init(&work->entry);
23367a22ad75STejun Heo 			cwq_dec_nr_in_flight(get_work_cwq(work),
23377a22ad75STejun Heo 					     get_work_color(work));
23386e84d644SOleg Nesterov 			ret = 1;
23396e84d644SOleg Nesterov 		}
23406e84d644SOleg Nesterov 	}
23418b03ae3cSTejun Heo 	spin_unlock_irq(&gcwq->lock);
23426e84d644SOleg Nesterov 
23436e84d644SOleg Nesterov 	return ret;
23446e84d644SOleg Nesterov }
23456e84d644SOleg Nesterov 
23467a22ad75STejun Heo static void wait_on_cpu_work(struct global_cwq *gcwq, struct work_struct *work)
2347b89deed3SOleg Nesterov {
2348b89deed3SOleg Nesterov 	struct wq_barrier barr;
2349affee4b2STejun Heo 	struct worker *worker;
2350b89deed3SOleg Nesterov 
23518b03ae3cSTejun Heo 	spin_lock_irq(&gcwq->lock);
2352affee4b2STejun Heo 
23537a22ad75STejun Heo 	worker = find_worker_executing_work(gcwq, work);
23547a22ad75STejun Heo 	if (unlikely(worker))
23557a22ad75STejun Heo 		insert_wq_barrier(worker->current_cwq, &barr, work, worker);
2356affee4b2STejun Heo 
23578b03ae3cSTejun Heo 	spin_unlock_irq(&gcwq->lock);
2358b89deed3SOleg Nesterov 
2359affee4b2STejun Heo 	if (unlikely(worker)) {
2360b89deed3SOleg Nesterov 		wait_for_completion(&barr.done);
2361dc186ad7SThomas Gleixner 		destroy_work_on_stack(&barr.work);
2362dc186ad7SThomas Gleixner 	}
2363b89deed3SOleg Nesterov }
2364b89deed3SOleg Nesterov 
23656e84d644SOleg Nesterov static void wait_on_work(struct work_struct *work)
2366b89deed3SOleg Nesterov {
2367b1f4ec17SOleg Nesterov 	int cpu;
2368b89deed3SOleg Nesterov 
2369f293ea92SOleg Nesterov 	might_sleep();
2370f293ea92SOleg Nesterov 
23713295f0efSIngo Molnar 	lock_map_acquire(&work->lockdep_map);
23723295f0efSIngo Molnar 	lock_map_release(&work->lockdep_map);
23734e6045f1SJohannes Berg 
2374f3421797STejun Heo 	for_each_gcwq_cpu(cpu)
23757a22ad75STejun Heo 		wait_on_cpu_work(get_gcwq(cpu), work);
23766e84d644SOleg Nesterov }
23776e84d644SOleg Nesterov 
23781f1f642eSOleg Nesterov static int __cancel_work_timer(struct work_struct *work,
23791f1f642eSOleg Nesterov 				struct timer_list* timer)
23801f1f642eSOleg Nesterov {
23811f1f642eSOleg Nesterov 	int ret;
23821f1f642eSOleg Nesterov 
23831f1f642eSOleg Nesterov 	do {
23841f1f642eSOleg Nesterov 		ret = (timer && likely(del_timer(timer)));
23851f1f642eSOleg Nesterov 		if (!ret)
23861f1f642eSOleg Nesterov 			ret = try_to_grab_pending(work);
23871f1f642eSOleg Nesterov 		wait_on_work(work);
23881f1f642eSOleg Nesterov 	} while (unlikely(ret < 0));
23891f1f642eSOleg Nesterov 
23907a22ad75STejun Heo 	clear_work_data(work);
23911f1f642eSOleg Nesterov 	return ret;
23921f1f642eSOleg Nesterov }
23931f1f642eSOleg Nesterov 
23946e84d644SOleg Nesterov /**
23956e84d644SOleg Nesterov  * cancel_work_sync - block until a work_struct's callback has terminated
23966e84d644SOleg Nesterov  * @work: the work which is to be flushed
23976e84d644SOleg Nesterov  *
23981f1f642eSOleg Nesterov  * Returns true if @work was pending.
23991f1f642eSOleg Nesterov  *
24006e84d644SOleg Nesterov  * cancel_work_sync() will cancel the work if it is queued. If the work's
24016e84d644SOleg Nesterov  * callback appears to be running, cancel_work_sync() will block until it
24026e84d644SOleg Nesterov  * has completed.
24036e84d644SOleg Nesterov  *
24046e84d644SOleg Nesterov  * It is possible to use this function if the work re-queues itself. It can
24056e84d644SOleg Nesterov  * cancel the work even if it migrates to another workqueue; however, in that
24066e84d644SOleg Nesterov  * case it only guarantees that work->func() has completed on the last queued
24076e84d644SOleg Nesterov  * workqueue.
24086e84d644SOleg Nesterov  *
24096e84d644SOleg Nesterov  * cancel_work_sync(&delayed_work->work) should be used only if ->timer is not
24106e84d644SOleg Nesterov  * pending, otherwise it goes into a busy-wait loop until the timer expires.
24116e84d644SOleg Nesterov  *
24126e84d644SOleg Nesterov  * The caller must ensure that workqueue_struct on which this work was last
24136e84d644SOleg Nesterov  * queued can't be destroyed before this function returns.
24146e84d644SOleg Nesterov  */
24151f1f642eSOleg Nesterov int cancel_work_sync(struct work_struct *work)
24166e84d644SOleg Nesterov {
24171f1f642eSOleg Nesterov 	return __cancel_work_timer(work, NULL);
2418b89deed3SOleg Nesterov }
241928e53bddSOleg Nesterov EXPORT_SYMBOL_GPL(cancel_work_sync);
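
/*
 * Usage sketch (illustrative only; struct my_dev and my_remove() are
 * hypothetical): the common teardown pattern.  Once the cancel returns,
 * the callback is neither queued nor running, so its data may be freed.
 *
 *	static void my_remove(struct my_dev *dev)
 *	{
 *		cancel_work_sync(&dev->work);
 *		kfree(dev);
 *	}
 */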
2420b89deed3SOleg Nesterov 
24216e84d644SOleg Nesterov /**
2422f5a421a4SOleg Nesterov  * cancel_delayed_work_sync - reliably kill off a delayed work.
24236e84d644SOleg Nesterov  * @dwork: the delayed work struct
24246e84d644SOleg Nesterov  *
24251f1f642eSOleg Nesterov  * Returns true if @dwork was pending.
24261f1f642eSOleg Nesterov  *
24276e84d644SOleg Nesterov  * It is possible to use this function if @dwork rearms itself via queue_work()
24286e84d644SOleg Nesterov  * or queue_delayed_work(). See also the comment for cancel_work_sync().
24296e84d644SOleg Nesterov  */
24301f1f642eSOleg Nesterov int cancel_delayed_work_sync(struct delayed_work *dwork)
24316e84d644SOleg Nesterov {
24321f1f642eSOleg Nesterov 	return __cancel_work_timer(&dwork->work, &dwork->timer);
24336e84d644SOleg Nesterov }
2434f5a421a4SOleg Nesterov EXPORT_SYMBOL(cancel_delayed_work_sync);
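
/*
 * Usage sketch (illustrative only; dev->poll_work is hypothetical):
 * stopping a self-rearming poller.  Unlike cancel_delayed_work(), this
 * also waits for a callback that has already started and may be about
 * to rearm itself.
 *
 *	cancel_delayed_work_sync(&dev->poll_work);
 */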
24351da177e4SLinus Torvalds 
24360fcb78c2SRolf Eike Beer /**
24370fcb78c2SRolf Eike Beer  * schedule_work - put work task in global workqueue
24380fcb78c2SRolf Eike Beer  * @work: job to be done
24390fcb78c2SRolf Eike Beer  *
24405b0f437dSBart Van Assche  * Returns zero if @work was already on the kernel-global workqueue and
24415b0f437dSBart Van Assche  * non-zero otherwise.
24425b0f437dSBart Van Assche  *
24435b0f437dSBart Van Assche  * This puts a job in the kernel-global workqueue if it was not already
24445b0f437dSBart Van Assche  * queued and leaves it in the same position on the kernel-global
24455b0f437dSBart Van Assche  * workqueue otherwise.
24460fcb78c2SRolf Eike Beer  */
24477ad5b3a5SHarvey Harrison int schedule_work(struct work_struct *work)
24481da177e4SLinus Torvalds {
2449d320c038STejun Heo 	return queue_work(system_wq, work);
24501da177e4SLinus Torvalds }
2451ae90dd5dSDave Jones EXPORT_SYMBOL(schedule_work);
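
/*
 * Usage sketch (illustrative only; my_work_fn() and my_irq_handler()
 * are hypothetical): deferring work from interrupt context to process
 * context via the kernel-global workqueue.
 *
 *	static void my_work_fn(struct work_struct *work)
 *	{
 *		...	(runs in process context, may sleep)
 *	}
 *	static DECLARE_WORK(my_work, my_work_fn);
 *
 *	static irqreturn_t my_irq_handler(int irq, void *data)
 *	{
 *		schedule_work(&my_work);
 *		return IRQ_HANDLED;
 *	}
 */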
24521da177e4SLinus Torvalds 
2453c1a220e7SZhang Rui /**
2454c1a220e7SZhang Rui  * schedule_work_on - put work task on a specific cpu
2455c1a220e7SZhang Rui  * @cpu: cpu to put the work task on
2456c1a220e7SZhang Rui  * @work: job to be done
2457c1a220e7SZhang Rui  *
2458c1a220e7SZhang Rui  * This puts a job on a specific cpu.
2459c1a220e7SZhang Rui  */
2460c1a220e7SZhang Rui int schedule_work_on(int cpu, struct work_struct *work)
2461c1a220e7SZhang Rui {
2462d320c038STejun Heo 	return queue_work_on(cpu, system_wq, work);
2463c1a220e7SZhang Rui }
2464c1a220e7SZhang Rui EXPORT_SYMBOL(schedule_work_on);
2465c1a220e7SZhang Rui 
24660fcb78c2SRolf Eike Beer /**
24670fcb78c2SRolf Eike Beer  * schedule_delayed_work - put work task in global workqueue after delay
246852bad64dSDavid Howells  * @dwork: job to be done
246952bad64dSDavid Howells  * @delay: number of jiffies to wait or 0 for immediate execution
24700fcb78c2SRolf Eike Beer  *
24710fcb78c2SRolf Eike Beer  * After waiting for a given time this puts a job in the kernel-global
24720fcb78c2SRolf Eike Beer  * workqueue.
24730fcb78c2SRolf Eike Beer  */
24747ad5b3a5SHarvey Harrison int schedule_delayed_work(struct delayed_work *dwork,
247582f67cd9SIngo Molnar 					unsigned long delay)
24761da177e4SLinus Torvalds {
2477d320c038STejun Heo 	return queue_delayed_work(system_wq, dwork, delay);
24781da177e4SLinus Torvalds }
2479ae90dd5dSDave Jones EXPORT_SYMBOL(schedule_delayed_work);
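
/*
 * Usage sketch (illustrative only; my_dev, poll_work and my_poll_hw()
 * are hypothetical): a periodic poller which rearms itself from its own
 * callback; pair it with cancel_delayed_work_sync() above to stop it.
 *
 *	static void my_poll_fn(struct work_struct *work)
 *	{
 *		struct my_dev *dev = container_of(to_delayed_work(work),
 *						  struct my_dev, poll_work);
 *
 *		my_poll_hw(dev);
 *		schedule_delayed_work(&dev->poll_work, HZ);
 *	}
 */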
24801da177e4SLinus Torvalds 
24810fcb78c2SRolf Eike Beer /**
24828c53e463SLinus Torvalds  * flush_delayed_work - block until a delayed_work's callback has terminated
24838c53e463SLinus Torvalds  * @dwork: the delayed work which is to be flushed
24848c53e463SLinus Torvalds  *
24858c53e463SLinus Torvalds  * Any timeout is cancelled, and any pending work is run immediately.
24868c53e463SLinus Torvalds  */
24878c53e463SLinus Torvalds void flush_delayed_work(struct delayed_work *dwork)
24888c53e463SLinus Torvalds {
24898c53e463SLinus Torvalds 	if (del_timer_sync(&dwork->timer)) {
24907a22ad75STejun Heo 		__queue_work(get_cpu(), get_work_cwq(&dwork->work)->wq,
24914690c4abSTejun Heo 			     &dwork->work);
24928c53e463SLinus Torvalds 		put_cpu();
24938c53e463SLinus Torvalds 	}
24948c53e463SLinus Torvalds 	flush_work(&dwork->work);
24958c53e463SLinus Torvalds }
24968c53e463SLinus Torvalds EXPORT_SYMBOL(flush_delayed_work);
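
/*
 * Usage sketch (illustrative only; dev->save_work is hypothetical):
 * forcing a lazily scheduled write-back to happen right now, e.g. on
 * suspend.
 *
 *	schedule_delayed_work(&dev->save_work, 10 * HZ);
 *	...
 *	flush_delayed_work(&dev->save_work);
 */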
24978c53e463SLinus Torvalds 
24988c53e463SLinus Torvalds /**
24990fcb78c2SRolf Eike Beer  * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
25000fcb78c2SRolf Eike Beer  * @cpu: cpu to use
250152bad64dSDavid Howells  * @dwork: job to be done
25020fcb78c2SRolf Eike Beer  * @delay: number of jiffies to wait
25030fcb78c2SRolf Eike Beer  *
25040fcb78c2SRolf Eike Beer  * After waiting for a given time this puts a job in the kernel-global
25050fcb78c2SRolf Eike Beer  * workqueue on the specified CPU.
25060fcb78c2SRolf Eike Beer  */
25071da177e4SLinus Torvalds int schedule_delayed_work_on(int cpu,
250852bad64dSDavid Howells 			struct delayed_work *dwork, unsigned long delay)
25091da177e4SLinus Torvalds {
2510d320c038STejun Heo 	return queue_delayed_work_on(cpu, system_wq, dwork, delay);
25111da177e4SLinus Torvalds }
2512ae90dd5dSDave Jones EXPORT_SYMBOL(schedule_delayed_work_on);
25131da177e4SLinus Torvalds 
2514b6136773SAndrew Morton /**
2515b6136773SAndrew Morton  * schedule_on_each_cpu - call a function on each online CPU from keventd
2516b6136773SAndrew Morton  * @func: the function to call
2517b6136773SAndrew Morton  *
2518b6136773SAndrew Morton  * Returns zero on success.
2519b6136773SAndrew Morton  * Returns a negative errno on failure.
2520b6136773SAndrew Morton  *
2521b6136773SAndrew Morton  * schedule_on_each_cpu() is very slow; it blocks until every cpu has run @func.
2522b6136773SAndrew Morton  */
252365f27f38SDavid Howells int schedule_on_each_cpu(work_func_t func)
252415316ba8SChristoph Lameter {
252515316ba8SChristoph Lameter 	int cpu;
2526b6136773SAndrew Morton 	struct work_struct *works;
252715316ba8SChristoph Lameter 
2528b6136773SAndrew Morton 	works = alloc_percpu(struct work_struct);
2529b6136773SAndrew Morton 	if (!works)
253015316ba8SChristoph Lameter 		return -ENOMEM;
2531b6136773SAndrew Morton 
253295402b38SGautham R Shenoy 	get_online_cpus();
253393981800STejun Heo 
253415316ba8SChristoph Lameter 	for_each_online_cpu(cpu) {
25359bfb1839SIngo Molnar 		struct work_struct *work = per_cpu_ptr(works, cpu);
25369bfb1839SIngo Molnar 
25379bfb1839SIngo Molnar 		INIT_WORK(work, func);
25388de6d308SOleg Nesterov 		schedule_work_on(cpu, work);
253915316ba8SChristoph Lameter 	}
254093981800STejun Heo 
254193981800STejun Heo 	for_each_online_cpu(cpu)
25428616a89aSOleg Nesterov 		flush_work(per_cpu_ptr(works, cpu));
254393981800STejun Heo 
254495402b38SGautham R Shenoy 	put_online_cpus();
2545b6136773SAndrew Morton 	free_percpu(works);
254615316ba8SChristoph Lameter 	return 0;
254715316ba8SChristoph Lameter }
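
/*
 * Usage sketch (illustrative only; my_drain_cache() is hypothetical):
 * running a function once on every online cpu, e.g. to drain per-cpu
 * caches.  The caller sleeps until all cpus have finished.
 *
 *	static void my_drain_cache(struct work_struct *unused)
 *	{
 *		...
 *	}
 *
 *	int err = schedule_on_each_cpu(my_drain_cache);
 */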
254815316ba8SChristoph Lameter 
2549eef6a7d5SAlan Stern /**
2550eef6a7d5SAlan Stern  * flush_scheduled_work - ensure that any scheduled work has run to completion.
2551eef6a7d5SAlan Stern  *
2552eef6a7d5SAlan Stern  * Forces execution of the kernel-global workqueue and blocks until its
2553eef6a7d5SAlan Stern  * completion.
2554eef6a7d5SAlan Stern  *
2555eef6a7d5SAlan Stern  * Think twice before calling this function!  It's very easy to get into
2556eef6a7d5SAlan Stern  * trouble if you don't take great care.  Either of the following situations
2557eef6a7d5SAlan Stern  * will lead to deadlock:
2558eef6a7d5SAlan Stern  *
2559eef6a7d5SAlan Stern  *	One of the work items currently on the workqueue needs to acquire
2560eef6a7d5SAlan Stern  *	a lock held by your code or its caller.
2561eef6a7d5SAlan Stern  *
2562eef6a7d5SAlan Stern  *	Your code is running in the context of a work routine.
2563eef6a7d5SAlan Stern  *
2564eef6a7d5SAlan Stern  * They will be detected by lockdep when they occur, but the first might not
2565eef6a7d5SAlan Stern  * occur very often.  It depends on what work items are on the workqueue and
2566eef6a7d5SAlan Stern  * what locks they need, which you have no control over.
2567eef6a7d5SAlan Stern  *
2568eef6a7d5SAlan Stern  * In most situations flushing the entire workqueue is overkill; you merely
2569eef6a7d5SAlan Stern  * need to know that a particular work item isn't queued and isn't running.
2570eef6a7d5SAlan Stern  * In such cases you should use cancel_delayed_work_sync() or
2571eef6a7d5SAlan Stern  * cancel_work_sync() instead.
2572eef6a7d5SAlan Stern  */
25731da177e4SLinus Torvalds void flush_scheduled_work(void)
25741da177e4SLinus Torvalds {
2575d320c038STejun Heo 	flush_workqueue(system_wq);
25761da177e4SLinus Torvalds }
2577ae90dd5dSDave Jones EXPORT_SYMBOL(flush_scheduled_work);
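
/*
 * Sketch of the first deadlock described above (illustrative only;
 * my_lock and my_work are hypothetical):
 *
 *	mutex_lock(&my_lock);
 *	flush_scheduled_work();		(deadlocks if any queued work
 *					 item also takes my_lock)
 *	mutex_unlock(&my_lock);
 *
 * Prefer cancel_work_sync(&my_work) on the specific item instead.
 */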
25781da177e4SLinus Torvalds 
25791da177e4SLinus Torvalds /**
25801fa44ecaSJames Bottomley  * execute_in_process_context - reliably execute the routine with user context
25811fa44ecaSJames Bottomley  * @fn:		the function to execute
25821fa44ecaSJames Bottomley  * @ew:		guaranteed storage for the execute work structure (must
25831fa44ecaSJames Bottomley  *		be available when the work executes)
25841fa44ecaSJames Bottomley  *
25851fa44ecaSJames Bottomley  * Executes the function immediately if process context is available,
25861fa44ecaSJames Bottomley  * otherwise schedules the function for delayed execution.
25871fa44ecaSJames Bottomley  *
25881fa44ecaSJames Bottomley  * Returns:	0 - function was executed
25891fa44ecaSJames Bottomley  *		1 - function was scheduled for execution
25901fa44ecaSJames Bottomley  */
259165f27f38SDavid Howells int execute_in_process_context(work_func_t fn, struct execute_work *ew)
25921fa44ecaSJames Bottomley {
25931fa44ecaSJames Bottomley 	if (!in_interrupt()) {
259465f27f38SDavid Howells 		fn(&ew->work);
25951fa44ecaSJames Bottomley 		return 0;
25961fa44ecaSJames Bottomley 	}
25971fa44ecaSJames Bottomley 
259865f27f38SDavid Howells 	INIT_WORK(&ew->work, fn);
25991fa44ecaSJames Bottomley 	schedule_work(&ew->work);
26001fa44ecaSJames Bottomley 
26011fa44ecaSJames Bottomley 	return 1;
26021fa44ecaSJames Bottomley }
26031fa44ecaSJames Bottomley EXPORT_SYMBOL_GPL(execute_in_process_context);
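
/*
 * Usage sketch (illustrative only; struct my_obj with an embedded
 * struct execute_work "ew" is hypothetical): handy for release paths
 * reachable from both process and interrupt context.
 *
 *	static void my_release_fn(struct work_struct *work)
 *	{
 *		struct my_obj *obj =
 *			container_of(work, struct my_obj, ew.work);
 *		kfree(obj);
 *	}
 *
 *	execute_in_process_context(my_release_fn, &obj->ew);
 */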
26041fa44ecaSJames Bottomley 
26051da177e4SLinus Torvalds int keventd_up(void)
26061da177e4SLinus Torvalds {
2607d320c038STejun Heo 	return system_wq != NULL;
26081da177e4SLinus Torvalds }
26091da177e4SLinus Torvalds 
2610bdbc5dd7STejun Heo static int alloc_cwqs(struct workqueue_struct *wq)
26110f900049STejun Heo {
26120f900049STejun Heo 	/*
26130f900049STejun Heo 	 * cwqs are forced aligned according to WORK_STRUCT_FLAG_BITS.
26140f900049STejun Heo 	 * Make sure that the alignment isn't lower than that of
26150f900049STejun Heo 	 * unsigned long long.
26160f900049STejun Heo 	 */
26170f900049STejun Heo 	const size_t size = sizeof(struct cpu_workqueue_struct);
26180f900049STejun Heo 	const size_t align = max_t(size_t, 1 << WORK_STRUCT_FLAG_BITS,
26190f900049STejun Heo 				   __alignof__(unsigned long long));
2620f3421797STejun Heo 
2621f3421797STejun Heo 	if (IS_ENABLED(CONFIG_SMP) && !(wq->flags & WQ_UNBOUND)) {
2622f3421797STejun Heo 		/* on SMP, percpu allocator can align itself */
2623f3421797STejun Heo 		wq->cpu_wq.pcpu = __alloc_percpu(size, align);
2624f3421797STejun Heo 	} else {
26250f900049STejun Heo 		void *ptr;
26260f900049STejun Heo 
26270f900049STejun Heo 		/*
2628f3421797STejun Heo 		 * Allocate enough room to align cwq and put an extra
2629f3421797STejun Heo 		 * pointer at the end pointing back to the originally
2630f3421797STejun Heo 		 * allocated pointer which will be used for free.
26310f900049STejun Heo 		 */
2632bdbc5dd7STejun Heo 		ptr = kzalloc(size + align + sizeof(void *), GFP_KERNEL);
2633bdbc5dd7STejun Heo 		if (ptr) {
2634bdbc5dd7STejun Heo 			wq->cpu_wq.single = PTR_ALIGN(ptr, align);
2635bdbc5dd7STejun Heo 			*(void **)(wq->cpu_wq.single + 1) = ptr;
2636bdbc5dd7STejun Heo 		}
2637f3421797STejun Heo 	}
2638f3421797STejun Heo 
26390f900049STejun Heo 	/* just in case, make sure it's actually aligned */
2640bdbc5dd7STejun Heo 	BUG_ON(!IS_ALIGNED(wq->cpu_wq.v, align));
2641bdbc5dd7STejun Heo 	return wq->cpu_wq.v ? 0 : -ENOMEM;
26420f900049STejun Heo }
26430f900049STejun Heo 
2644bdbc5dd7STejun Heo static void free_cwqs(struct workqueue_struct *wq)
26450f900049STejun Heo {
2646f3421797STejun Heo 	if (IS_ENABLED(CONFIG_SMP) && !(wq->flags & WQ_UNBOUND))
2647bdbc5dd7STejun Heo 		free_percpu(wq->cpu_wq.pcpu);
2648f3421797STejun Heo 	else if (wq->cpu_wq.single) {
2649f3421797STejun Heo 		/* the pointer to free is stored right after the cwq */
2650f3421797STejun Heo 		kfree(*(void **)(wq->cpu_wq.single + 1));
2651f3421797STejun Heo 	}
26520f900049STejun Heo }
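
/*
 * Sketch of the idiom used by alloc_cwqs()/free_cwqs() above
 * (illustrative only; struct foo is hypothetical): over-allocate, align
 * the object inside the buffer and stash the original pointer right
 * behind it for the free path.
 *
 *	void *ptr = kzalloc(size + align + sizeof(void *), GFP_KERNEL);
 *	struct foo *obj = PTR_ALIGN(ptr, align);
 *	*(void **)(obj + 1) = ptr;
 *	...
 *	kfree(*(void **)(obj + 1));
 *
 * The extra align + sizeof(void *) bytes guarantee that the aligned
 * object and the trailing back-pointer both fit.
 */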
26530f900049STejun Heo 
2654f3421797STejun Heo static int wq_clamp_max_active(int max_active, unsigned int flags,
2655f3421797STejun Heo 			       const char *name)
2656b71ab8c2STejun Heo {
2657f3421797STejun Heo 	int lim = flags & WQ_UNBOUND ? WQ_UNBOUND_MAX_ACTIVE : WQ_MAX_ACTIVE;
2658f3421797STejun Heo 
2659f3421797STejun Heo 	if (max_active < 1 || max_active > lim)
2660b71ab8c2STejun Heo 		printk(KERN_WARNING "workqueue: max_active %d requested for %s "
2661b71ab8c2STejun Heo 		       "is out of range, clamping between %d and %d\n",
2662f3421797STejun Heo 		       max_active, name, 1, lim);
2663b71ab8c2STejun Heo 
2664f3421797STejun Heo 	return clamp_val(max_active, 1, lim);
2665b71ab8c2STejun Heo }
2666b71ab8c2STejun Heo 
2667d320c038STejun Heo struct workqueue_struct *__alloc_workqueue_key(const char *name,
266897e37d7bSTejun Heo 					       unsigned int flags,
26691e19ffc6STejun Heo 					       int max_active,
2670eb13ba87SJohannes Berg 					       struct lock_class_key *key,
2671eb13ba87SJohannes Berg 					       const char *lock_name)
26723af24433SOleg Nesterov {
26733af24433SOleg Nesterov 	struct workqueue_struct *wq;
2674c34056a3STejun Heo 	unsigned int cpu;
26753af24433SOleg Nesterov 
2676f3421797STejun Heo 	/*
2677f3421797STejun Heo 	 * Unbound workqueues aren't concurrency managed and should be
2678f3421797STejun Heo 	 * dispatched to workers immediately.
2679f3421797STejun Heo 	 */
2680f3421797STejun Heo 	if (flags & WQ_UNBOUND)
2681f3421797STejun Heo 		flags |= WQ_HIGHPRI;
2682f3421797STejun Heo 
2683d320c038STejun Heo 	max_active = max_active ?: WQ_DFL_ACTIVE;
2684f3421797STejun Heo 	max_active = wq_clamp_max_active(max_active, flags, name);
26851e19ffc6STejun Heo 
26863af24433SOleg Nesterov 	wq = kzalloc(sizeof(*wq), GFP_KERNEL);
26873af24433SOleg Nesterov 	if (!wq)
26884690c4abSTejun Heo 		goto err;
26893af24433SOleg Nesterov 
269097e37d7bSTejun Heo 	wq->flags = flags;
2691a0a1a5fdSTejun Heo 	wq->saved_max_active = max_active;
269273f53c4aSTejun Heo 	mutex_init(&wq->flush_mutex);
269373f53c4aSTejun Heo 	atomic_set(&wq->nr_cwqs_to_flush, 0);
269473f53c4aSTejun Heo 	INIT_LIST_HEAD(&wq->flusher_queue);
269573f53c4aSTejun Heo 	INIT_LIST_HEAD(&wq->flusher_overflow);
2696502ca9d8STejun Heo 
26973af24433SOleg Nesterov 	wq->name = name;
2698eb13ba87SJohannes Berg 	lockdep_init_map(&wq->lockdep_map, lock_name, key, 0);
2699cce1a165SOleg Nesterov 	INIT_LIST_HEAD(&wq->list);
27003af24433SOleg Nesterov 
2701bdbc5dd7STejun Heo 	if (alloc_cwqs(wq) < 0)
2702bdbc5dd7STejun Heo 		goto err;
2703bdbc5dd7STejun Heo 
2704f3421797STejun Heo 	for_each_cwq_cpu(cpu, wq) {
27051537663fSTejun Heo 		struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
27068b03ae3cSTejun Heo 		struct global_cwq *gcwq = get_gcwq(cpu);
27071537663fSTejun Heo 
27080f900049STejun Heo 		BUG_ON((unsigned long)cwq & WORK_STRUCT_FLAG_MASK);
27098b03ae3cSTejun Heo 		cwq->gcwq = gcwq;
2710c34056a3STejun Heo 		cwq->wq = wq;
271173f53c4aSTejun Heo 		cwq->flush_color = -1;
27121e19ffc6STejun Heo 		cwq->max_active = max_active;
27131e19ffc6STejun Heo 		INIT_LIST_HEAD(&cwq->delayed_works);
2714e22bee78STejun Heo 	}
27151537663fSTejun Heo 
2716e22bee78STejun Heo 	if (flags & WQ_RESCUER) {
2717e22bee78STejun Heo 		struct worker *rescuer;
2718e22bee78STejun Heo 
2719e22bee78STejun Heo 		if (!alloc_cpumask_var(&wq->mayday_mask, GFP_KERNEL))
2720e22bee78STejun Heo 			goto err;
2721e22bee78STejun Heo 
2722e22bee78STejun Heo 		wq->rescuer = rescuer = alloc_worker();
2723e22bee78STejun Heo 		if (!rescuer)
2724e22bee78STejun Heo 			goto err;
2725e22bee78STejun Heo 
2726e22bee78STejun Heo 		rescuer->task = kthread_create(rescuer_thread, wq, "%s", name);
2727e22bee78STejun Heo 		if (IS_ERR(rescuer->task))
2728e22bee78STejun Heo 			goto err;
2729e22bee78STejun Heo 
2730e22bee78STejun Heo 		wq->rescuer = rescuer;
2732e22bee78STejun Heo 		wake_up_process(rescuer->task);
27333af24433SOleg Nesterov 	}
27341537663fSTejun Heo 
2735a0a1a5fdSTejun Heo 	/*
2736a0a1a5fdSTejun Heo 	 * workqueue_lock protects global freeze state and workqueues
2737a0a1a5fdSTejun Heo 	 * list.  Grab it, set max_active accordingly and add the new
2738a0a1a5fdSTejun Heo 	 * workqueue to workqueues list.
2739a0a1a5fdSTejun Heo 	 */
27401537663fSTejun Heo 	spin_lock(&workqueue_lock);
2741a0a1a5fdSTejun Heo 
2742a0a1a5fdSTejun Heo 	if (workqueue_freezing && wq->flags & WQ_FREEZEABLE)
2743f3421797STejun Heo 		for_each_cwq_cpu(cpu, wq)
2744a0a1a5fdSTejun Heo 			get_cwq(cpu, wq)->max_active = 0;
2745a0a1a5fdSTejun Heo 
27461537663fSTejun Heo 	list_add(&wq->list, &workqueues);
2747a0a1a5fdSTejun Heo 
27481537663fSTejun Heo 	spin_unlock(&workqueue_lock);
27491537663fSTejun Heo 
27503af24433SOleg Nesterov 	return wq;
27514690c4abSTejun Heo err:
27524690c4abSTejun Heo 	if (wq) {
2753bdbc5dd7STejun Heo 		free_cwqs(wq);
2754e22bee78STejun Heo 		free_cpumask_var(wq->mayday_mask);
2755e22bee78STejun Heo 		kfree(wq->rescuer);
27564690c4abSTejun Heo 		kfree(wq);
27574690c4abSTejun Heo 	}
27584690c4abSTejun Heo 	return NULL;
27593af24433SOleg Nesterov }
2760d320c038STejun Heo EXPORT_SYMBOL_GPL(__alloc_workqueue_key);
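
/*
 * Usage sketch (illustrative only; "my_wq" and my_work are
 * hypothetical): callers normally reach this through the
 * alloc_workqueue() wrapper macro, which supplies the lockdep key.
 *
 *	my_wq = alloc_workqueue("my_wq", WQ_RESCUER, 1);
 *	if (!my_wq)
 *		return -ENOMEM;
 *	queue_work(my_wq, &my_work);
 *	...
 *	destroy_workqueue(my_wq);
 *
 * WQ_RESCUER guarantees forward progress under memory pressure and
 * max_active = 1 limits the queue to one in-flight work item per cpu.
 */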
27613af24433SOleg Nesterov 
27623af24433SOleg Nesterov /**
27633af24433SOleg Nesterov  * destroy_workqueue - safely terminate a workqueue
27643af24433SOleg Nesterov  * @wq: target workqueue
27653af24433SOleg Nesterov  *
27663af24433SOleg Nesterov  * Safely destroy a workqueue. All work currently pending will be done first.
27673af24433SOleg Nesterov  */
27683af24433SOleg Nesterov void destroy_workqueue(struct workqueue_struct *wq)
27693af24433SOleg Nesterov {
2770c8e55f36STejun Heo 	unsigned int cpu;
27713af24433SOleg Nesterov 
2772a0a1a5fdSTejun Heo 	flush_workqueue(wq);
2773a0a1a5fdSTejun Heo 
2774a0a1a5fdSTejun Heo 	/*
2775a0a1a5fdSTejun Heo 	 * wq list is used to freeze wq, remove from list after
2776a0a1a5fdSTejun Heo 	 * flushing is complete in case freeze races us.
2777a0a1a5fdSTejun Heo 	 */
277895402b38SGautham R Shenoy 	spin_lock(&workqueue_lock);
27793af24433SOleg Nesterov 	list_del(&wq->list);
278095402b38SGautham R Shenoy 	spin_unlock(&workqueue_lock);
27813af24433SOleg Nesterov 
2782e22bee78STejun Heo 	/* sanity check */
2783f3421797STejun Heo 	for_each_cwq_cpu(cpu, wq) {
278473f53c4aSTejun Heo 		struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
278573f53c4aSTejun Heo 		int i;
278673f53c4aSTejun Heo 
278773f53c4aSTejun Heo 		for (i = 0; i < WORK_NR_COLORS; i++)
278873f53c4aSTejun Heo 			BUG_ON(cwq->nr_in_flight[i]);
27891e19ffc6STejun Heo 		BUG_ON(cwq->nr_active);
27901e19ffc6STejun Heo 		BUG_ON(!list_empty(&cwq->delayed_works));
279173f53c4aSTejun Heo 	}
27921537663fSTejun Heo 
2793e22bee78STejun Heo 	if (wq->flags & WQ_RESCUER) {
2794e22bee78STejun Heo 		kthread_stop(wq->rescuer->task);
2795e22bee78STejun Heo 		free_cpumask_var(wq->mayday_mask);
2796e22bee78STejun Heo 	}
2797e22bee78STejun Heo 
2798bdbc5dd7STejun Heo 	free_cwqs(wq);
27993af24433SOleg Nesterov 	kfree(wq);
28003af24433SOleg Nesterov }
28013af24433SOleg Nesterov EXPORT_SYMBOL_GPL(destroy_workqueue);
28023af24433SOleg Nesterov 
2803dcd989cbSTejun Heo /**
2804dcd989cbSTejun Heo  * workqueue_set_max_active - adjust max_active of a workqueue
2805dcd989cbSTejun Heo  * @wq: target workqueue
2806dcd989cbSTejun Heo  * @max_active: new max_active value.
2807dcd989cbSTejun Heo  *
2808dcd989cbSTejun Heo  * Set max_active of @wq to @max_active.
2809dcd989cbSTejun Heo  *
2810dcd989cbSTejun Heo  * CONTEXT:
2811dcd989cbSTejun Heo  * Don't call from IRQ context.
2812dcd989cbSTejun Heo  */
2813dcd989cbSTejun Heo void workqueue_set_max_active(struct workqueue_struct *wq, int max_active)
2814dcd989cbSTejun Heo {
2815dcd989cbSTejun Heo 	unsigned int cpu;
2816dcd989cbSTejun Heo 
2817f3421797STejun Heo 	max_active = wq_clamp_max_active(max_active, wq->flags, wq->name);
2818dcd989cbSTejun Heo 
2819dcd989cbSTejun Heo 	spin_lock(&workqueue_lock);
2820dcd989cbSTejun Heo 
2821dcd989cbSTejun Heo 	wq->saved_max_active = max_active;
2822dcd989cbSTejun Heo 
2823f3421797STejun Heo 	for_each_cwq_cpu(cpu, wq) {
2824dcd989cbSTejun Heo 		struct global_cwq *gcwq = get_gcwq(cpu);
2825dcd989cbSTejun Heo 
2826dcd989cbSTejun Heo 		spin_lock_irq(&gcwq->lock);
2827dcd989cbSTejun Heo 
2828dcd989cbSTejun Heo 		if (!(wq->flags & WQ_FREEZEABLE) ||
2829dcd989cbSTejun Heo 		    !(gcwq->flags & GCWQ_FREEZING))
2830dcd989cbSTejun Heo 			get_cwq(gcwq->cpu, wq)->max_active = max_active;
2831dcd989cbSTejun Heo 
2832dcd989cbSTejun Heo 		spin_unlock_irq(&gcwq->lock);
2833dcd989cbSTejun Heo 	}
2834dcd989cbSTejun Heo 
2835dcd989cbSTejun Heo 	spin_unlock(&workqueue_lock);
2836dcd989cbSTejun Heo }
2837dcd989cbSTejun Heo EXPORT_SYMBOL_GPL(workqueue_set_max_active);
2838dcd989cbSTejun Heo 
2839dcd989cbSTejun Heo /**
2840dcd989cbSTejun Heo  * workqueue_congested - test whether a workqueue is congested
2841dcd989cbSTejun Heo  * @cpu: CPU in question
2842dcd989cbSTejun Heo  * @wq: target workqueue
2843dcd989cbSTejun Heo  *
2844dcd989cbSTejun Heo  * Test whether @wq's cpu workqueue for @cpu is congested.  There is
2845dcd989cbSTejun Heo  * no synchronization around this function and the test result is
2846dcd989cbSTejun Heo  * unreliable and only useful as advisory hints or for debugging.
2847dcd989cbSTejun Heo  *
2848dcd989cbSTejun Heo  * RETURNS:
2849dcd989cbSTejun Heo  * %true if congested, %false otherwise.
2850dcd989cbSTejun Heo  */
2851dcd989cbSTejun Heo bool workqueue_congested(unsigned int cpu, struct workqueue_struct *wq)
2852dcd989cbSTejun Heo {
2853dcd989cbSTejun Heo 	struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
2854dcd989cbSTejun Heo 
2855dcd989cbSTejun Heo 	return !list_empty(&cwq->delayed_works);
2856dcd989cbSTejun Heo }
2857dcd989cbSTejun Heo EXPORT_SYMBOL_GPL(workqueue_congested);
2858dcd989cbSTejun Heo 
2859dcd989cbSTejun Heo /**
2860dcd989cbSTejun Heo  * work_cpu - return the last known associated cpu for @work
2861dcd989cbSTejun Heo  * @work: the work of interest
2862dcd989cbSTejun Heo  *
2863dcd989cbSTejun Heo  * RETURNS:
2864bdbc5dd7STejun Heo  * CPU number if @work was ever queued.  WORK_CPU_NONE otherwise.
2865dcd989cbSTejun Heo  */
2866dcd989cbSTejun Heo unsigned int work_cpu(struct work_struct *work)
2867dcd989cbSTejun Heo {
2868dcd989cbSTejun Heo 	struct global_cwq *gcwq = get_work_gcwq(work);
2869dcd989cbSTejun Heo 
2870bdbc5dd7STejun Heo 	return gcwq ? gcwq->cpu : WORK_CPU_NONE;
2871dcd989cbSTejun Heo }
2872dcd989cbSTejun Heo EXPORT_SYMBOL_GPL(work_cpu);
2873dcd989cbSTejun Heo 
2874dcd989cbSTejun Heo /**
2875dcd989cbSTejun Heo  * work_busy - test whether a work is currently pending or running
2876dcd989cbSTejun Heo  * @work: the work to be tested
2877dcd989cbSTejun Heo  *
2878dcd989cbSTejun Heo  * Test whether @work is currently pending or running.  There is no
2879dcd989cbSTejun Heo  * synchronization around this function and the test result is
2880dcd989cbSTejun Heo  * unreliable and only useful as advisory hints or for debugging.
2881dcd989cbSTejun Heo  * Especially for reentrant wqs, the pending state might hide the
2882dcd989cbSTejun Heo  * running state.
2883dcd989cbSTejun Heo  *
2884dcd989cbSTejun Heo  * RETURNS:
2885dcd989cbSTejun Heo  * OR'd bitmask of WORK_BUSY_* bits.
2886dcd989cbSTejun Heo  */
2887dcd989cbSTejun Heo unsigned int work_busy(struct work_struct *work)
2888dcd989cbSTejun Heo {
2889dcd989cbSTejun Heo 	struct global_cwq *gcwq = get_work_gcwq(work);
2890dcd989cbSTejun Heo 	unsigned long flags;
2891dcd989cbSTejun Heo 	unsigned int ret = 0;
2892dcd989cbSTejun Heo 
2893dcd989cbSTejun Heo 	if (!gcwq)
2894dcd989cbSTejun Heo 		return 0;
2895dcd989cbSTejun Heo 
2896dcd989cbSTejun Heo 	spin_lock_irqsave(&gcwq->lock, flags);
2897dcd989cbSTejun Heo 
2898dcd989cbSTejun Heo 	if (work_pending(work))
2899dcd989cbSTejun Heo 		ret |= WORK_BUSY_PENDING;
2900dcd989cbSTejun Heo 	if (find_worker_executing_work(gcwq, work))
2901dcd989cbSTejun Heo 		ret |= WORK_BUSY_RUNNING;
2902dcd989cbSTejun Heo 
2903dcd989cbSTejun Heo 	spin_unlock_irqrestore(&gcwq->lock, flags);
2904dcd989cbSTejun Heo 
2905dcd989cbSTejun Heo 	return ret;
2906dcd989cbSTejun Heo }
2907dcd989cbSTejun Heo EXPORT_SYMBOL_GPL(work_busy);
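
/*
 * Usage sketch (illustrative only; dev->work is hypothetical): as
 * documented above the result is advisory, so use it for debug output
 * and hints only.
 *
 *	unsigned int busy = work_busy(&dev->work);
 *
 *	pr_debug("work:%s%s\n",
 *		 busy & WORK_BUSY_PENDING ? " pending" : "",
 *		 busy & WORK_BUSY_RUNNING ? " running" : "");
 */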
2908dcd989cbSTejun Heo 
2909db7bccf4STejun Heo /*
2910db7bccf4STejun Heo  * CPU hotplug.
2911db7bccf4STejun Heo  *
2912e22bee78STejun Heo  * There are two challenges in supporting CPU hotplug.  Firstly, there
2913e22bee78STejun Heo  * are a lot of assumptions on strong associations among work, cwq and
2914e22bee78STejun Heo  * gcwq which make migrating pending and scheduled works very
2915e22bee78STejun Heo  * difficult to implement without impacting hot paths.  Secondly,
2916e22bee78STejun Heo  * gcwqs serve a mix of short, long and very long running works, making
2917e22bee78STejun Heo  * blocked draining impractical.
2918e22bee78STejun Heo  *
2919e22bee78STejun Heo  * This is solved by allowing a gcwq to be detached from CPU, running
2920e22bee78STejun Heo  * it with unbound (rogue) workers and allowing it to be reattached
2921e22bee78STejun Heo  * later if the cpu comes back online.  A separate thread is created
2922e22bee78STejun Heo  * to govern a gcwq in such state and is called the trustee of the
2923e22bee78STejun Heo  * gcwq.
2924db7bccf4STejun Heo  *
2925db7bccf4STejun Heo  * Trustee states and their descriptions.
2926db7bccf4STejun Heo  *
2927db7bccf4STejun Heo  * START	Command state used on startup.  On CPU_DOWN_PREPARE, a
2928db7bccf4STejun Heo  *		new trustee is started with this state.
2929db7bccf4STejun Heo  *
2930db7bccf4STejun Heo  * IN_CHARGE	Once started, trustee will enter this state after
2931e22bee78STejun Heo  *		assuming the manager role and making all existing
2932e22bee78STejun Heo  *		workers rogue.  DOWN_PREPARE waits for trustee to
2933e22bee78STejun Heo  *		enter this state.  After reaching IN_CHARGE, trustee
2934e22bee78STejun Heo  *		tries to execute the pending worklist until it's empty
2935e22bee78STejun Heo  *		and the state is set to BUTCHER, or the state is set
2936e22bee78STejun Heo  *		to RELEASE.
2937db7bccf4STejun Heo  *
2938db7bccf4STejun Heo  * BUTCHER	Command state which is set by the cpu callback after
2939db7bccf4STejun Heo  *		the cpu has gone down.  Once this state is set the trustee
2940db7bccf4STejun Heo  *		knows that there will be no new works on the worklist
2941db7bccf4STejun Heo  *		and once the worklist is empty it can proceed to
2942db7bccf4STejun Heo  *		killing idle workers.
2943db7bccf4STejun Heo  *
2944db7bccf4STejun Heo  * RELEASE	Command state which is set by the cpu callback if the
2945db7bccf4STejun Heo  *		cpu down has been canceled or it has come online
2946db7bccf4STejun Heo  *		again.  After recognizing this state, trustee stops
2947e22bee78STejun Heo  *		trying to drain or butcher and clears ROGUE, rebinds
2948e22bee78STejun Heo  *		all remaining workers back to the cpu and releases
2949e22bee78STejun Heo  *		manager role.
2950db7bccf4STejun Heo  *
2951db7bccf4STejun Heo  * DONE		Trustee will enter this state after BUTCHER or RELEASE
2952db7bccf4STejun Heo  *		is complete.
2953db7bccf4STejun Heo  *
2954db7bccf4STejun Heo  *          trustee                 CPU                draining
2955db7bccf4STejun Heo  *         took over                down               complete
2956db7bccf4STejun Heo  * START -----------> IN_CHARGE -----------> BUTCHER -----------> DONE
2957db7bccf4STejun Heo  *                        |                     |                  ^
2958db7bccf4STejun Heo  *                        | CPU is back online  v   return workers |
2959db7bccf4STejun Heo  *                         ----------------> RELEASE --------------
2960db7bccf4STejun Heo  */
2961db7bccf4STejun Heo 
2962db7bccf4STejun Heo /**
2963db7bccf4STejun Heo  * trustee_wait_event_timeout - timed event wait for trustee
2964db7bccf4STejun Heo  * @cond: condition to wait for
2965db7bccf4STejun Heo  * @timeout: timeout in jiffies
2966db7bccf4STejun Heo  *
2967db7bccf4STejun Heo  * wait_event_timeout() for trustee to use.  Handles locking and
2968db7bccf4STejun Heo  * checks for RELEASE request.
2969db7bccf4STejun Heo  *
2970db7bccf4STejun Heo  * CONTEXT:
2971db7bccf4STejun Heo  * spin_lock_irq(gcwq->lock) which may be released and regrabbed
2972db7bccf4STejun Heo  * multiple times.  To be used by trustee.
2973db7bccf4STejun Heo  *
2974db7bccf4STejun Heo  * RETURNS:
2975db7bccf4STejun Heo  * Positive value indicating the time left if @cond is satisfied, 0 if timed
2976db7bccf4STejun Heo  * out, -1 if canceled.
2977db7bccf4STejun Heo  */
2978db7bccf4STejun Heo #define trustee_wait_event_timeout(cond, timeout) ({			\
2979db7bccf4STejun Heo 	long __ret = (timeout);						\
2980db7bccf4STejun Heo 	while (!((cond) || (gcwq->trustee_state == TRUSTEE_RELEASE)) &&	\
2981db7bccf4STejun Heo 	       __ret) {							\
2982db7bccf4STejun Heo 		spin_unlock_irq(&gcwq->lock);				\
2983db7bccf4STejun Heo 		__wait_event_timeout(gcwq->trustee_wait, (cond) ||	\
2984db7bccf4STejun Heo 			(gcwq->trustee_state == TRUSTEE_RELEASE),	\
2985db7bccf4STejun Heo 			__ret);						\
2986db7bccf4STejun Heo 		spin_lock_irq(&gcwq->lock);				\
2987db7bccf4STejun Heo 	}								\
2988db7bccf4STejun Heo 	gcwq->trustee_state == TRUSTEE_RELEASE ? -1 : (__ret);		\
2989db7bccf4STejun Heo })
2990db7bccf4STejun Heo 
2991db7bccf4STejun Heo /**
2992db7bccf4STejun Heo  * trustee_wait_event - event wait for trustee
2993db7bccf4STejun Heo  * @cond: condition to wait for
2994db7bccf4STejun Heo  *
2995db7bccf4STejun Heo  * wait_event() for trustee to use.  Automatically handles locking and
2996db7bccf4STejun Heo  * checks for RELEASE request.
2997db7bccf4STejun Heo  *
2998db7bccf4STejun Heo  * CONTEXT:
2999db7bccf4STejun Heo  * spin_lock_irq(gcwq->lock) which may be released and regrabbed
3000db7bccf4STejun Heo  * multiple times.  To be used by trustee.
3001db7bccf4STejun Heo  *
3002db7bccf4STejun Heo  * RETURNS:
3003db7bccf4STejun Heo  * 0 if @cond is satisfied, -1 if canceled.
3004db7bccf4STejun Heo  */
3005db7bccf4STejun Heo #define trustee_wait_event(cond) ({					\
3006db7bccf4STejun Heo 	long __ret1;							\
3007db7bccf4STejun Heo 	__ret1 = trustee_wait_event_timeout(cond, MAX_SCHEDULE_TIMEOUT);\
3008db7bccf4STejun Heo 	__ret1 < 0 ? -1 : 0;						\
3009db7bccf4STejun Heo })
3010db7bccf4STejun Heo 
3011db7bccf4STejun Heo static int __cpuinit trustee_thread(void *__gcwq)
3012db7bccf4STejun Heo {
3013db7bccf4STejun Heo 	struct global_cwq *gcwq = __gcwq;
3014db7bccf4STejun Heo 	struct worker *worker;
3015e22bee78STejun Heo 	struct work_struct *work;
3016db7bccf4STejun Heo 	struct hlist_node *pos;
3017e22bee78STejun Heo 	long rc;
3018db7bccf4STejun Heo 	int i;
3019db7bccf4STejun Heo 
3020db7bccf4STejun Heo 	BUG_ON(gcwq->cpu != smp_processor_id());
3021db7bccf4STejun Heo 
3022db7bccf4STejun Heo 	spin_lock_irq(&gcwq->lock);
3023db7bccf4STejun Heo 	/*
3024e22bee78STejun Heo 	 * Claim the manager position and make all workers rogue.
3025e22bee78STejun Heo 	 * Trustee must be bound to the target cpu and can't be
3026e22bee78STejun Heo 	 * cancelled.
3027db7bccf4STejun Heo 	 */
3028db7bccf4STejun Heo 	BUG_ON(gcwq->cpu != smp_processor_id());
3029e22bee78STejun Heo 	rc = trustee_wait_event(!(gcwq->flags & GCWQ_MANAGING_WORKERS));
3030e22bee78STejun Heo 	BUG_ON(rc < 0);
3031e22bee78STejun Heo 
3032e22bee78STejun Heo 	gcwq->flags |= GCWQ_MANAGING_WORKERS;
3033db7bccf4STejun Heo 
3034db7bccf4STejun Heo 	list_for_each_entry(worker, &gcwq->idle_list, entry)
3035cb444766STejun Heo 		worker->flags |= WORKER_ROGUE;
3036db7bccf4STejun Heo 
3037db7bccf4STejun Heo 	for_each_busy_worker(worker, i, pos, gcwq)
3038cb444766STejun Heo 		worker->flags |= WORKER_ROGUE;
3039db7bccf4STejun Heo 
3040db7bccf4STejun Heo 	/*
3041e22bee78STejun Heo 	 * Call schedule() so that we cross rq->lock and thus can
3042e22bee78STejun Heo 	 * guarantee sched callbacks see the rogue flag.  This is
3043e22bee78STejun Heo 	 * necessary as scheduler callbacks may be invoked from other
3044e22bee78STejun Heo 	 * cpus.
3045e22bee78STejun Heo 	 */
3046e22bee78STejun Heo 	spin_unlock_irq(&gcwq->lock);
3047e22bee78STejun Heo 	schedule();
3048e22bee78STejun Heo 	spin_lock_irq(&gcwq->lock);
3049e22bee78STejun Heo 
3050e22bee78STejun Heo 	/*
3051cb444766STejun Heo 	 * Sched callbacks are disabled now.  Zap nr_running.  After
3052cb444766STejun Heo 	 * this, nr_running stays zero and need_more_worker() and
3053cb444766STejun Heo 	 * keep_working() are always true as long as the worklist is
3054cb444766STejun Heo 	 * not empty.
3055e22bee78STejun Heo 	 */
3056cb444766STejun Heo 	atomic_set(get_gcwq_nr_running(gcwq->cpu), 0);
3057e22bee78STejun Heo 
3058e22bee78STejun Heo 	spin_unlock_irq(&gcwq->lock);
3059e22bee78STejun Heo 	del_timer_sync(&gcwq->idle_timer);
3060e22bee78STejun Heo 	spin_lock_irq(&gcwq->lock);
3061e22bee78STejun Heo 
3062e22bee78STejun Heo 	/*
3063db7bccf4STejun Heo 	 * We're now in charge.  Notify and proceed to drain.  We need
3064db7bccf4STejun Heo 	 * to keep the gcwq running during the whole CPU down
3065db7bccf4STejun Heo 	 * procedure as other cpu hotunplug callbacks may need to
3066db7bccf4STejun Heo 	 * flush currently running tasks.
3067db7bccf4STejun Heo 	 */
3068db7bccf4STejun Heo 	gcwq->trustee_state = TRUSTEE_IN_CHARGE;
3069db7bccf4STejun Heo 	wake_up_all(&gcwq->trustee_wait);
3070db7bccf4STejun Heo 
3071db7bccf4STejun Heo 	/*
3072db7bccf4STejun Heo 	 * The original cpu is in the process of dying and may go away
3073db7bccf4STejun Heo 	 * anytime now.  When that happens, we and all workers would
3074e22bee78STejun Heo 	 * be migrated to other cpus.  Try draining any left work.  We
3075e22bee78STejun Heo 	 * want to get it over with ASAP - spam rescuers, wake up as
3076e22bee78STejun Heo 	 * many idlers as necessary and create new ones till the
3077e22bee78STejun Heo 	 * worklist is empty.  Note that if the gcwq is frozen, there
3078e22bee78STejun Heo 	 * may be frozen works in freezeable cwqs.  Don't declare
3079e22bee78STejun Heo 	 * completion while frozen.
3080db7bccf4STejun Heo 	 */
3081db7bccf4STejun Heo 	while (gcwq->nr_workers != gcwq->nr_idle ||
3082db7bccf4STejun Heo 	       gcwq->flags & GCWQ_FREEZING ||
3083db7bccf4STejun Heo 	       gcwq->trustee_state == TRUSTEE_IN_CHARGE) {
3084e22bee78STejun Heo 		int nr_works = 0;
3085e22bee78STejun Heo 
3086e22bee78STejun Heo 		list_for_each_entry(work, &gcwq->worklist, entry) {
3087e22bee78STejun Heo 			send_mayday(work);
3088e22bee78STejun Heo 			nr_works++;
3089e22bee78STejun Heo 		}
3090e22bee78STejun Heo 
3091e22bee78STejun Heo 		list_for_each_entry(worker, &gcwq->idle_list, entry) {
3092e22bee78STejun Heo 			if (!nr_works--)
3093e22bee78STejun Heo 				break;
3094e22bee78STejun Heo 			wake_up_process(worker->task);
3095e22bee78STejun Heo 		}
3096e22bee78STejun Heo 
3097e22bee78STejun Heo 		if (need_to_create_worker(gcwq)) {
3098e22bee78STejun Heo 			spin_unlock_irq(&gcwq->lock);
3099e22bee78STejun Heo 			worker = create_worker(gcwq, false);
3100e22bee78STejun Heo 			spin_lock_irq(&gcwq->lock);
3101e22bee78STejun Heo 			if (worker) {
3102cb444766STejun Heo 				worker->flags |= WORKER_ROGUE;
3103e22bee78STejun Heo 				start_worker(worker);
3104e22bee78STejun Heo 			}
3105e22bee78STejun Heo 		}
3106e22bee78STejun Heo 
3107db7bccf4STejun Heo 		/* give a breather */
3108db7bccf4STejun Heo 		if (trustee_wait_event_timeout(false, TRUSTEE_COOLDOWN) < 0)
3109db7bccf4STejun Heo 			break;
3110db7bccf4STejun Heo 	}
3111db7bccf4STejun Heo 
3112e22bee78STejun Heo 	/*
3113e22bee78STejun Heo 	 * Either all works have been scheduled and cpu is down, or
3114e22bee78STejun Heo 	 * cpu down has already been canceled.  Wait for and butcher
3115e22bee78STejun Heo 	 * all workers till we're canceled.
3116e22bee78STejun Heo 	 */
3117e22bee78STejun Heo 	do {
3118e22bee78STejun Heo 		rc = trustee_wait_event(!list_empty(&gcwq->idle_list));
3119e22bee78STejun Heo 		while (!list_empty(&gcwq->idle_list))
3120e22bee78STejun Heo 			destroy_worker(list_first_entry(&gcwq->idle_list,
3121e22bee78STejun Heo 							struct worker, entry));
3122e22bee78STejun Heo 	} while (gcwq->nr_workers && rc >= 0);
3123e22bee78STejun Heo 
3124e22bee78STejun Heo 	/*
3125e22bee78STejun Heo 	 * At this point, either draining has completed and no worker
3126e22bee78STejun Heo 	 * is left, or cpu down has been canceled or the cpu is being
3127e22bee78STejun Heo 	 * brought back up.  There shouldn't be any idle workers left.
3128e22bee78STejun Heo 	 * Tell the remaining busy ones to rebind once they finish their
3129e22bee78STejun Heo 	 * currently scheduled works by scheduling the rebind_work.
3130e22bee78STejun Heo 	 */
3131e22bee78STejun Heo 	WARN_ON(!list_empty(&gcwq->idle_list));
3132e22bee78STejun Heo 
3133e22bee78STejun Heo 	for_each_busy_worker(worker, i, pos, gcwq) {
3134e22bee78STejun Heo 		struct work_struct *rebind_work = &worker->rebind_work;
3135e22bee78STejun Heo 
3136e22bee78STejun Heo 		/*
3137e22bee78STejun Heo 		 * Rebind_work may race with future cpu hotplug
3138e22bee78STejun Heo 		 * operations.  Use a separate flag to mark that
3139e22bee78STejun Heo 		 * rebinding is scheduled.
3140e22bee78STejun Heo 		 */
3141cb444766STejun Heo 		worker->flags |= WORKER_REBIND;
3142cb444766STejun Heo 		worker->flags &= ~WORKER_ROGUE;
3143e22bee78STejun Heo 
3144e22bee78STejun Heo 		/* queue rebind_work, wq doesn't matter, use the default one */
3145e22bee78STejun Heo 		if (test_and_set_bit(WORK_STRUCT_PENDING_BIT,
3146e22bee78STejun Heo 				     work_data_bits(rebind_work)))
3147e22bee78STejun Heo 			continue;
3148e22bee78STejun Heo 
3149e22bee78STejun Heo 		debug_work_activate(rebind_work);
3150d320c038STejun Heo 		insert_work(get_cwq(gcwq->cpu, system_wq), rebind_work,
3151e22bee78STejun Heo 			    worker->scheduled.next,
3152e22bee78STejun Heo 			    work_color_to_flags(WORK_NO_COLOR));
3153e22bee78STejun Heo 	}
3154e22bee78STejun Heo 
3155e22bee78STejun Heo 	/* relinquish manager role */
3156e22bee78STejun Heo 	gcwq->flags &= ~GCWQ_MANAGING_WORKERS;
3157e22bee78STejun Heo 
3158db7bccf4STejun Heo 	/* notify completion */
3159db7bccf4STejun Heo 	gcwq->trustee = NULL;
3160db7bccf4STejun Heo 	gcwq->trustee_state = TRUSTEE_DONE;
3161db7bccf4STejun Heo 	wake_up_all(&gcwq->trustee_wait);
3162db7bccf4STejun Heo 	spin_unlock_irq(&gcwq->lock);
3163db7bccf4STejun Heo 	return 0;
3164db7bccf4STejun Heo }
3165db7bccf4STejun Heo 
3166db7bccf4STejun Heo /**
3167db7bccf4STejun Heo  * wait_trustee_state - wait for trustee to enter the specified state
3168db7bccf4STejun Heo  * @gcwq: gcwq the trustee of interest belongs to
3169db7bccf4STejun Heo  * @state: target state to wait for
3170db7bccf4STejun Heo  *
3171db7bccf4STejun Heo  * Wait for the trustee to reach @state.  DONE is already matched.
3172db7bccf4STejun Heo  *
3173db7bccf4STejun Heo  * CONTEXT:
3174db7bccf4STejun Heo  * spin_lock_irq(gcwq->lock) which may be released and regrabbed
3175db7bccf4STejun Heo  * multiple times.  To be used by cpu_callback.
3176db7bccf4STejun Heo  */
3177db7bccf4STejun Heo static void __cpuinit wait_trustee_state(struct global_cwq *gcwq, int state)
3178db7bccf4STejun Heo {
3179db7bccf4STejun Heo 	if (!(gcwq->trustee_state == state ||
3180db7bccf4STejun Heo 	      gcwq->trustee_state == TRUSTEE_DONE)) {
3181db7bccf4STejun Heo 		spin_unlock_irq(&gcwq->lock);
3182db7bccf4STejun Heo 		__wait_event(gcwq->trustee_wait,
3183db7bccf4STejun Heo 			     gcwq->trustee_state == state ||
3184db7bccf4STejun Heo 			     gcwq->trustee_state == TRUSTEE_DONE);
3185db7bccf4STejun Heo 		spin_lock_irq(&gcwq->lock);
3186db7bccf4STejun Heo 	}
3187db7bccf4STejun Heo }
3188db7bccf4STejun Heo 
31899c7b216dSChandra Seetharaman static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
31901da177e4SLinus Torvalds 						unsigned long action,
31911da177e4SLinus Torvalds 						void *hcpu)
31921da177e4SLinus Torvalds {
31933af24433SOleg Nesterov 	unsigned int cpu = (unsigned long)hcpu;
3194db7bccf4STejun Heo 	struct global_cwq *gcwq = get_gcwq(cpu);
3195db7bccf4STejun Heo 	struct task_struct *new_trustee = NULL;
3196e22bee78STejun Heo 	struct worker *uninitialized_var(new_worker);
3197db7bccf4STejun Heo 	unsigned long flags;
31981da177e4SLinus Torvalds 
31998bb78442SRafael J. Wysocki 	action &= ~CPU_TASKS_FROZEN;
32008bb78442SRafael J. Wysocki 
3201db7bccf4STejun Heo 	switch (action) {
3202db7bccf4STejun Heo 	case CPU_DOWN_PREPARE:
3203db7bccf4STejun Heo 		new_trustee = kthread_create(trustee_thread, gcwq,
3204db7bccf4STejun Heo 					     "workqueue_trustee/%d", cpu);
3205db7bccf4STejun Heo 		if (IS_ERR(new_trustee))
3206db7bccf4STejun Heo 			return notifier_from_errno(PTR_ERR(new_trustee));
3207db7bccf4STejun Heo 		kthread_bind(new_trustee, cpu);
3208e22bee78STejun Heo 		/* fall through */
3209e22bee78STejun Heo 	case CPU_UP_PREPARE:
3210e22bee78STejun Heo 		BUG_ON(gcwq->first_idle);
3211e22bee78STejun Heo 		new_worker = create_worker(gcwq, false);
3212e22bee78STejun Heo 		if (!new_worker) {
3213e22bee78STejun Heo 			if (new_trustee)
3214e22bee78STejun Heo 				kthread_stop(new_trustee);
3215e22bee78STejun Heo 			return NOTIFY_BAD;
3216e22bee78STejun Heo 		}
3217db7bccf4STejun Heo 	}
32181537663fSTejun Heo 
3219db7bccf4STejun Heo 	/* some are called w/ irq disabled, don't disturb irq status */
3220db7bccf4STejun Heo 	spin_lock_irqsave(&gcwq->lock, flags);
32213af24433SOleg Nesterov 
32223af24433SOleg Nesterov 	switch (action) {
3223db7bccf4STejun Heo 	case CPU_DOWN_PREPARE:
3224db7bccf4STejun Heo 		/* initialize trustee and tell it to acquire the gcwq */
3225db7bccf4STejun Heo 		BUG_ON(gcwq->trustee || gcwq->trustee_state != TRUSTEE_DONE);
3226db7bccf4STejun Heo 		gcwq->trustee = new_trustee;
3227db7bccf4STejun Heo 		gcwq->trustee_state = TRUSTEE_START;
3228db7bccf4STejun Heo 		wake_up_process(gcwq->trustee);
3229db7bccf4STejun Heo 		wait_trustee_state(gcwq, TRUSTEE_IN_CHARGE);
3230e22bee78STejun Heo 		/* fall through */
3231e22bee78STejun Heo 	case CPU_UP_PREPARE:
3232e22bee78STejun Heo 		BUG_ON(gcwq->first_idle);
3233e22bee78STejun Heo 		gcwq->first_idle = new_worker;
3234e22bee78STejun Heo 		break;
3235e22bee78STejun Heo 
3236e22bee78STejun Heo 	case CPU_DYING:
3237e22bee78STejun Heo 		/*
3238e22bee78STejun Heo 		 * Before this, the trustee and all workers except for
3239e22bee78STejun Heo 		 * the ones which are still executing works from
3240e22bee78STejun Heo 		 * before the last CPU down must be on the cpu.  After
3241e22bee78STejun Heo 		 * this, they'll all be diasporas.
3242e22bee78STejun Heo 		 * this, they'll all be dispersed to other cpus.
3243e22bee78STejun Heo 		gcwq->flags |= GCWQ_DISASSOCIATED;
3244db7bccf4STejun Heo 		break;
3245db7bccf4STejun Heo 
32463da1c84cSOleg Nesterov 	case CPU_POST_DEAD:
3247db7bccf4STejun Heo 		gcwq->trustee_state = TRUSTEE_BUTCHER;
3248e22bee78STejun Heo 		/* fall through */
3249e22bee78STejun Heo 	case CPU_UP_CANCELED:
3250e22bee78STejun Heo 		destroy_worker(gcwq->first_idle);
3251e22bee78STejun Heo 		gcwq->first_idle = NULL;
3252db7bccf4STejun Heo 		break;
3253db7bccf4STejun Heo 
3254db7bccf4STejun Heo 	case CPU_DOWN_FAILED:
3255db7bccf4STejun Heo 	case CPU_ONLINE:
3256e22bee78STejun Heo 		gcwq->flags &= ~GCWQ_DISASSOCIATED;
3257db7bccf4STejun Heo 		if (gcwq->trustee_state != TRUSTEE_DONE) {
3258db7bccf4STejun Heo 			gcwq->trustee_state = TRUSTEE_RELEASE;
3259db7bccf4STejun Heo 			wake_up_process(gcwq->trustee);
3260db7bccf4STejun Heo 			wait_trustee_state(gcwq, TRUSTEE_DONE);
3261db7bccf4STejun Heo 		}
3262db7bccf4STejun Heo 
3263e22bee78STejun Heo 		/*
3264e22bee78STejun Heo 		 * Trustee is done and there might be no worker left.
3265e22bee78STejun Heo 		 * Put the first_idle in and request a real manager to
3266e22bee78STejun Heo 		 * take a look.
3267e22bee78STejun Heo 		 */
3268e22bee78STejun Heo 		spin_unlock_irq(&gcwq->lock);
3269e22bee78STejun Heo 		kthread_bind(gcwq->first_idle->task, cpu);
3270e22bee78STejun Heo 		spin_lock_irq(&gcwq->lock);
3271e22bee78STejun Heo 		gcwq->flags |= GCWQ_MANAGE_WORKERS;
3272e22bee78STejun Heo 		start_worker(gcwq->first_idle);
3273e22bee78STejun Heo 		gcwq->first_idle = NULL;
32741da177e4SLinus Torvalds 		break;
32751da177e4SLinus Torvalds 	}
3276db7bccf4STejun Heo 
3277db7bccf4STejun Heo 	spin_unlock_irqrestore(&gcwq->lock, flags);
32781da177e4SLinus Torvalds 
32791537663fSTejun Heo 	return notifier_from_errno(0);
32801da177e4SLinus Torvalds }
32811da177e4SLinus Torvalds 
32822d3854a3SRusty Russell #ifdef CONFIG_SMP
32838ccad40dSRusty Russell 
32842d3854a3SRusty Russell struct work_for_cpu {
32856b44003eSAndrew Morton 	struct completion completion;
32862d3854a3SRusty Russell 	long (*fn)(void *);
32872d3854a3SRusty Russell 	void *arg;
32882d3854a3SRusty Russell 	long ret;
32892d3854a3SRusty Russell };
32902d3854a3SRusty Russell 
32916b44003eSAndrew Morton static int do_work_for_cpu(void *_wfc)
32922d3854a3SRusty Russell {
32936b44003eSAndrew Morton 	struct work_for_cpu *wfc = _wfc;
32942d3854a3SRusty Russell 	wfc->ret = wfc->fn(wfc->arg);
32956b44003eSAndrew Morton 	complete(&wfc->completion);
32966b44003eSAndrew Morton 	return 0;
32972d3854a3SRusty Russell }
32982d3854a3SRusty Russell 
32992d3854a3SRusty Russell /**
33002d3854a3SRusty Russell  * work_on_cpu - run a function in user context on a particular cpu
33012d3854a3SRusty Russell  * @cpu: the cpu to run on
33022d3854a3SRusty Russell  * @fn: the function to run
33032d3854a3SRusty Russell  * @arg: the function arg
33042d3854a3SRusty Russell  *
330531ad9081SRusty Russell  * This will return the value @fn returns.
330631ad9081SRusty Russell  * It is up to the caller to ensure that the cpu doesn't go offline.
33076b44003eSAndrew Morton  * The caller must not hold any locks which would prevent @fn from completing.
33082d3854a3SRusty Russell  */
33092d3854a3SRusty Russell long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
33102d3854a3SRusty Russell {
33116b44003eSAndrew Morton 	struct task_struct *sub_thread;
33126b44003eSAndrew Morton 	struct work_for_cpu wfc = {
33136b44003eSAndrew Morton 		.completion = COMPLETION_INITIALIZER_ONSTACK(wfc.completion),
33146b44003eSAndrew Morton 		.fn = fn,
33156b44003eSAndrew Morton 		.arg = arg,
33166b44003eSAndrew Morton 	};
33172d3854a3SRusty Russell 
33186b44003eSAndrew Morton 	sub_thread = kthread_create(do_work_for_cpu, &wfc, "work_for_cpu");
33196b44003eSAndrew Morton 	if (IS_ERR(sub_thread))
33206b44003eSAndrew Morton 		return PTR_ERR(sub_thread);
33216b44003eSAndrew Morton 	kthread_bind(sub_thread, cpu);
33226b44003eSAndrew Morton 	wake_up_process(sub_thread);
33236b44003eSAndrew Morton 	wait_for_completion(&wfc.completion);
33242d3854a3SRusty Russell 	return wfc.ret;
33252d3854a3SRusty Russell }
33262d3854a3SRusty Russell EXPORT_SYMBOL_GPL(work_on_cpu);
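
/*
 * Example usage (illustrative, hypothetical names): read a cpu-local
 * counter on a specific cpu.  The caller pins cpu hotplug with
 * get_online_cpus() per the requirement documented above; my_counter
 * stands in for a DEFINE_PER_CPU(unsigned long, my_counter) variable:
 *
 *	static long read_local_counter(void *arg)
 *	{
 *		unsigned long *val = arg;
 *
 *		*val = this_cpu_read(my_counter);
 *		return 0;
 *	}
 *
 *	unsigned long val;
 *	long ret;
 *
 *	get_online_cpus();
 *	ret = work_on_cpu(cpu, read_local_counter, &val);
 *	put_online_cpus();
 */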
33272d3854a3SRusty Russell #endif /* CONFIG_SMP */
33282d3854a3SRusty Russell 
3329a0a1a5fdSTejun Heo #ifdef CONFIG_FREEZER
3330a0a1a5fdSTejun Heo 
3331a0a1a5fdSTejun Heo /**
3332a0a1a5fdSTejun Heo  * freeze_workqueues_begin - begin freezing workqueues
3333a0a1a5fdSTejun Heo  *
3334a0a1a5fdSTejun Heo  * Start freezing workqueues.  After this function returns, all
3335a0a1a5fdSTejun Heo  * freezeable workqueues will queue new works to their
33367e11629dSTejun Heo  * cwq->delayed_works list instead of gcwq->worklist.
3337a0a1a5fdSTejun Heo  *
3338a0a1a5fdSTejun Heo  * CONTEXT:
33398b03ae3cSTejun Heo  * Grabs and releases workqueue_lock and gcwq->lock's.
3340a0a1a5fdSTejun Heo  */
3341a0a1a5fdSTejun Heo void freeze_workqueues_begin(void)
3342a0a1a5fdSTejun Heo {
3343a0a1a5fdSTejun Heo 	unsigned int cpu;
3344a0a1a5fdSTejun Heo 
3345a0a1a5fdSTejun Heo 	spin_lock(&workqueue_lock);
3346a0a1a5fdSTejun Heo 
3347a0a1a5fdSTejun Heo 	BUG_ON(workqueue_freezing);
3348a0a1a5fdSTejun Heo 	workqueue_freezing = true;
3349a0a1a5fdSTejun Heo 
3350f3421797STejun Heo 	for_each_gcwq_cpu(cpu) {
33518b03ae3cSTejun Heo 		struct global_cwq *gcwq = get_gcwq(cpu);
3352bdbc5dd7STejun Heo 		struct workqueue_struct *wq;
33538b03ae3cSTejun Heo 
33548b03ae3cSTejun Heo 		spin_lock_irq(&gcwq->lock);
33558b03ae3cSTejun Heo 
3356db7bccf4STejun Heo 		BUG_ON(gcwq->flags & GCWQ_FREEZING);
3357db7bccf4STejun Heo 		gcwq->flags |= GCWQ_FREEZING;
3358db7bccf4STejun Heo 
3359a0a1a5fdSTejun Heo 		list_for_each_entry(wq, &workqueues, list) {
3360a0a1a5fdSTejun Heo 			struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
3361a0a1a5fdSTejun Heo 
3362f3421797STejun Heo 			if (cwq && wq->flags & WQ_FREEZEABLE)
3363a0a1a5fdSTejun Heo 				cwq->max_active = 0;
3364a0a1a5fdSTejun Heo 		}
33658b03ae3cSTejun Heo 
33668b03ae3cSTejun Heo 		spin_unlock_irq(&gcwq->lock);
3367a0a1a5fdSTejun Heo 	}
3368a0a1a5fdSTejun Heo 
3369a0a1a5fdSTejun Heo 	spin_unlock(&workqueue_lock);
3370a0a1a5fdSTejun Heo }
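
/*
 * Editorial note: the diversion relies on the max_active gate in
 * __queue_work() -- a work is put on gcwq->worklist only while
 * cwq->nr_active < cwq->max_active and is appended to
 * cwq->delayed_works otherwise, so forcing max_active to zero delays
 * every newly queued work until thaw_workqueues() restores the limit.
 */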
3371a0a1a5fdSTejun Heo 
3372a0a1a5fdSTejun Heo /**
3373a0a1a5fdSTejun Heo  * freeze_workqueues_busy - are freezeable workqueues still busy?
3374a0a1a5fdSTejun Heo  *
3375a0a1a5fdSTejun Heo  * Check whether freezing is complete.  This function must be called
3376a0a1a5fdSTejun Heo  * between freeze_workqueues_begin() and thaw_workqueues().
3377a0a1a5fdSTejun Heo  *
3378a0a1a5fdSTejun Heo  * CONTEXT:
3379a0a1a5fdSTejun Heo  * Grabs and releases workqueue_lock.
3380a0a1a5fdSTejun Heo  *
3381a0a1a5fdSTejun Heo  * RETURNS:
3382a0a1a5fdSTejun Heo  * %true if some freezeable workqueues are still busy.  %false if
3383a0a1a5fdSTejun Heo  * freezing is complete.
3384a0a1a5fdSTejun Heo  */
3385a0a1a5fdSTejun Heo bool freeze_workqueues_busy(void)
3386a0a1a5fdSTejun Heo {
3387a0a1a5fdSTejun Heo 	unsigned int cpu;
3388a0a1a5fdSTejun Heo 	bool busy = false;
3389a0a1a5fdSTejun Heo 
3390a0a1a5fdSTejun Heo 	spin_lock(&workqueue_lock);
3391a0a1a5fdSTejun Heo 
3392a0a1a5fdSTejun Heo 	BUG_ON(!workqueue_freezing);
3393a0a1a5fdSTejun Heo 
3394f3421797STejun Heo 	for_each_gcwq_cpu(cpu) {
3395bdbc5dd7STejun Heo 		struct workqueue_struct *wq;
3396a0a1a5fdSTejun Heo 		/*
3397a0a1a5fdSTejun Heo 		 * nr_active is monotonically decreasing while the
3398a0a1a5fdSTejun Heo 		 * freeze is in effect.  It's safe to peek at it
3398a0a1a5fdSTejun Heo 		 * without grabbing gcwq->lock.
3399a0a1a5fdSTejun Heo 		 */
3400a0a1a5fdSTejun Heo 		list_for_each_entry(wq, &workqueues, list) {
3401a0a1a5fdSTejun Heo 			struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
3402a0a1a5fdSTejun Heo 
3403f3421797STejun Heo 			if (!cwq || !(wq->flags & WQ_FREEZEABLE))
3404a0a1a5fdSTejun Heo 				continue;
3405a0a1a5fdSTejun Heo 
3406a0a1a5fdSTejun Heo 			BUG_ON(cwq->nr_active < 0);
3407a0a1a5fdSTejun Heo 			if (cwq->nr_active) {
3408a0a1a5fdSTejun Heo 				busy = true;
3409a0a1a5fdSTejun Heo 				goto out_unlock;
3410a0a1a5fdSTejun Heo 			}
3411a0a1a5fdSTejun Heo 		}
3412a0a1a5fdSTejun Heo 	}
3413a0a1a5fdSTejun Heo out_unlock:
3414a0a1a5fdSTejun Heo 	spin_unlock(&workqueue_lock);
3415a0a1a5fdSTejun Heo 	return busy;
3416a0a1a5fdSTejun Heo }
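
/*
 * Sketch of the intended freeze/thaw call sequence (illustrative; the
 * real callers live in the suspend/hibernation freezer code):
 *
 *	freeze_workqueues_begin();
 *	while (freeze_workqueues_busy())
 *		msleep(10);
 *	...system quiesced, suspend/hibernate work done...
 *	thaw_workqueues();
 */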
3417a0a1a5fdSTejun Heo 
3418a0a1a5fdSTejun Heo /**
3419a0a1a5fdSTejun Heo  * thaw_workqueues - thaw workqueues
3420a0a1a5fdSTejun Heo  *
3421a0a1a5fdSTejun Heo  * Thaw workqueues.  Normal queueing is restored and all works that
34227e11629dSTejun Heo  * were delayed while frozen are transferred back to their respective
34227e11629dSTejun Heo  * gcwq worklists.
3423a0a1a5fdSTejun Heo  *
3424a0a1a5fdSTejun Heo  * CONTEXT:
34258b03ae3cSTejun Heo  * Grabs and releases workqueue_lock and gcwq->lock's.
3426a0a1a5fdSTejun Heo  */
3427a0a1a5fdSTejun Heo void thaw_workqueues(void)
3428a0a1a5fdSTejun Heo {
3429a0a1a5fdSTejun Heo 	unsigned int cpu;
3430a0a1a5fdSTejun Heo 
3431a0a1a5fdSTejun Heo 	spin_lock(&workqueue_lock);
3432a0a1a5fdSTejun Heo 
3433a0a1a5fdSTejun Heo 	if (!workqueue_freezing)
3434a0a1a5fdSTejun Heo 		goto out_unlock;
3435a0a1a5fdSTejun Heo 
3436f3421797STejun Heo 	for_each_gcwq_cpu(cpu) {
34378b03ae3cSTejun Heo 		struct global_cwq *gcwq = get_gcwq(cpu);
3438bdbc5dd7STejun Heo 		struct workqueue_struct *wq;
34398b03ae3cSTejun Heo 
34408b03ae3cSTejun Heo 		spin_lock_irq(&gcwq->lock);
34418b03ae3cSTejun Heo 
3442db7bccf4STejun Heo 		BUG_ON(!(gcwq->flags & GCWQ_FREEZING));
3443db7bccf4STejun Heo 		gcwq->flags &= ~GCWQ_FREEZING;
3444db7bccf4STejun Heo 
3445a0a1a5fdSTejun Heo 		list_for_each_entry(wq, &workqueues, list) {
3446a0a1a5fdSTejun Heo 			struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
3447a0a1a5fdSTejun Heo 
3448f3421797STejun Heo 			if (!cwq || !(wq->flags & WQ_FREEZEABLE))
3449a0a1a5fdSTejun Heo 				continue;
3450a0a1a5fdSTejun Heo 
3451a0a1a5fdSTejun Heo 			/* restore max_active and repopulate worklist */
3452a0a1a5fdSTejun Heo 			cwq->max_active = wq->saved_max_active;
3453a0a1a5fdSTejun Heo 
3454a0a1a5fdSTejun Heo 			while (!list_empty(&cwq->delayed_works) &&
3455a0a1a5fdSTejun Heo 			       cwq->nr_active < cwq->max_active)
3456a0a1a5fdSTejun Heo 				cwq_activate_first_delayed(cwq);
3457a0a1a5fdSTejun Heo 		}
34588b03ae3cSTejun Heo 
3459e22bee78STejun Heo 		wake_up_worker(gcwq);
3460e22bee78STejun Heo 
34618b03ae3cSTejun Heo 		spin_unlock_irq(&gcwq->lock);
3462a0a1a5fdSTejun Heo 	}
3463a0a1a5fdSTejun Heo 
3464a0a1a5fdSTejun Heo 	workqueue_freezing = false;
3465a0a1a5fdSTejun Heo out_unlock:
3466a0a1a5fdSTejun Heo 	spin_unlock(&workqueue_lock);
3467a0a1a5fdSTejun Heo }
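
/*
 * Editorial note: re-adding works to gcwq->worklist does not by itself
 * restart execution, which is why each gcwq gets a wake_up_worker()
 * kick above once its lists have been repopulated.
 */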
3468a0a1a5fdSTejun Heo #endif /* CONFIG_FREEZER */
3469a0a1a5fdSTejun Heo 
3470c12920d1SOleg Nesterov void __init init_workqueues(void)
34711da177e4SLinus Torvalds {
3472c34056a3STejun Heo 	unsigned int cpu;
3473c8e55f36STejun Heo 	int i;
3474c34056a3STejun Heo 
34757a22ad75STejun Heo 	/*
34767a22ad75STejun Heo 	 * The pointer part of work->data either points to the cwq
34777a22ad75STejun Heo 	 * or contains the cpu number the work last ran on.  Make
34787a22ad75STejun Heo 	 * sure the cpu number can't overflow into the kernel
34797a22ad75STejun Heo 	 * pointer area so that the two cases can be distinguished.
34807a22ad75STejun Heo 	 */
3481bdbc5dd7STejun Heo 	BUILD_BUG_ON(WORK_CPU_LAST << WORK_STRUCT_FLAG_BITS >= PAGE_OFFSET);
34827a22ad75STejun Heo 
3483db7bccf4STejun Heo 	hotcpu_notifier(workqueue_cpu_callback, CPU_PRI_WORKQUEUE);
34848b03ae3cSTejun Heo 
34858b03ae3cSTejun Heo 	/* initialize gcwqs */
3486f3421797STejun Heo 	for_each_gcwq_cpu(cpu) {
34878b03ae3cSTejun Heo 		struct global_cwq *gcwq = get_gcwq(cpu);
34888b03ae3cSTejun Heo 
34898b03ae3cSTejun Heo 		spin_lock_init(&gcwq->lock);
34907e11629dSTejun Heo 		INIT_LIST_HEAD(&gcwq->worklist);
34918b03ae3cSTejun Heo 		gcwq->cpu = cpu;
3492f3421797STejun Heo 		if (cpu == WORK_CPU_UNBOUND)
3493f3421797STejun Heo 			gcwq->flags |= GCWQ_DISASSOCIATED;
34948b03ae3cSTejun Heo 
3495c8e55f36STejun Heo 		INIT_LIST_HEAD(&gcwq->idle_list);
3496c8e55f36STejun Heo 		for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++)
3497c8e55f36STejun Heo 			INIT_HLIST_HEAD(&gcwq->busy_hash[i]);
3498c8e55f36STejun Heo 
3499e22bee78STejun Heo 		init_timer_deferrable(&gcwq->idle_timer);
3500e22bee78STejun Heo 		gcwq->idle_timer.function = idle_worker_timeout;
3501e22bee78STejun Heo 		gcwq->idle_timer.data = (unsigned long)gcwq;
3502e22bee78STejun Heo 
3503e22bee78STejun Heo 		setup_timer(&gcwq->mayday_timer, gcwq_mayday_timeout,
3504e22bee78STejun Heo 			    (unsigned long)gcwq);
3505e22bee78STejun Heo 
35068b03ae3cSTejun Heo 		ida_init(&gcwq->worker_ida);
3507db7bccf4STejun Heo 
3508db7bccf4STejun Heo 		gcwq->trustee_state = TRUSTEE_DONE;
3509db7bccf4STejun Heo 		init_waitqueue_head(&gcwq->trustee_wait);
35108b03ae3cSTejun Heo 	}
35118b03ae3cSTejun Heo 
3512e22bee78STejun Heo 	/* create the initial worker */
3513f3421797STejun Heo 	for_each_online_gcwq_cpu(cpu) {
3514e22bee78STejun Heo 		struct global_cwq *gcwq = get_gcwq(cpu);
3515e22bee78STejun Heo 		struct worker *worker;
3516e22bee78STejun Heo 
3517e22bee78STejun Heo 		worker = create_worker(gcwq, true);
3518e22bee78STejun Heo 		BUG_ON(!worker);
3519e22bee78STejun Heo 		spin_lock_irq(&gcwq->lock);
3520e22bee78STejun Heo 		start_worker(worker);
3521e22bee78STejun Heo 		spin_unlock_irq(&gcwq->lock);
3522e22bee78STejun Heo 	}
3523e22bee78STejun Heo 
3524d320c038STejun Heo 	system_wq = alloc_workqueue("events", 0, 0);
3525d320c038STejun Heo 	system_long_wq = alloc_workqueue("events_long", 0, 0);
3526d320c038STejun Heo 	system_nrt_wq = alloc_workqueue("events_nrt", WQ_NON_REENTRANT, 0);
3527f3421797STejun Heo 	system_unbound_wq = alloc_workqueue("events_unbound", WQ_UNBOUND,
3528f3421797STejun Heo 					    WQ_UNBOUND_MAX_ACTIVE);
3529d320c038STejun Heo 	BUG_ON(!system_wq || !system_long_wq || !system_nrt_wq ||
3529d320c038STejun Heo 	       !system_unbound_wq);
35301da177e4SLinus Torvalds }
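
/*
 * Example (illustrative): once init_workqueues() has run, kernel code
 * can queue onto the system workqueues created above:
 *
 *	static void my_work_fn(struct work_struct *work)
 *	{
 *		pr_info("my_work_fn: running in process context\n");
 *	}
 *	static DECLARE_WORK(my_work, my_work_fn);
 *
 *	queue_work(system_wq, &my_work);
 *
 * schedule_work(&my_work) is shorthand for the system_wq case;
 * system_unbound_wq runs works without binding them to any cpu, with
 * up to WQ_UNBOUND_MAX_ACTIVE works in flight.
 */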
3531