xref: /linux-6.15/kernel/workqueue.c (revision 649027d7)
11da177e4SLinus Torvalds /*
21da177e4SLinus Torvalds  * linux/kernel/workqueue.c
31da177e4SLinus Torvalds  *
41da177e4SLinus Torvalds  * Generic mechanism for defining kernel helper threads for running
51da177e4SLinus Torvalds  * arbitrary tasks in process context.
61da177e4SLinus Torvalds  *
71da177e4SLinus Torvalds  * Started by Ingo Molnar, Copyright (C) 2002
81da177e4SLinus Torvalds  *
91da177e4SLinus Torvalds  * Derived from the taskqueue/keventd code by:
101da177e4SLinus Torvalds  *
111da177e4SLinus Torvalds  *   David Woodhouse <[email protected]>
12e1f8e874SFrancois Cami  *   Andrew Morton
131da177e4SLinus Torvalds  *   Kai Petzke <[email protected]>
141da177e4SLinus Torvalds  *   Theodore Ts'o <[email protected]>
1589ada679SChristoph Lameter  *
16cde53535SChristoph Lameter  * Made to use alloc_percpu by Christoph Lameter.
171da177e4SLinus Torvalds  */
181da177e4SLinus Torvalds 
191da177e4SLinus Torvalds #include <linux/module.h>
201da177e4SLinus Torvalds #include <linux/kernel.h>
211da177e4SLinus Torvalds #include <linux/sched.h>
221da177e4SLinus Torvalds #include <linux/init.h>
231da177e4SLinus Torvalds #include <linux/signal.h>
241da177e4SLinus Torvalds #include <linux/completion.h>
251da177e4SLinus Torvalds #include <linux/workqueue.h>
261da177e4SLinus Torvalds #include <linux/slab.h>
271da177e4SLinus Torvalds #include <linux/cpu.h>
281da177e4SLinus Torvalds #include <linux/notifier.h>
291da177e4SLinus Torvalds #include <linux/kthread.h>
301fa44ecaSJames Bottomley #include <linux/hardirq.h>
3146934023SChristoph Lameter #include <linux/mempolicy.h>
32341a5958SRafael J. Wysocki #include <linux/freezer.h>
33d5abe669SPeter Zijlstra #include <linux/kallsyms.h>
34d5abe669SPeter Zijlstra #include <linux/debug_locks.h>
354e6045f1SJohannes Berg #include <linux/lockdep.h>
36c34056a3STejun Heo #include <linux/idr.h>
37e22bee78STejun Heo 
38e22bee78STejun Heo #include "workqueue_sched.h"
391da177e4SLinus Torvalds 
40c8e55f36STejun Heo enum {
41db7bccf4STejun Heo 	/* global_cwq flags */
42e22bee78STejun Heo 	GCWQ_MANAGE_WORKERS	= 1 << 0,	/* need to manage workers */
43e22bee78STejun Heo 	GCWQ_MANAGING_WORKERS	= 1 << 1,	/* managing workers */
44e22bee78STejun Heo 	GCWQ_DISASSOCIATED	= 1 << 2,	/* cpu can't serve workers */
45db7bccf4STejun Heo 	GCWQ_FREEZING		= 1 << 3,	/* freeze in progress */
46*649027d7STejun Heo 	GCWQ_HIGHPRI_PENDING	= 1 << 4,	/* highpri works on queue */
47db7bccf4STejun Heo 
48c8e55f36STejun Heo 	/* worker flags */
49c8e55f36STejun Heo 	WORKER_STARTED		= 1 << 0,	/* started */
50c8e55f36STejun Heo 	WORKER_DIE		= 1 << 1,	/* die die die */
51c8e55f36STejun Heo 	WORKER_IDLE		= 1 << 2,	/* is idle */
52e22bee78STejun Heo 	WORKER_PREP		= 1 << 3,	/* preparing to run works */
53db7bccf4STejun Heo 	WORKER_ROGUE		= 1 << 4,	/* not bound to any cpu */
54e22bee78STejun Heo 	WORKER_REBIND		= 1 << 5,	/* mom is home, come back */
55e22bee78STejun Heo 
56e22bee78STejun Heo 	WORKER_NOT_RUNNING	= WORKER_PREP | WORKER_ROGUE | WORKER_REBIND,
57db7bccf4STejun Heo 
58db7bccf4STejun Heo 	/* gcwq->trustee_state */
59db7bccf4STejun Heo 	TRUSTEE_START		= 0,		/* start */
60db7bccf4STejun Heo 	TRUSTEE_IN_CHARGE	= 1,		/* trustee in charge of gcwq */
61db7bccf4STejun Heo 	TRUSTEE_BUTCHER		= 2,		/* butcher workers */
62db7bccf4STejun Heo 	TRUSTEE_RELEASE		= 3,		/* release workers */
63db7bccf4STejun Heo 	TRUSTEE_DONE		= 4,		/* trustee is done */
64c8e55f36STejun Heo 
65c8e55f36STejun Heo 	BUSY_WORKER_HASH_ORDER	= 6,		/* 64 pointers */
66c8e55f36STejun Heo 	BUSY_WORKER_HASH_SIZE	= 1 << BUSY_WORKER_HASH_ORDER,
67c8e55f36STejun Heo 	BUSY_WORKER_HASH_MASK	= BUSY_WORKER_HASH_SIZE - 1,
68db7bccf4STejun Heo 
69e22bee78STejun Heo 	MAX_IDLE_WORKERS_RATIO	= 4,		/* 1/4 of busy can be idle */
70e22bee78STejun Heo 	IDLE_WORKER_TIMEOUT	= 300 * HZ,	/* keep idle ones for 5 mins */
71e22bee78STejun Heo 
72e22bee78STejun Heo 	MAYDAY_INITIAL_TIMEOUT	= HZ / 100,	/* call for help after 10ms */
73e22bee78STejun Heo 	MAYDAY_INTERVAL		= HZ / 10,	/* and then every 100ms */
74e22bee78STejun Heo 	CREATE_COOLDOWN		= HZ,		/* time to breath after fail */
75db7bccf4STejun Heo 	TRUSTEE_COOLDOWN	= HZ / 10,	/* for trustee draining */
76e22bee78STejun Heo 
77e22bee78STejun Heo 	/*
78e22bee78STejun Heo 	 * Rescue workers are used only on emergencies and shared by
79e22bee78STejun Heo 	 * all cpus.  Give -20.
80e22bee78STejun Heo 	 */
81e22bee78STejun Heo 	RESCUER_NICE_LEVEL	= -20,
82c8e55f36STejun Heo };
83c8e55f36STejun Heo 
841da177e4SLinus Torvalds /*
854690c4abSTejun Heo  * Structure fields follow one of the following exclusion rules.
864690c4abSTejun Heo  *
874690c4abSTejun Heo  * I: Set during initialization and read-only afterwards.
884690c4abSTejun Heo  *
89e22bee78STejun Heo  * P: Preemption protected.  Disabling preemption is enough and should
90e22bee78STejun Heo  *    only be modified and accessed from the local cpu.
91e22bee78STejun Heo  *
928b03ae3cSTejun Heo  * L: gcwq->lock protected.  Access with gcwq->lock held.
934690c4abSTejun Heo  *
94e22bee78STejun Heo  * X: During normal operation, modification requires gcwq->lock and
95e22bee78STejun Heo  *    should be done only from local cpu.  Either disabling preemption
96e22bee78STejun Heo  *    on local cpu or grabbing gcwq->lock is enough for read access.
97e22bee78STejun Heo  *    While trustee is in charge, it's identical to L.
98e22bee78STejun Heo  *
9973f53c4aSTejun Heo  * F: wq->flush_mutex protected.
10073f53c4aSTejun Heo  *
1014690c4abSTejun Heo  * W: workqueue_lock protected.
1024690c4abSTejun Heo  */
1034690c4abSTejun Heo 
1048b03ae3cSTejun Heo struct global_cwq;
105c34056a3STejun Heo 
106e22bee78STejun Heo /*
107e22bee78STejun Heo  * The poor guys doing the actual heavy lifting.  All on-duty workers
108e22bee78STejun Heo  * are either serving the manager role, on idle list or on busy hash.
109e22bee78STejun Heo  */
110c34056a3STejun Heo struct worker {
111c8e55f36STejun Heo 	/* on idle list while idle, on busy hash table while busy */
112c8e55f36STejun Heo 	union {
113c8e55f36STejun Heo 		struct list_head	entry;	/* L: while idle */
114c8e55f36STejun Heo 		struct hlist_node	hentry;	/* L: while busy */
115c8e55f36STejun Heo 	};
116c8e55f36STejun Heo 
117c34056a3STejun Heo 	struct work_struct	*current_work;	/* L: work being processed */
1188cca0eeaSTejun Heo 	struct cpu_workqueue_struct *current_cwq; /* L: current_work's cwq */
119affee4b2STejun Heo 	struct list_head	scheduled;	/* L: scheduled works */
120c34056a3STejun Heo 	struct task_struct	*task;		/* I: worker task */
1218b03ae3cSTejun Heo 	struct global_cwq	*gcwq;		/* I: the associated gcwq */
122e22bee78STejun Heo 	/* 64 bytes boundary on 64bit, 32 on 32bit */
123e22bee78STejun Heo 	unsigned long		last_active;	/* L: last active timestamp */
124e22bee78STejun Heo 	unsigned int		flags;		/* X: flags */
125c34056a3STejun Heo 	int			id;		/* I: worker id */
126e22bee78STejun Heo 	struct work_struct	rebind_work;	/* L: rebind worker to cpu */
127c34056a3STejun Heo };
128c34056a3STejun Heo 
1294690c4abSTejun Heo /*
130e22bee78STejun Heo  * Global per-cpu workqueue.  There's one and only one for each cpu
131e22bee78STejun Heo  * and all works are queued and processed here regardless of their
132e22bee78STejun Heo  * target workqueues.
1338b03ae3cSTejun Heo  */
1348b03ae3cSTejun Heo struct global_cwq {
1358b03ae3cSTejun Heo 	spinlock_t		lock;		/* the gcwq lock */
1367e11629dSTejun Heo 	struct list_head	worklist;	/* L: list of pending works */
1378b03ae3cSTejun Heo 	unsigned int		cpu;		/* I: the associated cpu */
138db7bccf4STejun Heo 	unsigned int		flags;		/* L: GCWQ_* flags */
139c8e55f36STejun Heo 
140c8e55f36STejun Heo 	int			nr_workers;	/* L: total number of workers */
141c8e55f36STejun Heo 	int			nr_idle;	/* L: currently idle ones */
142c8e55f36STejun Heo 
143c8e55f36STejun Heo 	/* workers are chained either in the idle_list or busy_hash */
144e22bee78STejun Heo 	struct list_head	idle_list;	/* X: list of idle workers */
145c8e55f36STejun Heo 	struct hlist_head	busy_hash[BUSY_WORKER_HASH_SIZE];
146c8e55f36STejun Heo 						/* L: hash of busy workers */
147c8e55f36STejun Heo 
148e22bee78STejun Heo 	struct timer_list	idle_timer;	/* L: worker idle timeout */
149e22bee78STejun Heo 	struct timer_list	mayday_timer;	/* L: SOS timer for dworkers */
150e22bee78STejun Heo 
1518b03ae3cSTejun Heo 	struct ida		worker_ida;	/* L: for worker IDs */
152db7bccf4STejun Heo 
153db7bccf4STejun Heo 	struct task_struct	*trustee;	/* L: for gcwq shutdown */
154db7bccf4STejun Heo 	unsigned int		trustee_state;	/* L: trustee state */
155db7bccf4STejun Heo 	wait_queue_head_t	trustee_wait;	/* trustee wait */
156e22bee78STejun Heo 	struct worker		*first_idle;	/* L: first idle worker */
1578b03ae3cSTejun Heo } ____cacheline_aligned_in_smp;
1588b03ae3cSTejun Heo 
1598b03ae3cSTejun Heo /*
160502ca9d8STejun Heo  * The per-CPU workqueue.  The lower WORK_STRUCT_FLAG_BITS of
1610f900049STejun Heo  * work_struct->data are used for flags and thus cwqs need to be
1620f900049STejun Heo  * aligned at two's power of the number of flag bits.
1631da177e4SLinus Torvalds  */
1641da177e4SLinus Torvalds struct cpu_workqueue_struct {
1658b03ae3cSTejun Heo 	struct global_cwq	*gcwq;		/* I: the associated gcwq */
1664690c4abSTejun Heo 	struct workqueue_struct *wq;		/* I: the owning workqueue */
16773f53c4aSTejun Heo 	int			work_color;	/* L: current color */
16873f53c4aSTejun Heo 	int			flush_color;	/* L: flushing color */
16973f53c4aSTejun Heo 	int			nr_in_flight[WORK_NR_COLORS];
17073f53c4aSTejun Heo 						/* L: nr of in_flight works */
1711e19ffc6STejun Heo 	int			nr_active;	/* L: nr of active works */
172a0a1a5fdSTejun Heo 	int			max_active;	/* L: max active works */
1731e19ffc6STejun Heo 	struct list_head	delayed_works;	/* L: delayed works */
1740f900049STejun Heo };
1751da177e4SLinus Torvalds 
1761da177e4SLinus Torvalds /*
17773f53c4aSTejun Heo  * Structure used to wait for workqueue flush.
17873f53c4aSTejun Heo  */
17973f53c4aSTejun Heo struct wq_flusher {
18073f53c4aSTejun Heo 	struct list_head	list;		/* F: list of flushers */
18173f53c4aSTejun Heo 	int			flush_color;	/* F: flush color waiting for */
18273f53c4aSTejun Heo 	struct completion	done;		/* flush completion */
18373f53c4aSTejun Heo };
18473f53c4aSTejun Heo 
18573f53c4aSTejun Heo /*
1861da177e4SLinus Torvalds  * The externally visible workqueue abstraction is an array of
1871da177e4SLinus Torvalds  * per-CPU workqueues:
1881da177e4SLinus Torvalds  */
1891da177e4SLinus Torvalds struct workqueue_struct {
19097e37d7bSTejun Heo 	unsigned int		flags;		/* I: WQ_* flags */
1914690c4abSTejun Heo 	struct cpu_workqueue_struct *cpu_wq;	/* I: cwq's */
1924690c4abSTejun Heo 	struct list_head	list;		/* W: list of all workqueues */
19373f53c4aSTejun Heo 
19473f53c4aSTejun Heo 	struct mutex		flush_mutex;	/* protects wq flushing */
19573f53c4aSTejun Heo 	int			work_color;	/* F: current work color */
19673f53c4aSTejun Heo 	int			flush_color;	/* F: current flush color */
19773f53c4aSTejun Heo 	atomic_t		nr_cwqs_to_flush; /* flush in progress */
19873f53c4aSTejun Heo 	struct wq_flusher	*first_flusher;	/* F: first flusher */
19973f53c4aSTejun Heo 	struct list_head	flusher_queue;	/* F: flush waiters */
20073f53c4aSTejun Heo 	struct list_head	flusher_overflow; /* F: flush overflow list */
20173f53c4aSTejun Heo 
202502ca9d8STejun Heo 	unsigned long		single_cpu;	/* cpu for single cpu wq */
203502ca9d8STejun Heo 
204e22bee78STejun Heo 	cpumask_var_t		mayday_mask;	/* cpus requesting rescue */
205e22bee78STejun Heo 	struct worker		*rescuer;	/* I: rescue worker */
206e22bee78STejun Heo 
207dcd989cbSTejun Heo 	int			saved_max_active; /* W: saved cwq max_active */
2084690c4abSTejun Heo 	const char		*name;		/* I: workqueue name */
2094e6045f1SJohannes Berg #ifdef CONFIG_LOCKDEP
2104e6045f1SJohannes Berg 	struct lockdep_map	lockdep_map;
2114e6045f1SJohannes Berg #endif
2121da177e4SLinus Torvalds };
2131da177e4SLinus Torvalds 
214d320c038STejun Heo struct workqueue_struct *system_wq __read_mostly;
215d320c038STejun Heo struct workqueue_struct *system_long_wq __read_mostly;
216d320c038STejun Heo struct workqueue_struct *system_nrt_wq __read_mostly;
217d320c038STejun Heo EXPORT_SYMBOL_GPL(system_wq);
218d320c038STejun Heo EXPORT_SYMBOL_GPL(system_long_wq);
219d320c038STejun Heo EXPORT_SYMBOL_GPL(system_nrt_wq);
220d320c038STejun Heo 
221db7bccf4STejun Heo #define for_each_busy_worker(worker, i, pos, gcwq)			\
222db7bccf4STejun Heo 	for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++)			\
223db7bccf4STejun Heo 		hlist_for_each_entry(worker, pos, &gcwq->busy_hash[i], hentry)
224db7bccf4STejun Heo 
225dc186ad7SThomas Gleixner #ifdef CONFIG_DEBUG_OBJECTS_WORK
226dc186ad7SThomas Gleixner 
227dc186ad7SThomas Gleixner static struct debug_obj_descr work_debug_descr;
228dc186ad7SThomas Gleixner 
229dc186ad7SThomas Gleixner /*
230dc186ad7SThomas Gleixner  * fixup_init is called when:
231dc186ad7SThomas Gleixner  * - an active object is initialized
232dc186ad7SThomas Gleixner  */
233dc186ad7SThomas Gleixner static int work_fixup_init(void *addr, enum debug_obj_state state)
234dc186ad7SThomas Gleixner {
235dc186ad7SThomas Gleixner 	struct work_struct *work = addr;
236dc186ad7SThomas Gleixner 
237dc186ad7SThomas Gleixner 	switch (state) {
238dc186ad7SThomas Gleixner 	case ODEBUG_STATE_ACTIVE:
239dc186ad7SThomas Gleixner 		cancel_work_sync(work);
240dc186ad7SThomas Gleixner 		debug_object_init(work, &work_debug_descr);
241dc186ad7SThomas Gleixner 		return 1;
242dc186ad7SThomas Gleixner 	default:
243dc186ad7SThomas Gleixner 		return 0;
244dc186ad7SThomas Gleixner 	}
245dc186ad7SThomas Gleixner }
246dc186ad7SThomas Gleixner 
247dc186ad7SThomas Gleixner /*
248dc186ad7SThomas Gleixner  * fixup_activate is called when:
249dc186ad7SThomas Gleixner  * - an active object is activated
250dc186ad7SThomas Gleixner  * - an unknown object is activated (might be a statically initialized object)
251dc186ad7SThomas Gleixner  */
252dc186ad7SThomas Gleixner static int work_fixup_activate(void *addr, enum debug_obj_state state)
253dc186ad7SThomas Gleixner {
254dc186ad7SThomas Gleixner 	struct work_struct *work = addr;
255dc186ad7SThomas Gleixner 
256dc186ad7SThomas Gleixner 	switch (state) {
257dc186ad7SThomas Gleixner 
258dc186ad7SThomas Gleixner 	case ODEBUG_STATE_NOTAVAILABLE:
259dc186ad7SThomas Gleixner 		/*
260dc186ad7SThomas Gleixner 		 * This is not really a fixup. The work struct was
261dc186ad7SThomas Gleixner 		 * statically initialized. We just make sure that it
262dc186ad7SThomas Gleixner 		 * is tracked in the object tracker.
263dc186ad7SThomas Gleixner 		 */
26422df02bbSTejun Heo 		if (test_bit(WORK_STRUCT_STATIC_BIT, work_data_bits(work))) {
265dc186ad7SThomas Gleixner 			debug_object_init(work, &work_debug_descr);
266dc186ad7SThomas Gleixner 			debug_object_activate(work, &work_debug_descr);
267dc186ad7SThomas Gleixner 			return 0;
268dc186ad7SThomas Gleixner 		}
269dc186ad7SThomas Gleixner 		WARN_ON_ONCE(1);
270dc186ad7SThomas Gleixner 		return 0;
271dc186ad7SThomas Gleixner 
272dc186ad7SThomas Gleixner 	case ODEBUG_STATE_ACTIVE:
273dc186ad7SThomas Gleixner 		WARN_ON(1);
274dc186ad7SThomas Gleixner 
275dc186ad7SThomas Gleixner 	default:
276dc186ad7SThomas Gleixner 		return 0;
277dc186ad7SThomas Gleixner 	}
278dc186ad7SThomas Gleixner }
279dc186ad7SThomas Gleixner 
280dc186ad7SThomas Gleixner /*
281dc186ad7SThomas Gleixner  * fixup_free is called when:
282dc186ad7SThomas Gleixner  * - an active object is freed
283dc186ad7SThomas Gleixner  */
284dc186ad7SThomas Gleixner static int work_fixup_free(void *addr, enum debug_obj_state state)
285dc186ad7SThomas Gleixner {
286dc186ad7SThomas Gleixner 	struct work_struct *work = addr;
287dc186ad7SThomas Gleixner 
288dc186ad7SThomas Gleixner 	switch (state) {
289dc186ad7SThomas Gleixner 	case ODEBUG_STATE_ACTIVE:
290dc186ad7SThomas Gleixner 		cancel_work_sync(work);
291dc186ad7SThomas Gleixner 		debug_object_free(work, &work_debug_descr);
292dc186ad7SThomas Gleixner 		return 1;
293dc186ad7SThomas Gleixner 	default:
294dc186ad7SThomas Gleixner 		return 0;
295dc186ad7SThomas Gleixner 	}
296dc186ad7SThomas Gleixner }
297dc186ad7SThomas Gleixner 
298dc186ad7SThomas Gleixner static struct debug_obj_descr work_debug_descr = {
299dc186ad7SThomas Gleixner 	.name		= "work_struct",
300dc186ad7SThomas Gleixner 	.fixup_init	= work_fixup_init,
301dc186ad7SThomas Gleixner 	.fixup_activate	= work_fixup_activate,
302dc186ad7SThomas Gleixner 	.fixup_free	= work_fixup_free,
303dc186ad7SThomas Gleixner };
304dc186ad7SThomas Gleixner 
305dc186ad7SThomas Gleixner static inline void debug_work_activate(struct work_struct *work)
306dc186ad7SThomas Gleixner {
307dc186ad7SThomas Gleixner 	debug_object_activate(work, &work_debug_descr);
308dc186ad7SThomas Gleixner }
309dc186ad7SThomas Gleixner 
310dc186ad7SThomas Gleixner static inline void debug_work_deactivate(struct work_struct *work)
311dc186ad7SThomas Gleixner {
312dc186ad7SThomas Gleixner 	debug_object_deactivate(work, &work_debug_descr);
313dc186ad7SThomas Gleixner }
314dc186ad7SThomas Gleixner 
315dc186ad7SThomas Gleixner void __init_work(struct work_struct *work, int onstack)
316dc186ad7SThomas Gleixner {
317dc186ad7SThomas Gleixner 	if (onstack)
318dc186ad7SThomas Gleixner 		debug_object_init_on_stack(work, &work_debug_descr);
319dc186ad7SThomas Gleixner 	else
320dc186ad7SThomas Gleixner 		debug_object_init(work, &work_debug_descr);
321dc186ad7SThomas Gleixner }
322dc186ad7SThomas Gleixner EXPORT_SYMBOL_GPL(__init_work);
323dc186ad7SThomas Gleixner 
324dc186ad7SThomas Gleixner void destroy_work_on_stack(struct work_struct *work)
325dc186ad7SThomas Gleixner {
326dc186ad7SThomas Gleixner 	debug_object_free(work, &work_debug_descr);
327dc186ad7SThomas Gleixner }
328dc186ad7SThomas Gleixner EXPORT_SYMBOL_GPL(destroy_work_on_stack);
329dc186ad7SThomas Gleixner 
330dc186ad7SThomas Gleixner #else
331dc186ad7SThomas Gleixner static inline void debug_work_activate(struct work_struct *work) { }
332dc186ad7SThomas Gleixner static inline void debug_work_deactivate(struct work_struct *work) { }
333dc186ad7SThomas Gleixner #endif
334dc186ad7SThomas Gleixner 
33595402b38SGautham R Shenoy /* Serializes the accesses to the list of workqueues. */
33695402b38SGautham R Shenoy static DEFINE_SPINLOCK(workqueue_lock);
3371da177e4SLinus Torvalds static LIST_HEAD(workqueues);
338a0a1a5fdSTejun Heo static bool workqueue_freezing;		/* W: have wqs started freezing? */
339c34056a3STejun Heo 
340e22bee78STejun Heo /*
341e22bee78STejun Heo  * The almighty global cpu workqueues.  nr_running is the only field
342e22bee78STejun Heo  * which is expected to be used frequently by other cpus via
343e22bee78STejun Heo  * try_to_wake_up().  Put it in a separate cacheline.
344e22bee78STejun Heo  */
3458b03ae3cSTejun Heo static DEFINE_PER_CPU(struct global_cwq, global_cwq);
346e22bee78STejun Heo static DEFINE_PER_CPU_SHARED_ALIGNED(atomic_t, gcwq_nr_running);
3478b03ae3cSTejun Heo 
348c34056a3STejun Heo static int worker_thread(void *__worker);
3491da177e4SLinus Torvalds 
3508b03ae3cSTejun Heo static struct global_cwq *get_gcwq(unsigned int cpu)
3518b03ae3cSTejun Heo {
3528b03ae3cSTejun Heo 	return &per_cpu(global_cwq, cpu);
3538b03ae3cSTejun Heo }
3548b03ae3cSTejun Heo 
355e22bee78STejun Heo static atomic_t *get_gcwq_nr_running(unsigned int cpu)
356e22bee78STejun Heo {
357e22bee78STejun Heo 	return &per_cpu(gcwq_nr_running, cpu);
358e22bee78STejun Heo }
359e22bee78STejun Heo 
3604690c4abSTejun Heo static struct cpu_workqueue_struct *get_cwq(unsigned int cpu,
3614690c4abSTejun Heo 					    struct workqueue_struct *wq)
362a848e3b6SOleg Nesterov {
363a848e3b6SOleg Nesterov 	return per_cpu_ptr(wq->cpu_wq, cpu);
364a848e3b6SOleg Nesterov }
365a848e3b6SOleg Nesterov 
36673f53c4aSTejun Heo static unsigned int work_color_to_flags(int color)
36773f53c4aSTejun Heo {
36873f53c4aSTejun Heo 	return color << WORK_STRUCT_COLOR_SHIFT;
36973f53c4aSTejun Heo }
37073f53c4aSTejun Heo 
37173f53c4aSTejun Heo static int get_work_color(struct work_struct *work)
37273f53c4aSTejun Heo {
37373f53c4aSTejun Heo 	return (*work_data_bits(work) >> WORK_STRUCT_COLOR_SHIFT) &
37473f53c4aSTejun Heo 		((1 << WORK_STRUCT_COLOR_BITS) - 1);
37573f53c4aSTejun Heo }
37673f53c4aSTejun Heo 
37773f53c4aSTejun Heo static int work_next_color(int color)
37873f53c4aSTejun Heo {
37973f53c4aSTejun Heo 	return (color + 1) % WORK_NR_COLORS;
38073f53c4aSTejun Heo }
38173f53c4aSTejun Heo 
3824594bf15SDavid Howells /*
3837a22ad75STejun Heo  * Work data points to the cwq while a work is on queue.  Once
3847a22ad75STejun Heo  * execution starts, it points to the cpu the work was last on.  This
3857a22ad75STejun Heo  * can be distinguished by comparing the data value against
3867a22ad75STejun Heo  * PAGE_OFFSET.
3877a22ad75STejun Heo  *
3887a22ad75STejun Heo  * set_work_{cwq|cpu}() and clear_work_data() can be used to set the
3897a22ad75STejun Heo  * cwq, cpu or clear work->data.  These functions should only be
3907a22ad75STejun Heo  * called while the work is owned - ie. while the PENDING bit is set.
3917a22ad75STejun Heo  *
3927a22ad75STejun Heo  * get_work_[g]cwq() can be used to obtain the gcwq or cwq
3937a22ad75STejun Heo  * corresponding to a work.  gcwq is available once the work has been
3947a22ad75STejun Heo  * queued anywhere after initialization.  cwq is available only from
3957a22ad75STejun Heo  * queueing until execution starts.
3964594bf15SDavid Howells  */
3977a22ad75STejun Heo static inline void set_work_data(struct work_struct *work, unsigned long data,
3987a22ad75STejun Heo 				 unsigned long flags)
3997a22ad75STejun Heo {
4007a22ad75STejun Heo 	BUG_ON(!work_pending(work));
4017a22ad75STejun Heo 	atomic_long_set(&work->data, data | flags | work_static(work));
4027a22ad75STejun Heo }
4037a22ad75STejun Heo 
4047a22ad75STejun Heo static void set_work_cwq(struct work_struct *work,
4054690c4abSTejun Heo 			 struct cpu_workqueue_struct *cwq,
4064690c4abSTejun Heo 			 unsigned long extra_flags)
407365970a1SDavid Howells {
4087a22ad75STejun Heo 	set_work_data(work, (unsigned long)cwq,
40922df02bbSTejun Heo 		      WORK_STRUCT_PENDING | extra_flags);
410365970a1SDavid Howells }
411365970a1SDavid Howells 
4127a22ad75STejun Heo static void set_work_cpu(struct work_struct *work, unsigned int cpu)
4134d707b9fSOleg Nesterov {
4147a22ad75STejun Heo 	set_work_data(work, cpu << WORK_STRUCT_FLAG_BITS, WORK_STRUCT_PENDING);
4154d707b9fSOleg Nesterov }
4164d707b9fSOleg Nesterov 
4177a22ad75STejun Heo static void clear_work_data(struct work_struct *work)
418365970a1SDavid Howells {
4197a22ad75STejun Heo 	set_work_data(work, WORK_STRUCT_NO_CPU, 0);
4207a22ad75STejun Heo }
4217a22ad75STejun Heo 
4227a22ad75STejun Heo static inline unsigned long get_work_data(struct work_struct *work)
4237a22ad75STejun Heo {
4247a22ad75STejun Heo 	return atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK;
4257a22ad75STejun Heo }
4267a22ad75STejun Heo 
4277a22ad75STejun Heo static struct cpu_workqueue_struct *get_work_cwq(struct work_struct *work)
4287a22ad75STejun Heo {
4297a22ad75STejun Heo 	unsigned long data = get_work_data(work);
4307a22ad75STejun Heo 
4317a22ad75STejun Heo 	return data >= PAGE_OFFSET ? (void *)data : NULL;
4327a22ad75STejun Heo }
4337a22ad75STejun Heo 
4347a22ad75STejun Heo static struct global_cwq *get_work_gcwq(struct work_struct *work)
4357a22ad75STejun Heo {
4367a22ad75STejun Heo 	unsigned long data = get_work_data(work);
4377a22ad75STejun Heo 	unsigned int cpu;
4387a22ad75STejun Heo 
4397a22ad75STejun Heo 	if (data >= PAGE_OFFSET)
4407a22ad75STejun Heo 		return ((struct cpu_workqueue_struct *)data)->gcwq;
4417a22ad75STejun Heo 
4427a22ad75STejun Heo 	cpu = data >> WORK_STRUCT_FLAG_BITS;
4437a22ad75STejun Heo 	if (cpu == NR_CPUS)
4447a22ad75STejun Heo 		return NULL;
4457a22ad75STejun Heo 
4467a22ad75STejun Heo 	BUG_ON(cpu >= num_possible_cpus());
4477a22ad75STejun Heo 	return get_gcwq(cpu);
448365970a1SDavid Howells }
449365970a1SDavid Howells 
450e22bee78STejun Heo /*
451e22bee78STejun Heo  * Policy functions.  These define the policies on how the global
452e22bee78STejun Heo  * worker pool is managed.  Unless noted otherwise, these functions
453e22bee78STejun Heo  * assume that they're being called with gcwq->lock held.
454e22bee78STejun Heo  */
455e22bee78STejun Heo 
456*649027d7STejun Heo static bool __need_more_worker(struct global_cwq *gcwq)
457*649027d7STejun Heo {
458*649027d7STejun Heo 	return !atomic_read(get_gcwq_nr_running(gcwq->cpu)) ||
459*649027d7STejun Heo 		gcwq->flags & GCWQ_HIGHPRI_PENDING;
460*649027d7STejun Heo }
461*649027d7STejun Heo 
462e22bee78STejun Heo /*
463e22bee78STejun Heo  * Need to wake up a worker?  Called from anything but currently
464e22bee78STejun Heo  * running workers.
465e22bee78STejun Heo  */
466e22bee78STejun Heo static bool need_more_worker(struct global_cwq *gcwq)
467e22bee78STejun Heo {
468*649027d7STejun Heo 	return !list_empty(&gcwq->worklist) && __need_more_worker(gcwq);
469e22bee78STejun Heo }
470e22bee78STejun Heo 
471e22bee78STejun Heo /* Can I start working?  Called from busy but !running workers. */
472e22bee78STejun Heo static bool may_start_working(struct global_cwq *gcwq)
473e22bee78STejun Heo {
474e22bee78STejun Heo 	return gcwq->nr_idle;
475e22bee78STejun Heo }
476e22bee78STejun Heo 
477e22bee78STejun Heo /* Do I need to keep working?  Called from currently running workers. */
478e22bee78STejun Heo static bool keep_working(struct global_cwq *gcwq)
479e22bee78STejun Heo {
480e22bee78STejun Heo 	atomic_t *nr_running = get_gcwq_nr_running(gcwq->cpu);
481e22bee78STejun Heo 
482e22bee78STejun Heo 	return !list_empty(&gcwq->worklist) && atomic_read(nr_running) <= 1;
483e22bee78STejun Heo }
484e22bee78STejun Heo 
485e22bee78STejun Heo /* Do we need a new worker?  Called from manager. */
486e22bee78STejun Heo static bool need_to_create_worker(struct global_cwq *gcwq)
487e22bee78STejun Heo {
488e22bee78STejun Heo 	return need_more_worker(gcwq) && !may_start_working(gcwq);
489e22bee78STejun Heo }
490e22bee78STejun Heo 
491e22bee78STejun Heo /* Do I need to be the manager? */
492e22bee78STejun Heo static bool need_to_manage_workers(struct global_cwq *gcwq)
493e22bee78STejun Heo {
494e22bee78STejun Heo 	return need_to_create_worker(gcwq) || gcwq->flags & GCWQ_MANAGE_WORKERS;
495e22bee78STejun Heo }
496e22bee78STejun Heo 
497e22bee78STejun Heo /* Do we have too many workers and should some go away? */
498e22bee78STejun Heo static bool too_many_workers(struct global_cwq *gcwq)
499e22bee78STejun Heo {
500e22bee78STejun Heo 	bool managing = gcwq->flags & GCWQ_MANAGING_WORKERS;
501e22bee78STejun Heo 	int nr_idle = gcwq->nr_idle + managing; /* manager is considered idle */
502e22bee78STejun Heo 	int nr_busy = gcwq->nr_workers - nr_idle;
503e22bee78STejun Heo 
504e22bee78STejun Heo 	return nr_idle > 2 && (nr_idle - 2) * MAX_IDLE_WORKERS_RATIO >= nr_busy;
505e22bee78STejun Heo }
506e22bee78STejun Heo 
507e22bee78STejun Heo /*
508e22bee78STejun Heo  * Wake up functions.
509e22bee78STejun Heo  */
510e22bee78STejun Heo 
5117e11629dSTejun Heo /* Return the first worker.  Safe with preemption disabled */
5127e11629dSTejun Heo static struct worker *first_worker(struct global_cwq *gcwq)
5137e11629dSTejun Heo {
5147e11629dSTejun Heo 	if (unlikely(list_empty(&gcwq->idle_list)))
5157e11629dSTejun Heo 		return NULL;
5167e11629dSTejun Heo 
5177e11629dSTejun Heo 	return list_first_entry(&gcwq->idle_list, struct worker, entry);
5187e11629dSTejun Heo }
5197e11629dSTejun Heo 
5207e11629dSTejun Heo /**
5217e11629dSTejun Heo  * wake_up_worker - wake up an idle worker
5227e11629dSTejun Heo  * @gcwq: gcwq to wake worker for
5237e11629dSTejun Heo  *
5247e11629dSTejun Heo  * Wake up the first idle worker of @gcwq.
5257e11629dSTejun Heo  *
5267e11629dSTejun Heo  * CONTEXT:
5277e11629dSTejun Heo  * spin_lock_irq(gcwq->lock).
5287e11629dSTejun Heo  */
5297e11629dSTejun Heo static void wake_up_worker(struct global_cwq *gcwq)
5307e11629dSTejun Heo {
5317e11629dSTejun Heo 	struct worker *worker = first_worker(gcwq);
5327e11629dSTejun Heo 
5337e11629dSTejun Heo 	if (likely(worker))
5347e11629dSTejun Heo 		wake_up_process(worker->task);
5357e11629dSTejun Heo }
5367e11629dSTejun Heo 
5374690c4abSTejun Heo /**
538e22bee78STejun Heo  * wq_worker_waking_up - a worker is waking up
539e22bee78STejun Heo  * @task: task waking up
540e22bee78STejun Heo  * @cpu: CPU @task is waking up to
541e22bee78STejun Heo  *
542e22bee78STejun Heo  * This function is called during try_to_wake_up() when a worker is
543e22bee78STejun Heo  * being awoken.
544e22bee78STejun Heo  *
545e22bee78STejun Heo  * CONTEXT:
546e22bee78STejun Heo  * spin_lock_irq(rq->lock)
547e22bee78STejun Heo  */
548e22bee78STejun Heo void wq_worker_waking_up(struct task_struct *task, unsigned int cpu)
549e22bee78STejun Heo {
550e22bee78STejun Heo 	struct worker *worker = kthread_data(task);
551e22bee78STejun Heo 
552e22bee78STejun Heo 	if (likely(!(worker->flags & WORKER_NOT_RUNNING)))
553e22bee78STejun Heo 		atomic_inc(get_gcwq_nr_running(cpu));
554e22bee78STejun Heo }
555e22bee78STejun Heo 
556e22bee78STejun Heo /**
557e22bee78STejun Heo  * wq_worker_sleeping - a worker is going to sleep
558e22bee78STejun Heo  * @task: task going to sleep
559e22bee78STejun Heo  * @cpu: CPU in question, must be the current CPU number
560e22bee78STejun Heo  *
561e22bee78STejun Heo  * This function is called during schedule() when a busy worker is
562e22bee78STejun Heo  * going to sleep.  Worker on the same cpu can be woken up by
563e22bee78STejun Heo  * returning pointer to its task.
564e22bee78STejun Heo  *
565e22bee78STejun Heo  * CONTEXT:
566e22bee78STejun Heo  * spin_lock_irq(rq->lock)
567e22bee78STejun Heo  *
568e22bee78STejun Heo  * RETURNS:
569e22bee78STejun Heo  * Worker task on @cpu to wake up, %NULL if none.
570e22bee78STejun Heo  */
571e22bee78STejun Heo struct task_struct *wq_worker_sleeping(struct task_struct *task,
572e22bee78STejun Heo 				       unsigned int cpu)
573e22bee78STejun Heo {
574e22bee78STejun Heo 	struct worker *worker = kthread_data(task), *to_wakeup = NULL;
575e22bee78STejun Heo 	struct global_cwq *gcwq = get_gcwq(cpu);
576e22bee78STejun Heo 	atomic_t *nr_running = get_gcwq_nr_running(cpu);
577e22bee78STejun Heo 
578e22bee78STejun Heo 	if (unlikely(worker->flags & WORKER_NOT_RUNNING))
579e22bee78STejun Heo 		return NULL;
580e22bee78STejun Heo 
581e22bee78STejun Heo 	/* this can only happen on the local cpu */
582e22bee78STejun Heo 	BUG_ON(cpu != raw_smp_processor_id());
583e22bee78STejun Heo 
584e22bee78STejun Heo 	/*
585e22bee78STejun Heo 	 * The counterpart of the following dec_and_test, implied mb,
586e22bee78STejun Heo 	 * worklist not empty test sequence is in insert_work().
587e22bee78STejun Heo 	 * Please read comment there.
588e22bee78STejun Heo 	 *
589e22bee78STejun Heo 	 * NOT_RUNNING is clear.  This means that trustee is not in
590e22bee78STejun Heo 	 * charge and we're running on the local cpu w/ rq lock held
591e22bee78STejun Heo 	 * and preemption disabled, which in turn means that none else
592e22bee78STejun Heo 	 * could be manipulating idle_list, so dereferencing idle_list
593e22bee78STejun Heo 	 * without gcwq lock is safe.
594e22bee78STejun Heo 	 */
595e22bee78STejun Heo 	if (atomic_dec_and_test(nr_running) && !list_empty(&gcwq->worklist))
596e22bee78STejun Heo 		to_wakeup = first_worker(gcwq);
597e22bee78STejun Heo 	return to_wakeup ? to_wakeup->task : NULL;
598e22bee78STejun Heo }
599e22bee78STejun Heo 
600e22bee78STejun Heo /**
601e22bee78STejun Heo  * worker_set_flags - set worker flags and adjust nr_running accordingly
602d302f017STejun Heo  * @worker: worker to set flags for
603d302f017STejun Heo  * @flags: flags to set
604d302f017STejun Heo  * @wakeup: wakeup an idle worker if necessary
605d302f017STejun Heo  *
606e22bee78STejun Heo  * Set @flags in @worker->flags and adjust nr_running accordingly.  If
607e22bee78STejun Heo  * nr_running becomes zero and @wakeup is %true, an idle worker is
608e22bee78STejun Heo  * woken up.
609d302f017STejun Heo  *
610d302f017STejun Heo  * LOCKING:
611d302f017STejun Heo  * spin_lock_irq(gcwq->lock).
612d302f017STejun Heo  */
613d302f017STejun Heo static inline void worker_set_flags(struct worker *worker, unsigned int flags,
614d302f017STejun Heo 				    bool wakeup)
615d302f017STejun Heo {
616e22bee78STejun Heo 	struct global_cwq *gcwq = worker->gcwq;
617e22bee78STejun Heo 
618e22bee78STejun Heo 	/*
619e22bee78STejun Heo 	 * If transitioning into NOT_RUNNING, adjust nr_running and
620e22bee78STejun Heo 	 * wake up an idle worker as necessary if requested by
621e22bee78STejun Heo 	 * @wakeup.
622e22bee78STejun Heo 	 */
623e22bee78STejun Heo 	if ((flags & WORKER_NOT_RUNNING) &&
624e22bee78STejun Heo 	    !(worker->flags & WORKER_NOT_RUNNING)) {
625e22bee78STejun Heo 		atomic_t *nr_running = get_gcwq_nr_running(gcwq->cpu);
626e22bee78STejun Heo 
627e22bee78STejun Heo 		if (wakeup) {
628e22bee78STejun Heo 			if (atomic_dec_and_test(nr_running) &&
629e22bee78STejun Heo 			    !list_empty(&gcwq->worklist))
630e22bee78STejun Heo 				wake_up_worker(gcwq);
631e22bee78STejun Heo 		} else
632e22bee78STejun Heo 			atomic_dec(nr_running);
633e22bee78STejun Heo 	}
634e22bee78STejun Heo 
635d302f017STejun Heo 	worker->flags |= flags;
636d302f017STejun Heo }
637d302f017STejun Heo 
638d302f017STejun Heo /**
639e22bee78STejun Heo  * worker_clr_flags - clear worker flags and adjust nr_running accordingly
640d302f017STejun Heo  * @worker: worker to set flags for
641d302f017STejun Heo  * @flags: flags to clear
642d302f017STejun Heo  *
643e22bee78STejun Heo  * Clear @flags in @worker->flags and adjust nr_running accordingly.
644d302f017STejun Heo  *
645d302f017STejun Heo  * LOCKING:
646d302f017STejun Heo  * spin_lock_irq(gcwq->lock).
647d302f017STejun Heo  */
648d302f017STejun Heo static inline void worker_clr_flags(struct worker *worker, unsigned int flags)
649d302f017STejun Heo {
650e22bee78STejun Heo 	struct global_cwq *gcwq = worker->gcwq;
651e22bee78STejun Heo 	unsigned int oflags = worker->flags;
652e22bee78STejun Heo 
653d302f017STejun Heo 	worker->flags &= ~flags;
654e22bee78STejun Heo 
655e22bee78STejun Heo 	/* if transitioning out of NOT_RUNNING, increment nr_running */
656e22bee78STejun Heo 	if ((flags & WORKER_NOT_RUNNING) && (oflags & WORKER_NOT_RUNNING))
657e22bee78STejun Heo 		if (!(worker->flags & WORKER_NOT_RUNNING))
658e22bee78STejun Heo 			atomic_inc(get_gcwq_nr_running(gcwq->cpu));
659d302f017STejun Heo }
660d302f017STejun Heo 
661d302f017STejun Heo /**
662c8e55f36STejun Heo  * busy_worker_head - return the busy hash head for a work
663c8e55f36STejun Heo  * @gcwq: gcwq of interest
664c8e55f36STejun Heo  * @work: work to be hashed
665c8e55f36STejun Heo  *
666c8e55f36STejun Heo  * Return hash head of @gcwq for @work.
667c8e55f36STejun Heo  *
668c8e55f36STejun Heo  * CONTEXT:
669c8e55f36STejun Heo  * spin_lock_irq(gcwq->lock).
670c8e55f36STejun Heo  *
671c8e55f36STejun Heo  * RETURNS:
672c8e55f36STejun Heo  * Pointer to the hash head.
673c8e55f36STejun Heo  */
674c8e55f36STejun Heo static struct hlist_head *busy_worker_head(struct global_cwq *gcwq,
675c8e55f36STejun Heo 					   struct work_struct *work)
676c8e55f36STejun Heo {
677c8e55f36STejun Heo 	const int base_shift = ilog2(sizeof(struct work_struct));
678c8e55f36STejun Heo 	unsigned long v = (unsigned long)work;
679c8e55f36STejun Heo 
680c8e55f36STejun Heo 	/* simple shift and fold hash, do we need something better? */
681c8e55f36STejun Heo 	v >>= base_shift;
682c8e55f36STejun Heo 	v += v >> BUSY_WORKER_HASH_ORDER;
683c8e55f36STejun Heo 	v &= BUSY_WORKER_HASH_MASK;
684c8e55f36STejun Heo 
685c8e55f36STejun Heo 	return &gcwq->busy_hash[v];
686c8e55f36STejun Heo }
687c8e55f36STejun Heo 
688c8e55f36STejun Heo /**
6898cca0eeaSTejun Heo  * __find_worker_executing_work - find worker which is executing a work
6908cca0eeaSTejun Heo  * @gcwq: gcwq of interest
6918cca0eeaSTejun Heo  * @bwh: hash head as returned by busy_worker_head()
6928cca0eeaSTejun Heo  * @work: work to find worker for
6938cca0eeaSTejun Heo  *
6948cca0eeaSTejun Heo  * Find a worker which is executing @work on @gcwq.  @bwh should be
6958cca0eeaSTejun Heo  * the hash head obtained by calling busy_worker_head() with the same
6968cca0eeaSTejun Heo  * work.
6978cca0eeaSTejun Heo  *
6988cca0eeaSTejun Heo  * CONTEXT:
6998cca0eeaSTejun Heo  * spin_lock_irq(gcwq->lock).
7008cca0eeaSTejun Heo  *
7018cca0eeaSTejun Heo  * RETURNS:
7028cca0eeaSTejun Heo  * Pointer to worker which is executing @work if found, NULL
7038cca0eeaSTejun Heo  * otherwise.
7048cca0eeaSTejun Heo  */
7058cca0eeaSTejun Heo static struct worker *__find_worker_executing_work(struct global_cwq *gcwq,
7068cca0eeaSTejun Heo 						   struct hlist_head *bwh,
7078cca0eeaSTejun Heo 						   struct work_struct *work)
7088cca0eeaSTejun Heo {
7098cca0eeaSTejun Heo 	struct worker *worker;
7108cca0eeaSTejun Heo 	struct hlist_node *tmp;
7118cca0eeaSTejun Heo 
7128cca0eeaSTejun Heo 	hlist_for_each_entry(worker, tmp, bwh, hentry)
7138cca0eeaSTejun Heo 		if (worker->current_work == work)
7148cca0eeaSTejun Heo 			return worker;
7158cca0eeaSTejun Heo 	return NULL;
7168cca0eeaSTejun Heo }
7178cca0eeaSTejun Heo 
7188cca0eeaSTejun Heo /**
7198cca0eeaSTejun Heo  * find_worker_executing_work - find worker which is executing a work
7208cca0eeaSTejun Heo  * @gcwq: gcwq of interest
7218cca0eeaSTejun Heo  * @work: work to find worker for
7228cca0eeaSTejun Heo  *
7238cca0eeaSTejun Heo  * Find a worker which is executing @work on @gcwq.  This function is
7248cca0eeaSTejun Heo  * identical to __find_worker_executing_work() except that this
7258cca0eeaSTejun Heo  * function calculates @bwh itself.
7268cca0eeaSTejun Heo  *
7278cca0eeaSTejun Heo  * CONTEXT:
7288cca0eeaSTejun Heo  * spin_lock_irq(gcwq->lock).
7298cca0eeaSTejun Heo  *
7308cca0eeaSTejun Heo  * RETURNS:
7318cca0eeaSTejun Heo  * Pointer to worker which is executing @work if found, NULL
7328cca0eeaSTejun Heo  * otherwise.
7338cca0eeaSTejun Heo  */
7348cca0eeaSTejun Heo static struct worker *find_worker_executing_work(struct global_cwq *gcwq,
7358cca0eeaSTejun Heo 						 struct work_struct *work)
7368cca0eeaSTejun Heo {
7378cca0eeaSTejun Heo 	return __find_worker_executing_work(gcwq, busy_worker_head(gcwq, work),
7388cca0eeaSTejun Heo 					    work);
7398cca0eeaSTejun Heo }
7408cca0eeaSTejun Heo 
7418cca0eeaSTejun Heo /**
742*649027d7STejun Heo  * gcwq_determine_ins_pos - find insertion position
743*649027d7STejun Heo  * @gcwq: gcwq of interest
744*649027d7STejun Heo  * @cwq: cwq a work is being queued for
745*649027d7STejun Heo  *
746*649027d7STejun Heo  * A work for @cwq is about to be queued on @gcwq, determine insertion
747*649027d7STejun Heo  * position for the work.  If @cwq is for HIGHPRI wq, the work is
748*649027d7STejun Heo  * queued at the head of the queue but in FIFO order with respect to
749*649027d7STejun Heo  * other HIGHPRI works; otherwise, at the end of the queue.  This
750*649027d7STejun Heo  * function also sets GCWQ_HIGHPRI_PENDING flag to hint @gcwq that
751*649027d7STejun Heo  * there are HIGHPRI works pending.
752*649027d7STejun Heo  *
753*649027d7STejun Heo  * CONTEXT:
754*649027d7STejun Heo  * spin_lock_irq(gcwq->lock).
755*649027d7STejun Heo  *
756*649027d7STejun Heo  * RETURNS:
757*649027d7STejun Heo  * Pointer to inserstion position.
758*649027d7STejun Heo  */
759*649027d7STejun Heo static inline struct list_head *gcwq_determine_ins_pos(struct global_cwq *gcwq,
760*649027d7STejun Heo 					       struct cpu_workqueue_struct *cwq)
761*649027d7STejun Heo {
762*649027d7STejun Heo 	struct work_struct *twork;
763*649027d7STejun Heo 
764*649027d7STejun Heo 	if (likely(!(cwq->wq->flags & WQ_HIGHPRI)))
765*649027d7STejun Heo 		return &gcwq->worklist;
766*649027d7STejun Heo 
767*649027d7STejun Heo 	list_for_each_entry(twork, &gcwq->worklist, entry) {
768*649027d7STejun Heo 		struct cpu_workqueue_struct *tcwq = get_work_cwq(twork);
769*649027d7STejun Heo 
770*649027d7STejun Heo 		if (!(tcwq->wq->flags & WQ_HIGHPRI))
771*649027d7STejun Heo 			break;
772*649027d7STejun Heo 	}
773*649027d7STejun Heo 
774*649027d7STejun Heo 	gcwq->flags |= GCWQ_HIGHPRI_PENDING;
775*649027d7STejun Heo 	return &twork->entry;
776*649027d7STejun Heo }
777*649027d7STejun Heo 
778*649027d7STejun Heo /**
7797e11629dSTejun Heo  * insert_work - insert a work into gcwq
7804690c4abSTejun Heo  * @cwq: cwq @work belongs to
7814690c4abSTejun Heo  * @work: work to insert
7824690c4abSTejun Heo  * @head: insertion point
7834690c4abSTejun Heo  * @extra_flags: extra WORK_STRUCT_* flags to set
7844690c4abSTejun Heo  *
7857e11629dSTejun Heo  * Insert @work which belongs to @cwq into @gcwq after @head.
7867e11629dSTejun Heo  * @extra_flags is or'd to work_struct flags.
7874690c4abSTejun Heo  *
7884690c4abSTejun Heo  * CONTEXT:
7898b03ae3cSTejun Heo  * spin_lock_irq(gcwq->lock).
7904690c4abSTejun Heo  */
791b89deed3SOleg Nesterov static void insert_work(struct cpu_workqueue_struct *cwq,
7924690c4abSTejun Heo 			struct work_struct *work, struct list_head *head,
7934690c4abSTejun Heo 			unsigned int extra_flags)
794b89deed3SOleg Nesterov {
795e22bee78STejun Heo 	struct global_cwq *gcwq = cwq->gcwq;
796e22bee78STejun Heo 
7974690c4abSTejun Heo 	/* we own @work, set data and link */
7987a22ad75STejun Heo 	set_work_cwq(work, cwq, extra_flags);
7994690c4abSTejun Heo 
8006e84d644SOleg Nesterov 	/*
8016e84d644SOleg Nesterov 	 * Ensure that we get the right work->data if we see the
8026e84d644SOleg Nesterov 	 * result of list_add() below, see try_to_grab_pending().
8036e84d644SOleg Nesterov 	 */
8046e84d644SOleg Nesterov 	smp_wmb();
8054690c4abSTejun Heo 
8061a4d9b0aSOleg Nesterov 	list_add_tail(&work->entry, head);
807e22bee78STejun Heo 
808e22bee78STejun Heo 	/*
809e22bee78STejun Heo 	 * Ensure either worker_sched_deactivated() sees the above
810e22bee78STejun Heo 	 * list_add_tail() or we see zero nr_running to avoid workers
811e22bee78STejun Heo 	 * lying around lazily while there are works to be processed.
812e22bee78STejun Heo 	 */
813e22bee78STejun Heo 	smp_mb();
814e22bee78STejun Heo 
815*649027d7STejun Heo 	if (__need_more_worker(gcwq))
816e22bee78STejun Heo 		wake_up_worker(gcwq);
817b89deed3SOleg Nesterov }
818b89deed3SOleg Nesterov 
819502ca9d8STejun Heo /**
820502ca9d8STejun Heo  * cwq_unbind_single_cpu - unbind cwq from single cpu workqueue processing
821502ca9d8STejun Heo  * @cwq: cwq to unbind
822502ca9d8STejun Heo  *
823502ca9d8STejun Heo  * Try to unbind @cwq from single cpu workqueue processing.  If
824502ca9d8STejun Heo  * @cwq->wq is frozen, unbind is delayed till the workqueue is thawed.
825502ca9d8STejun Heo  *
826502ca9d8STejun Heo  * CONTEXT:
827502ca9d8STejun Heo  * spin_lock_irq(gcwq->lock).
828502ca9d8STejun Heo  */
829502ca9d8STejun Heo static void cwq_unbind_single_cpu(struct cpu_workqueue_struct *cwq)
830502ca9d8STejun Heo {
831502ca9d8STejun Heo 	struct workqueue_struct *wq = cwq->wq;
832502ca9d8STejun Heo 	struct global_cwq *gcwq = cwq->gcwq;
833502ca9d8STejun Heo 
834502ca9d8STejun Heo 	BUG_ON(wq->single_cpu != gcwq->cpu);
835502ca9d8STejun Heo 	/*
836502ca9d8STejun Heo 	 * Unbind from workqueue if @cwq is not frozen.  If frozen,
837502ca9d8STejun Heo 	 * thaw_workqueues() will either restart processing on this
838502ca9d8STejun Heo 	 * cpu or unbind if empty.  This keeps works queued while
839502ca9d8STejun Heo 	 * frozen fully ordered and flushable.
840502ca9d8STejun Heo 	 */
841502ca9d8STejun Heo 	if (likely(!(gcwq->flags & GCWQ_FREEZING))) {
842502ca9d8STejun Heo 		smp_wmb();	/* paired with cmpxchg() in __queue_work() */
843502ca9d8STejun Heo 		wq->single_cpu = NR_CPUS;
844502ca9d8STejun Heo 	}
845502ca9d8STejun Heo }
846502ca9d8STejun Heo 
8474690c4abSTejun Heo static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
8481da177e4SLinus Torvalds 			 struct work_struct *work)
8491da177e4SLinus Torvalds {
850502ca9d8STejun Heo 	struct global_cwq *gcwq;
851502ca9d8STejun Heo 	struct cpu_workqueue_struct *cwq;
8521e19ffc6STejun Heo 	struct list_head *worklist;
8531da177e4SLinus Torvalds 	unsigned long flags;
854502ca9d8STejun Heo 	bool arbitrate;
8551da177e4SLinus Torvalds 
856dc186ad7SThomas Gleixner 	debug_work_activate(work);
8571e19ffc6STejun Heo 
85818aa9effSTejun Heo 	/*
85918aa9effSTejun Heo 	 * Determine gcwq to use.  SINGLE_CPU is inherently
86018aa9effSTejun Heo 	 * NON_REENTRANT, so test it first.
86118aa9effSTejun Heo 	 */
862502ca9d8STejun Heo 	if (!(wq->flags & WQ_SINGLE_CPU)) {
86318aa9effSTejun Heo 		struct global_cwq *last_gcwq;
86418aa9effSTejun Heo 
86518aa9effSTejun Heo 		/*
86618aa9effSTejun Heo 		 * It's multi cpu.  If @wq is non-reentrant and @work
86718aa9effSTejun Heo 		 * was previously on a different cpu, it might still
86818aa9effSTejun Heo 		 * be running there, in which case the work needs to
86918aa9effSTejun Heo 		 * be queued on that cpu to guarantee non-reentrance.
87018aa9effSTejun Heo 		 */
871502ca9d8STejun Heo 		gcwq = get_gcwq(cpu);
87218aa9effSTejun Heo 		if (wq->flags & WQ_NON_REENTRANT &&
87318aa9effSTejun Heo 		    (last_gcwq = get_work_gcwq(work)) && last_gcwq != gcwq) {
87418aa9effSTejun Heo 			struct worker *worker;
87518aa9effSTejun Heo 
87618aa9effSTejun Heo 			spin_lock_irqsave(&last_gcwq->lock, flags);
87718aa9effSTejun Heo 
87818aa9effSTejun Heo 			worker = find_worker_executing_work(last_gcwq, work);
87918aa9effSTejun Heo 
88018aa9effSTejun Heo 			if (worker && worker->current_cwq->wq == wq)
88118aa9effSTejun Heo 				gcwq = last_gcwq;
88218aa9effSTejun Heo 			else {
88318aa9effSTejun Heo 				/* meh... not running there, queue here */
88418aa9effSTejun Heo 				spin_unlock_irqrestore(&last_gcwq->lock, flags);
88518aa9effSTejun Heo 				spin_lock_irqsave(&gcwq->lock, flags);
88618aa9effSTejun Heo 			}
88718aa9effSTejun Heo 		} else
8888b03ae3cSTejun Heo 			spin_lock_irqsave(&gcwq->lock, flags);
889502ca9d8STejun Heo 	} else {
890502ca9d8STejun Heo 		unsigned int req_cpu = cpu;
891502ca9d8STejun Heo 
892502ca9d8STejun Heo 		/*
893502ca9d8STejun Heo 		 * It's a bit more complex for single cpu workqueues.
894502ca9d8STejun Heo 		 * We first need to determine which cpu is going to be
895502ca9d8STejun Heo 		 * used.  If no cpu is currently serving this
896502ca9d8STejun Heo 		 * workqueue, arbitrate using atomic accesses to
897502ca9d8STejun Heo 		 * wq->single_cpu; otherwise, use the current one.
898502ca9d8STejun Heo 		 */
899502ca9d8STejun Heo 	retry:
900502ca9d8STejun Heo 		cpu = wq->single_cpu;
901502ca9d8STejun Heo 		arbitrate = cpu == NR_CPUS;
902502ca9d8STejun Heo 		if (arbitrate)
903502ca9d8STejun Heo 			cpu = req_cpu;
904502ca9d8STejun Heo 
905502ca9d8STejun Heo 		gcwq = get_gcwq(cpu);
906502ca9d8STejun Heo 		spin_lock_irqsave(&gcwq->lock, flags);
907502ca9d8STejun Heo 
908502ca9d8STejun Heo 		/*
909502ca9d8STejun Heo 		 * The following cmpxchg() is a full barrier paired
910502ca9d8STejun Heo 		 * with smp_wmb() in cwq_unbind_single_cpu() and
911502ca9d8STejun Heo 		 * guarantees that all changes to wq->st_* fields are
912502ca9d8STejun Heo 		 * visible on the new cpu after this point.
913502ca9d8STejun Heo 		 */
914502ca9d8STejun Heo 		if (arbitrate)
915502ca9d8STejun Heo 			cmpxchg(&wq->single_cpu, NR_CPUS, cpu);
916502ca9d8STejun Heo 
917502ca9d8STejun Heo 		if (unlikely(wq->single_cpu != cpu)) {
918502ca9d8STejun Heo 			spin_unlock_irqrestore(&gcwq->lock, flags);
919502ca9d8STejun Heo 			goto retry;
920502ca9d8STejun Heo 		}
921502ca9d8STejun Heo 	}
922502ca9d8STejun Heo 
923502ca9d8STejun Heo 	/* gcwq determined, get cwq and queue */
924502ca9d8STejun Heo 	cwq = get_cwq(gcwq->cpu, wq);
925502ca9d8STejun Heo 
9264690c4abSTejun Heo 	BUG_ON(!list_empty(&work->entry));
9271e19ffc6STejun Heo 
92873f53c4aSTejun Heo 	cwq->nr_in_flight[cwq->work_color]++;
9291e19ffc6STejun Heo 
9301e19ffc6STejun Heo 	if (likely(cwq->nr_active < cwq->max_active)) {
9311e19ffc6STejun Heo 		cwq->nr_active++;
932*649027d7STejun Heo 		worklist = gcwq_determine_ins_pos(gcwq, cwq);
9331e19ffc6STejun Heo 	} else
9341e19ffc6STejun Heo 		worklist = &cwq->delayed_works;
9351e19ffc6STejun Heo 
9361e19ffc6STejun Heo 	insert_work(cwq, work, worklist, work_color_to_flags(cwq->work_color));
9371e19ffc6STejun Heo 
9388b03ae3cSTejun Heo 	spin_unlock_irqrestore(&gcwq->lock, flags);
9391da177e4SLinus Torvalds }
9401da177e4SLinus Torvalds 
9410fcb78c2SRolf Eike Beer /**
9420fcb78c2SRolf Eike Beer  * queue_work - queue work on a workqueue
9430fcb78c2SRolf Eike Beer  * @wq: workqueue to use
9440fcb78c2SRolf Eike Beer  * @work: work to queue
9450fcb78c2SRolf Eike Beer  *
946057647fcSAlan Stern  * Returns 0 if @work was already on a queue, non-zero otherwise.
9471da177e4SLinus Torvalds  *
94800dfcaf7SOleg Nesterov  * We queue the work to the CPU on which it was submitted, but if the CPU dies
94900dfcaf7SOleg Nesterov  * it can be processed by another CPU.
9501da177e4SLinus Torvalds  */
9517ad5b3a5SHarvey Harrison int queue_work(struct workqueue_struct *wq, struct work_struct *work)
9521da177e4SLinus Torvalds {
953ef1ca236SOleg Nesterov 	int ret;
9541da177e4SLinus Torvalds 
955ef1ca236SOleg Nesterov 	ret = queue_work_on(get_cpu(), wq, work);
956a848e3b6SOleg Nesterov 	put_cpu();
957ef1ca236SOleg Nesterov 
9581da177e4SLinus Torvalds 	return ret;
9591da177e4SLinus Torvalds }
960ae90dd5dSDave Jones EXPORT_SYMBOL_GPL(queue_work);
9611da177e4SLinus Torvalds 
962c1a220e7SZhang Rui /**
963c1a220e7SZhang Rui  * queue_work_on - queue work on specific cpu
964c1a220e7SZhang Rui  * @cpu: CPU number to execute work on
965c1a220e7SZhang Rui  * @wq: workqueue to use
966c1a220e7SZhang Rui  * @work: work to queue
967c1a220e7SZhang Rui  *
968c1a220e7SZhang Rui  * Returns 0 if @work was already on a queue, non-zero otherwise.
969c1a220e7SZhang Rui  *
970c1a220e7SZhang Rui  * We queue the work to a specific CPU, the caller must ensure it
971c1a220e7SZhang Rui  * can't go away.
972c1a220e7SZhang Rui  */
973c1a220e7SZhang Rui int
974c1a220e7SZhang Rui queue_work_on(int cpu, struct workqueue_struct *wq, struct work_struct *work)
975c1a220e7SZhang Rui {
976c1a220e7SZhang Rui 	int ret = 0;
977c1a220e7SZhang Rui 
97822df02bbSTejun Heo 	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
9794690c4abSTejun Heo 		__queue_work(cpu, wq, work);
980c1a220e7SZhang Rui 		ret = 1;
981c1a220e7SZhang Rui 	}
982c1a220e7SZhang Rui 	return ret;
983c1a220e7SZhang Rui }
984c1a220e7SZhang Rui EXPORT_SYMBOL_GPL(queue_work_on);
985c1a220e7SZhang Rui 
9866d141c3fSLi Zefan static void delayed_work_timer_fn(unsigned long __data)
9871da177e4SLinus Torvalds {
98852bad64dSDavid Howells 	struct delayed_work *dwork = (struct delayed_work *)__data;
9897a22ad75STejun Heo 	struct cpu_workqueue_struct *cwq = get_work_cwq(&dwork->work);
9901da177e4SLinus Torvalds 
9914690c4abSTejun Heo 	__queue_work(smp_processor_id(), cwq->wq, &dwork->work);
9921da177e4SLinus Torvalds }
9931da177e4SLinus Torvalds 
9940fcb78c2SRolf Eike Beer /**
9950fcb78c2SRolf Eike Beer  * queue_delayed_work - queue work on a workqueue after delay
9960fcb78c2SRolf Eike Beer  * @wq: workqueue to use
997af9997e4SRandy Dunlap  * @dwork: delayable work to queue
9980fcb78c2SRolf Eike Beer  * @delay: number of jiffies to wait before queueing
9990fcb78c2SRolf Eike Beer  *
1000057647fcSAlan Stern  * Returns 0 if @work was already on a queue, non-zero otherwise.
10010fcb78c2SRolf Eike Beer  */
10027ad5b3a5SHarvey Harrison int queue_delayed_work(struct workqueue_struct *wq,
100352bad64dSDavid Howells 			struct delayed_work *dwork, unsigned long delay)
10041da177e4SLinus Torvalds {
100552bad64dSDavid Howells 	if (delay == 0)
100663bc0362SOleg Nesterov 		return queue_work(wq, &dwork->work);
10071da177e4SLinus Torvalds 
100863bc0362SOleg Nesterov 	return queue_delayed_work_on(-1, wq, dwork, delay);
10091da177e4SLinus Torvalds }
1010ae90dd5dSDave Jones EXPORT_SYMBOL_GPL(queue_delayed_work);
10111da177e4SLinus Torvalds 
10120fcb78c2SRolf Eike Beer /**
10130fcb78c2SRolf Eike Beer  * queue_delayed_work_on - queue work on specific CPU after delay
10140fcb78c2SRolf Eike Beer  * @cpu: CPU number to execute work on
10150fcb78c2SRolf Eike Beer  * @wq: workqueue to use
1016af9997e4SRandy Dunlap  * @dwork: work to queue
10170fcb78c2SRolf Eike Beer  * @delay: number of jiffies to wait before queueing
10180fcb78c2SRolf Eike Beer  *
1019057647fcSAlan Stern  * Returns 0 if @work was already on a queue, non-zero otherwise.
10200fcb78c2SRolf Eike Beer  */
10217a6bc1cdSVenkatesh Pallipadi int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
102252bad64dSDavid Howells 			struct delayed_work *dwork, unsigned long delay)
10237a6bc1cdSVenkatesh Pallipadi {
10247a6bc1cdSVenkatesh Pallipadi 	int ret = 0;
102552bad64dSDavid Howells 	struct timer_list *timer = &dwork->timer;
102652bad64dSDavid Howells 	struct work_struct *work = &dwork->work;
10277a6bc1cdSVenkatesh Pallipadi 
102822df02bbSTejun Heo 	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
10297a22ad75STejun Heo 		struct global_cwq *gcwq = get_work_gcwq(work);
10307a22ad75STejun Heo 		unsigned int lcpu = gcwq ? gcwq->cpu : raw_smp_processor_id();
10317a22ad75STejun Heo 
10327a6bc1cdSVenkatesh Pallipadi 		BUG_ON(timer_pending(timer));
10337a6bc1cdSVenkatesh Pallipadi 		BUG_ON(!list_empty(&work->entry));
10347a6bc1cdSVenkatesh Pallipadi 
10358a3e77ccSAndrew Liu 		timer_stats_timer_set_start_info(&dwork->timer);
10367a22ad75STejun Heo 		/*
10377a22ad75STejun Heo 		 * This stores cwq for the moment, for the timer_fn.
10387a22ad75STejun Heo 		 * Note that the work's gcwq is preserved to allow
10397a22ad75STejun Heo 		 * reentrance detection for delayed works.
10407a22ad75STejun Heo 		 */
10417a22ad75STejun Heo 		set_work_cwq(work, get_cwq(lcpu, wq), 0);
10427a6bc1cdSVenkatesh Pallipadi 		timer->expires = jiffies + delay;
104352bad64dSDavid Howells 		timer->data = (unsigned long)dwork;
10447a6bc1cdSVenkatesh Pallipadi 		timer->function = delayed_work_timer_fn;
104563bc0362SOleg Nesterov 
104663bc0362SOleg Nesterov 		if (unlikely(cpu >= 0))
10477a6bc1cdSVenkatesh Pallipadi 			add_timer_on(timer, cpu);
104863bc0362SOleg Nesterov 		else
104963bc0362SOleg Nesterov 			add_timer(timer);
10507a6bc1cdSVenkatesh Pallipadi 		ret = 1;
10517a6bc1cdSVenkatesh Pallipadi 	}
10527a6bc1cdSVenkatesh Pallipadi 	return ret;
10537a6bc1cdSVenkatesh Pallipadi }
1054ae90dd5dSDave Jones EXPORT_SYMBOL_GPL(queue_delayed_work_on);
10551da177e4SLinus Torvalds 
1056c8e55f36STejun Heo /**
1057c8e55f36STejun Heo  * worker_enter_idle - enter idle state
1058c8e55f36STejun Heo  * @worker: worker which is entering idle state
1059c8e55f36STejun Heo  *
1060c8e55f36STejun Heo  * @worker is entering idle state.  Update stats and idle timer if
1061c8e55f36STejun Heo  * necessary.
1062c8e55f36STejun Heo  *
1063c8e55f36STejun Heo  * LOCKING:
1064c8e55f36STejun Heo  * spin_lock_irq(gcwq->lock).
1065c8e55f36STejun Heo  */
1066c8e55f36STejun Heo static void worker_enter_idle(struct worker *worker)
1067c8e55f36STejun Heo {
1068c8e55f36STejun Heo 	struct global_cwq *gcwq = worker->gcwq;
1069c8e55f36STejun Heo 
1070c8e55f36STejun Heo 	BUG_ON(worker->flags & WORKER_IDLE);
1071c8e55f36STejun Heo 	BUG_ON(!list_empty(&worker->entry) &&
1072c8e55f36STejun Heo 	       (worker->hentry.next || worker->hentry.pprev));
1073c8e55f36STejun Heo 
1074d302f017STejun Heo 	worker_set_flags(worker, WORKER_IDLE, false);
1075c8e55f36STejun Heo 	gcwq->nr_idle++;
1076e22bee78STejun Heo 	worker->last_active = jiffies;
1077c8e55f36STejun Heo 
1078c8e55f36STejun Heo 	/* idle_list is LIFO */
1079c8e55f36STejun Heo 	list_add(&worker->entry, &gcwq->idle_list);
1080db7bccf4STejun Heo 
1081e22bee78STejun Heo 	if (likely(!(worker->flags & WORKER_ROGUE))) {
1082e22bee78STejun Heo 		if (too_many_workers(gcwq) && !timer_pending(&gcwq->idle_timer))
1083e22bee78STejun Heo 			mod_timer(&gcwq->idle_timer,
1084e22bee78STejun Heo 				  jiffies + IDLE_WORKER_TIMEOUT);
1085e22bee78STejun Heo 	} else
1086db7bccf4STejun Heo 		wake_up_all(&gcwq->trustee_wait);
1087c8e55f36STejun Heo }
1088c8e55f36STejun Heo 
1089c8e55f36STejun Heo /**
1090c8e55f36STejun Heo  * worker_leave_idle - leave idle state
1091c8e55f36STejun Heo  * @worker: worker which is leaving idle state
1092c8e55f36STejun Heo  *
1093c8e55f36STejun Heo  * @worker is leaving idle state.  Update stats.
1094c8e55f36STejun Heo  *
1095c8e55f36STejun Heo  * LOCKING:
1096c8e55f36STejun Heo  * spin_lock_irq(gcwq->lock).
1097c8e55f36STejun Heo  */
1098c8e55f36STejun Heo static void worker_leave_idle(struct worker *worker)
1099c8e55f36STejun Heo {
1100c8e55f36STejun Heo 	struct global_cwq *gcwq = worker->gcwq;
1101c8e55f36STejun Heo 
1102c8e55f36STejun Heo 	BUG_ON(!(worker->flags & WORKER_IDLE));
1103d302f017STejun Heo 	worker_clr_flags(worker, WORKER_IDLE);
1104c8e55f36STejun Heo 	gcwq->nr_idle--;
1105c8e55f36STejun Heo 	list_del_init(&worker->entry);
1106c8e55f36STejun Heo }
1107c8e55f36STejun Heo 
1108e22bee78STejun Heo /**
1109e22bee78STejun Heo  * worker_maybe_bind_and_lock - bind worker to its cpu if possible and lock gcwq
1110e22bee78STejun Heo  * @worker: self
1111e22bee78STejun Heo  *
1112e22bee78STejun Heo  * Works which are scheduled while the cpu is online must at least be
1113e22bee78STejun Heo  * scheduled to a worker which is bound to the cpu so that if they are
1114e22bee78STejun Heo  * flushed from cpu callbacks while cpu is going down, they are
1115e22bee78STejun Heo  * guaranteed to execute on the cpu.
1116e22bee78STejun Heo  *
1117e22bee78STejun Heo  * This function is to be used by rogue workers and rescuers to bind
1118e22bee78STejun Heo  * themselves to the target cpu and may race with cpu going down or
1119e22bee78STejun Heo  * coming online.  kthread_bind() can't be used because it may put the
1120e22bee78STejun Heo  * worker to already dead cpu and set_cpus_allowed_ptr() can't be used
1121e22bee78STejun Heo  * verbatim as it's best effort and blocking and gcwq may be
1122e22bee78STejun Heo  * [dis]associated in the meantime.
1123e22bee78STejun Heo  *
1124e22bee78STejun Heo  * This function tries set_cpus_allowed() and locks gcwq and verifies
1125e22bee78STejun Heo  * the binding against GCWQ_DISASSOCIATED which is set during
1126e22bee78STejun Heo  * CPU_DYING and cleared during CPU_ONLINE, so if the worker enters
1127e22bee78STejun Heo  * idle state or fetches works without dropping lock, it can guarantee
1128e22bee78STejun Heo  * the scheduling requirement described in the first paragraph.
1129e22bee78STejun Heo  *
1130e22bee78STejun Heo  * CONTEXT:
1131e22bee78STejun Heo  * Might sleep.  Called without any lock but returns with gcwq->lock
1132e22bee78STejun Heo  * held.
1133e22bee78STejun Heo  *
1134e22bee78STejun Heo  * RETURNS:
1135e22bee78STejun Heo  * %true if the associated gcwq is online (@worker is successfully
1136e22bee78STejun Heo  * bound), %false if offline.
1137e22bee78STejun Heo  */
1138e22bee78STejun Heo static bool worker_maybe_bind_and_lock(struct worker *worker)
1139e22bee78STejun Heo {
1140e22bee78STejun Heo 	struct global_cwq *gcwq = worker->gcwq;
1141e22bee78STejun Heo 	struct task_struct *task = worker->task;
1142e22bee78STejun Heo 
1143e22bee78STejun Heo 	while (true) {
1144e22bee78STejun Heo 		/*
1145e22bee78STejun Heo 		 * The following call may fail, succeed or succeed
1146e22bee78STejun Heo 		 * without actually migrating the task to the cpu if
1147e22bee78STejun Heo 		 * it races with cpu hotunplug operation.  Verify
1148e22bee78STejun Heo 		 * against GCWQ_DISASSOCIATED.
1149e22bee78STejun Heo 		 */
1150e22bee78STejun Heo 		set_cpus_allowed_ptr(task, get_cpu_mask(gcwq->cpu));
1151e22bee78STejun Heo 
1152e22bee78STejun Heo 		spin_lock_irq(&gcwq->lock);
1153e22bee78STejun Heo 		if (gcwq->flags & GCWQ_DISASSOCIATED)
1154e22bee78STejun Heo 			return false;
1155e22bee78STejun Heo 		if (task_cpu(task) == gcwq->cpu &&
1156e22bee78STejun Heo 		    cpumask_equal(&current->cpus_allowed,
1157e22bee78STejun Heo 				  get_cpu_mask(gcwq->cpu)))
1158e22bee78STejun Heo 			return true;
1159e22bee78STejun Heo 		spin_unlock_irq(&gcwq->lock);
1160e22bee78STejun Heo 
1161e22bee78STejun Heo 		/* CPU has come up inbetween, retry migration */
1162e22bee78STejun Heo 		cpu_relax();
1163e22bee78STejun Heo 	}
1164e22bee78STejun Heo }
1165e22bee78STejun Heo 
1166e22bee78STejun Heo /*
1167e22bee78STejun Heo  * Function for worker->rebind_work used to rebind rogue busy workers
1168e22bee78STejun Heo  * to the associated cpu which is coming back online.  This is
1169e22bee78STejun Heo  * scheduled by cpu up but can race with other cpu hotplug operations
1170e22bee78STejun Heo  * and may be executed twice without intervening cpu down.
1171e22bee78STejun Heo  */
1172e22bee78STejun Heo static void worker_rebind_fn(struct work_struct *work)
1173e22bee78STejun Heo {
1174e22bee78STejun Heo 	struct worker *worker = container_of(work, struct worker, rebind_work);
1175e22bee78STejun Heo 	struct global_cwq *gcwq = worker->gcwq;
1176e22bee78STejun Heo 
1177e22bee78STejun Heo 	if (worker_maybe_bind_and_lock(worker))
1178e22bee78STejun Heo 		worker_clr_flags(worker, WORKER_REBIND);
1179e22bee78STejun Heo 
1180e22bee78STejun Heo 	spin_unlock_irq(&gcwq->lock);
1181e22bee78STejun Heo }
1182e22bee78STejun Heo 
1183c34056a3STejun Heo static struct worker *alloc_worker(void)
1184c34056a3STejun Heo {
1185c34056a3STejun Heo 	struct worker *worker;
1186c34056a3STejun Heo 
1187c34056a3STejun Heo 	worker = kzalloc(sizeof(*worker), GFP_KERNEL);
1188c8e55f36STejun Heo 	if (worker) {
1189c8e55f36STejun Heo 		INIT_LIST_HEAD(&worker->entry);
1190affee4b2STejun Heo 		INIT_LIST_HEAD(&worker->scheduled);
1191e22bee78STejun Heo 		INIT_WORK(&worker->rebind_work, worker_rebind_fn);
1192e22bee78STejun Heo 		/* on creation a worker is in !idle && prep state */
1193e22bee78STejun Heo 		worker->flags = WORKER_PREP;
1194c8e55f36STejun Heo 	}
1195c34056a3STejun Heo 	return worker;
1196c34056a3STejun Heo }
1197c34056a3STejun Heo 
1198c34056a3STejun Heo /**
1199c34056a3STejun Heo  * create_worker - create a new workqueue worker
12007e11629dSTejun Heo  * @gcwq: gcwq the new worker will belong to
1201c34056a3STejun Heo  * @bind: whether to set affinity to @cpu or not
1202c34056a3STejun Heo  *
12037e11629dSTejun Heo  * Create a new worker which is bound to @gcwq.  The returned worker
1204c34056a3STejun Heo  * can be started by calling start_worker() or destroyed using
1205c34056a3STejun Heo  * destroy_worker().
1206c34056a3STejun Heo  *
1207c34056a3STejun Heo  * CONTEXT:
1208c34056a3STejun Heo  * Might sleep.  Does GFP_KERNEL allocations.
1209c34056a3STejun Heo  *
1210c34056a3STejun Heo  * RETURNS:
1211c34056a3STejun Heo  * Pointer to the newly created worker.
1212c34056a3STejun Heo  */
12137e11629dSTejun Heo static struct worker *create_worker(struct global_cwq *gcwq, bool bind)
1214c34056a3STejun Heo {
1215c34056a3STejun Heo 	int id = -1;
1216c34056a3STejun Heo 	struct worker *worker = NULL;
1217c34056a3STejun Heo 
12188b03ae3cSTejun Heo 	spin_lock_irq(&gcwq->lock);
12198b03ae3cSTejun Heo 	while (ida_get_new(&gcwq->worker_ida, &id)) {
12208b03ae3cSTejun Heo 		spin_unlock_irq(&gcwq->lock);
12218b03ae3cSTejun Heo 		if (!ida_pre_get(&gcwq->worker_ida, GFP_KERNEL))
1222c34056a3STejun Heo 			goto fail;
12238b03ae3cSTejun Heo 		spin_lock_irq(&gcwq->lock);
1224c34056a3STejun Heo 	}
12258b03ae3cSTejun Heo 	spin_unlock_irq(&gcwq->lock);
1226c34056a3STejun Heo 
1227c34056a3STejun Heo 	worker = alloc_worker();
1228c34056a3STejun Heo 	if (!worker)
1229c34056a3STejun Heo 		goto fail;
1230c34056a3STejun Heo 
12318b03ae3cSTejun Heo 	worker->gcwq = gcwq;
1232c34056a3STejun Heo 	worker->id = id;
1233c34056a3STejun Heo 
1234c34056a3STejun Heo 	worker->task = kthread_create(worker_thread, worker, "kworker/%u:%d",
12358b03ae3cSTejun Heo 				      gcwq->cpu, id);
1236c34056a3STejun Heo 	if (IS_ERR(worker->task))
1237c34056a3STejun Heo 		goto fail;
1238c34056a3STejun Heo 
1239db7bccf4STejun Heo 	/*
1240db7bccf4STejun Heo 	 * A rogue worker will become a regular one if CPU comes
1241db7bccf4STejun Heo 	 * online later on.  Make sure every worker has
1242db7bccf4STejun Heo 	 * PF_THREAD_BOUND set.
1243db7bccf4STejun Heo 	 */
1244c34056a3STejun Heo 	if (bind)
12458b03ae3cSTejun Heo 		kthread_bind(worker->task, gcwq->cpu);
1246db7bccf4STejun Heo 	else
1247db7bccf4STejun Heo 		worker->task->flags |= PF_THREAD_BOUND;
1248c34056a3STejun Heo 
1249c34056a3STejun Heo 	return worker;
1250c34056a3STejun Heo fail:
1251c34056a3STejun Heo 	if (id >= 0) {
12528b03ae3cSTejun Heo 		spin_lock_irq(&gcwq->lock);
12538b03ae3cSTejun Heo 		ida_remove(&gcwq->worker_ida, id);
12548b03ae3cSTejun Heo 		spin_unlock_irq(&gcwq->lock);
1255c34056a3STejun Heo 	}
1256c34056a3STejun Heo 	kfree(worker);
1257c34056a3STejun Heo 	return NULL;
1258c34056a3STejun Heo }
1259c34056a3STejun Heo 
1260c34056a3STejun Heo /**
1261c34056a3STejun Heo  * start_worker - start a newly created worker
1262c34056a3STejun Heo  * @worker: worker to start
1263c34056a3STejun Heo  *
1264c8e55f36STejun Heo  * Make the gcwq aware of @worker and start it.
1265c34056a3STejun Heo  *
1266c34056a3STejun Heo  * CONTEXT:
12678b03ae3cSTejun Heo  * spin_lock_irq(gcwq->lock).
1268c34056a3STejun Heo  */
1269c34056a3STejun Heo static void start_worker(struct worker *worker)
1270c34056a3STejun Heo {
1271d302f017STejun Heo 	worker_set_flags(worker, WORKER_STARTED, false);
1272c8e55f36STejun Heo 	worker->gcwq->nr_workers++;
1273c8e55f36STejun Heo 	worker_enter_idle(worker);
1274c34056a3STejun Heo 	wake_up_process(worker->task);
1275c34056a3STejun Heo }
1276c34056a3STejun Heo 
1277c34056a3STejun Heo /**
1278c34056a3STejun Heo  * destroy_worker - destroy a workqueue worker
1279c34056a3STejun Heo  * @worker: worker to be destroyed
1280c34056a3STejun Heo  *
1281c8e55f36STejun Heo  * Destroy @worker and adjust @gcwq stats accordingly.
1282c8e55f36STejun Heo  *
1283c8e55f36STejun Heo  * CONTEXT:
1284c8e55f36STejun Heo  * spin_lock_irq(gcwq->lock) which is released and regrabbed.
1285c34056a3STejun Heo  */
1286c34056a3STejun Heo static void destroy_worker(struct worker *worker)
1287c34056a3STejun Heo {
12888b03ae3cSTejun Heo 	struct global_cwq *gcwq = worker->gcwq;
1289c34056a3STejun Heo 	int id = worker->id;
1290c34056a3STejun Heo 
1291c34056a3STejun Heo 	/* sanity check frenzy */
1292c34056a3STejun Heo 	BUG_ON(worker->current_work);
1293affee4b2STejun Heo 	BUG_ON(!list_empty(&worker->scheduled));
1294c34056a3STejun Heo 
1295c8e55f36STejun Heo 	if (worker->flags & WORKER_STARTED)
1296c8e55f36STejun Heo 		gcwq->nr_workers--;
1297c8e55f36STejun Heo 	if (worker->flags & WORKER_IDLE)
1298c8e55f36STejun Heo 		gcwq->nr_idle--;
1299c8e55f36STejun Heo 
1300c8e55f36STejun Heo 	list_del_init(&worker->entry);
1301d302f017STejun Heo 	worker_set_flags(worker, WORKER_DIE, false);
1302c8e55f36STejun Heo 
1303c8e55f36STejun Heo 	spin_unlock_irq(&gcwq->lock);
1304c8e55f36STejun Heo 
1305c34056a3STejun Heo 	kthread_stop(worker->task);
1306c34056a3STejun Heo 	kfree(worker);
1307c34056a3STejun Heo 
13088b03ae3cSTejun Heo 	spin_lock_irq(&gcwq->lock);
13098b03ae3cSTejun Heo 	ida_remove(&gcwq->worker_ida, id);
1310c34056a3STejun Heo }
1311c34056a3STejun Heo 
1312e22bee78STejun Heo static void idle_worker_timeout(unsigned long __gcwq)
1313e22bee78STejun Heo {
1314e22bee78STejun Heo 	struct global_cwq *gcwq = (void *)__gcwq;
1315e22bee78STejun Heo 
1316e22bee78STejun Heo 	spin_lock_irq(&gcwq->lock);
1317e22bee78STejun Heo 
1318e22bee78STejun Heo 	if (too_many_workers(gcwq)) {
1319e22bee78STejun Heo 		struct worker *worker;
1320e22bee78STejun Heo 		unsigned long expires;
1321e22bee78STejun Heo 
1322e22bee78STejun Heo 		/* idle_list is kept in LIFO order, check the last one */
1323e22bee78STejun Heo 		worker = list_entry(gcwq->idle_list.prev, struct worker, entry);
1324e22bee78STejun Heo 		expires = worker->last_active + IDLE_WORKER_TIMEOUT;
1325e22bee78STejun Heo 
1326e22bee78STejun Heo 		if (time_before(jiffies, expires))
1327e22bee78STejun Heo 			mod_timer(&gcwq->idle_timer, expires);
1328e22bee78STejun Heo 		else {
1329e22bee78STejun Heo 			/* it's been idle for too long, wake up manager */
1330e22bee78STejun Heo 			gcwq->flags |= GCWQ_MANAGE_WORKERS;
1331e22bee78STejun Heo 			wake_up_worker(gcwq);
1332e22bee78STejun Heo 		}
1333e22bee78STejun Heo 	}
1334e22bee78STejun Heo 
1335e22bee78STejun Heo 	spin_unlock_irq(&gcwq->lock);
1336e22bee78STejun Heo }
1337e22bee78STejun Heo 
1338e22bee78STejun Heo static bool send_mayday(struct work_struct *work)
1339e22bee78STejun Heo {
1340e22bee78STejun Heo 	struct cpu_workqueue_struct *cwq = get_work_cwq(work);
1341e22bee78STejun Heo 	struct workqueue_struct *wq = cwq->wq;
1342e22bee78STejun Heo 
1343e22bee78STejun Heo 	if (!(wq->flags & WQ_RESCUER))
1344e22bee78STejun Heo 		return false;
1345e22bee78STejun Heo 
1346e22bee78STejun Heo 	/* mayday mayday mayday */
1347e22bee78STejun Heo 	if (!cpumask_test_and_set_cpu(cwq->gcwq->cpu, wq->mayday_mask))
1348e22bee78STejun Heo 		wake_up_process(wq->rescuer->task);
1349e22bee78STejun Heo 	return true;
1350e22bee78STejun Heo }
1351e22bee78STejun Heo 
1352e22bee78STejun Heo static void gcwq_mayday_timeout(unsigned long __gcwq)
1353e22bee78STejun Heo {
1354e22bee78STejun Heo 	struct global_cwq *gcwq = (void *)__gcwq;
1355e22bee78STejun Heo 	struct work_struct *work;
1356e22bee78STejun Heo 
1357e22bee78STejun Heo 	spin_lock_irq(&gcwq->lock);
1358e22bee78STejun Heo 
1359e22bee78STejun Heo 	if (need_to_create_worker(gcwq)) {
1360e22bee78STejun Heo 		/*
1361e22bee78STejun Heo 		 * We've been trying to create a new worker but
1362e22bee78STejun Heo 		 * haven't been successful.  We might be hitting an
1363e22bee78STejun Heo 		 * allocation deadlock.  Send distress signals to
1364e22bee78STejun Heo 		 * rescuers.
1365e22bee78STejun Heo 		 */
1366e22bee78STejun Heo 		list_for_each_entry(work, &gcwq->worklist, entry)
1367e22bee78STejun Heo 			send_mayday(work);
1368e22bee78STejun Heo 	}
1369e22bee78STejun Heo 
1370e22bee78STejun Heo 	spin_unlock_irq(&gcwq->lock);
1371e22bee78STejun Heo 
1372e22bee78STejun Heo 	mod_timer(&gcwq->mayday_timer, jiffies + MAYDAY_INTERVAL);
1373e22bee78STejun Heo }
1374e22bee78STejun Heo 
1375e22bee78STejun Heo /**
1376e22bee78STejun Heo  * maybe_create_worker - create a new worker if necessary
1377e22bee78STejun Heo  * @gcwq: gcwq to create a new worker for
1378e22bee78STejun Heo  *
1379e22bee78STejun Heo  * Create a new worker for @gcwq if necessary.  @gcwq is guaranteed to
1380e22bee78STejun Heo  * have at least one idle worker on return from this function.  If
1381e22bee78STejun Heo  * creating a new worker takes longer than MAYDAY_INTERVAL, mayday is
1382e22bee78STejun Heo  * sent to all rescuers with works scheduled on @gcwq to resolve
1383e22bee78STejun Heo  * possible allocation deadlock.
1384e22bee78STejun Heo  *
1385e22bee78STejun Heo  * On return, need_to_create_worker() is guaranteed to be false and
1386e22bee78STejun Heo  * may_start_working() true.
1387e22bee78STejun Heo  *
1388e22bee78STejun Heo  * LOCKING:
1389e22bee78STejun Heo  * spin_lock_irq(gcwq->lock) which may be released and regrabbed
1390e22bee78STejun Heo  * multiple times.  Does GFP_KERNEL allocations.  Called only from
1391e22bee78STejun Heo  * manager.
1392e22bee78STejun Heo  *
1393e22bee78STejun Heo  * RETURNS:
1394e22bee78STejun Heo  * false if no action was taken and gcwq->lock stayed locked, true
1395e22bee78STejun Heo  * otherwise.
1396e22bee78STejun Heo  */
1397e22bee78STejun Heo static bool maybe_create_worker(struct global_cwq *gcwq)
1398e22bee78STejun Heo {
1399e22bee78STejun Heo 	if (!need_to_create_worker(gcwq))
1400e22bee78STejun Heo 		return false;
1401e22bee78STejun Heo restart:
1402e22bee78STejun Heo 	/* if we don't make progress in MAYDAY_INITIAL_TIMEOUT, call for help */
1403e22bee78STejun Heo 	mod_timer(&gcwq->mayday_timer, jiffies + MAYDAY_INITIAL_TIMEOUT);
1404e22bee78STejun Heo 
1405e22bee78STejun Heo 	while (true) {
1406e22bee78STejun Heo 		struct worker *worker;
1407e22bee78STejun Heo 
1408e22bee78STejun Heo 		spin_unlock_irq(&gcwq->lock);
1409e22bee78STejun Heo 
1410e22bee78STejun Heo 		worker = create_worker(gcwq, true);
1411e22bee78STejun Heo 		if (worker) {
1412e22bee78STejun Heo 			del_timer_sync(&gcwq->mayday_timer);
1413e22bee78STejun Heo 			spin_lock_irq(&gcwq->lock);
1414e22bee78STejun Heo 			start_worker(worker);
1415e22bee78STejun Heo 			BUG_ON(need_to_create_worker(gcwq));
1416e22bee78STejun Heo 			return true;
1417e22bee78STejun Heo 		}
1418e22bee78STejun Heo 
1419e22bee78STejun Heo 		if (!need_to_create_worker(gcwq))
1420e22bee78STejun Heo 			break;
1421e22bee78STejun Heo 
1422e22bee78STejun Heo 		spin_unlock_irq(&gcwq->lock);
1423e22bee78STejun Heo 		__set_current_state(TASK_INTERRUPTIBLE);
1424e22bee78STejun Heo 		schedule_timeout(CREATE_COOLDOWN);
1425e22bee78STejun Heo 		spin_lock_irq(&gcwq->lock);
1426e22bee78STejun Heo 		if (!need_to_create_worker(gcwq))
1427e22bee78STejun Heo 			break;
1428e22bee78STejun Heo 	}
1429e22bee78STejun Heo 
1430e22bee78STejun Heo 	spin_unlock_irq(&gcwq->lock);
1431e22bee78STejun Heo 	del_timer_sync(&gcwq->mayday_timer);
1432e22bee78STejun Heo 	spin_lock_irq(&gcwq->lock);
1433e22bee78STejun Heo 	if (need_to_create_worker(gcwq))
1434e22bee78STejun Heo 		goto restart;
1435e22bee78STejun Heo 	return true;
1436e22bee78STejun Heo }
1437e22bee78STejun Heo 
1438e22bee78STejun Heo /**
1439e22bee78STejun Heo  * maybe_destroy_worker - destroy workers which have been idle for a while
1440e22bee78STejun Heo  * @gcwq: gcwq to destroy workers for
1441e22bee78STejun Heo  *
1442e22bee78STejun Heo  * Destroy @gcwq workers which have been idle for longer than
1443e22bee78STejun Heo  * IDLE_WORKER_TIMEOUT.
1444e22bee78STejun Heo  *
1445e22bee78STejun Heo  * LOCKING:
1446e22bee78STejun Heo  * spin_lock_irq(gcwq->lock) which may be released and regrabbed
1447e22bee78STejun Heo  * multiple times.  Called only from manager.
1448e22bee78STejun Heo  *
1449e22bee78STejun Heo  * RETURNS:
1450e22bee78STejun Heo  * false if no action was taken and gcwq->lock stayed locked, true
1451e22bee78STejun Heo  * otherwise.
1452e22bee78STejun Heo  */
1453e22bee78STejun Heo static bool maybe_destroy_workers(struct global_cwq *gcwq)
1454e22bee78STejun Heo {
1455e22bee78STejun Heo 	bool ret = false;
1456e22bee78STejun Heo 
1457e22bee78STejun Heo 	while (too_many_workers(gcwq)) {
1458e22bee78STejun Heo 		struct worker *worker;
1459e22bee78STejun Heo 		unsigned long expires;
1460e22bee78STejun Heo 
1461e22bee78STejun Heo 		worker = list_entry(gcwq->idle_list.prev, struct worker, entry);
1462e22bee78STejun Heo 		expires = worker->last_active + IDLE_WORKER_TIMEOUT;
1463e22bee78STejun Heo 
1464e22bee78STejun Heo 		if (time_before(jiffies, expires)) {
1465e22bee78STejun Heo 			mod_timer(&gcwq->idle_timer, expires);
1466e22bee78STejun Heo 			break;
1467e22bee78STejun Heo 		}
1468e22bee78STejun Heo 
1469e22bee78STejun Heo 		destroy_worker(worker);
1470e22bee78STejun Heo 		ret = true;
1471e22bee78STejun Heo 	}
1472e22bee78STejun Heo 
1473e22bee78STejun Heo 	return ret;
1474e22bee78STejun Heo }
1475e22bee78STejun Heo 
1476e22bee78STejun Heo /**
1477e22bee78STejun Heo  * manage_workers - manage worker pool
1478e22bee78STejun Heo  * @worker: self
1479e22bee78STejun Heo  *
1480e22bee78STejun Heo  * Assume the manager role and manage gcwq worker pool @worker belongs
1481e22bee78STejun Heo  * to.  At any given time, there can be only zero or one manager per
1482e22bee78STejun Heo  * gcwq.  The exclusion is handled automatically by this function.
1483e22bee78STejun Heo  *
1484e22bee78STejun Heo  * The caller can safely start processing works on false return.  On
1485e22bee78STejun Heo  * true return, it's guaranteed that need_to_create_worker() is false
1486e22bee78STejun Heo  * and may_start_working() is true.
1487e22bee78STejun Heo  *
1488e22bee78STejun Heo  * CONTEXT:
1489e22bee78STejun Heo  * spin_lock_irq(gcwq->lock) which may be released and regrabbed
1490e22bee78STejun Heo  * multiple times.  Does GFP_KERNEL allocations.
1491e22bee78STejun Heo  *
1492e22bee78STejun Heo  * RETURNS:
1493e22bee78STejun Heo  * false if no action was taken and gcwq->lock stayed locked, true if
1494e22bee78STejun Heo  * some action was taken.
1495e22bee78STejun Heo  */
1496e22bee78STejun Heo static bool manage_workers(struct worker *worker)
1497e22bee78STejun Heo {
1498e22bee78STejun Heo 	struct global_cwq *gcwq = worker->gcwq;
1499e22bee78STejun Heo 	bool ret = false;
1500e22bee78STejun Heo 
1501e22bee78STejun Heo 	if (gcwq->flags & GCWQ_MANAGING_WORKERS)
1502e22bee78STejun Heo 		return ret;
1503e22bee78STejun Heo 
1504e22bee78STejun Heo 	gcwq->flags &= ~GCWQ_MANAGE_WORKERS;
1505e22bee78STejun Heo 	gcwq->flags |= GCWQ_MANAGING_WORKERS;
1506e22bee78STejun Heo 
1507e22bee78STejun Heo 	/*
1508e22bee78STejun Heo 	 * Destroy and then create so that may_start_working() is true
1509e22bee78STejun Heo 	 * on return.
1510e22bee78STejun Heo 	 */
1511e22bee78STejun Heo 	ret |= maybe_destroy_workers(gcwq);
1512e22bee78STejun Heo 	ret |= maybe_create_worker(gcwq);
1513e22bee78STejun Heo 
1514e22bee78STejun Heo 	gcwq->flags &= ~GCWQ_MANAGING_WORKERS;
1515e22bee78STejun Heo 
1516e22bee78STejun Heo 	/*
1517e22bee78STejun Heo 	 * The trustee might be waiting to take over the manager
1518e22bee78STejun Heo 	 * position, tell it we're done.
1519e22bee78STejun Heo 	 */
1520e22bee78STejun Heo 	if (unlikely(gcwq->trustee))
1521e22bee78STejun Heo 		wake_up_all(&gcwq->trustee_wait);
1522e22bee78STejun Heo 
1523e22bee78STejun Heo 	return ret;
1524e22bee78STejun Heo }
1525e22bee78STejun Heo 
1526a62428c0STejun Heo /**
1527affee4b2STejun Heo  * move_linked_works - move linked works to a list
1528affee4b2STejun Heo  * @work: start of series of works to be scheduled
1529affee4b2STejun Heo  * @head: target list to append @work to
1530affee4b2STejun Heo  * @nextp: out paramter for nested worklist walking
1531affee4b2STejun Heo  *
1532affee4b2STejun Heo  * Schedule linked works starting from @work to @head.  Work series to
1533affee4b2STejun Heo  * be scheduled starts at @work and includes any consecutive work with
1534affee4b2STejun Heo  * WORK_STRUCT_LINKED set in its predecessor.
1535affee4b2STejun Heo  *
1536affee4b2STejun Heo  * If @nextp is not NULL, it's updated to point to the next work of
1537affee4b2STejun Heo  * the last scheduled work.  This allows move_linked_works() to be
1538affee4b2STejun Heo  * nested inside outer list_for_each_entry_safe().
1539affee4b2STejun Heo  *
1540affee4b2STejun Heo  * CONTEXT:
15418b03ae3cSTejun Heo  * spin_lock_irq(gcwq->lock).
1542affee4b2STejun Heo  */
1543affee4b2STejun Heo static void move_linked_works(struct work_struct *work, struct list_head *head,
1544affee4b2STejun Heo 			      struct work_struct **nextp)
1545affee4b2STejun Heo {
1546affee4b2STejun Heo 	struct work_struct *n;
1547affee4b2STejun Heo 
1548affee4b2STejun Heo 	/*
1549affee4b2STejun Heo 	 * Linked worklist will always end before the end of the list,
1550affee4b2STejun Heo 	 * use NULL for list head.
1551affee4b2STejun Heo 	 */
1552affee4b2STejun Heo 	list_for_each_entry_safe_from(work, n, NULL, entry) {
1553affee4b2STejun Heo 		list_move_tail(&work->entry, head);
1554affee4b2STejun Heo 		if (!(*work_data_bits(work) & WORK_STRUCT_LINKED))
1555affee4b2STejun Heo 			break;
1556affee4b2STejun Heo 	}
1557affee4b2STejun Heo 
1558affee4b2STejun Heo 	/*
1559affee4b2STejun Heo 	 * If we're already inside safe list traversal and have moved
1560affee4b2STejun Heo 	 * multiple works to the scheduled queue, the next position
1561affee4b2STejun Heo 	 * needs to be updated.
1562affee4b2STejun Heo 	 */
1563affee4b2STejun Heo 	if (nextp)
1564affee4b2STejun Heo 		*nextp = n;
1565affee4b2STejun Heo }
1566affee4b2STejun Heo 
15671e19ffc6STejun Heo static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq)
15681e19ffc6STejun Heo {
15691e19ffc6STejun Heo 	struct work_struct *work = list_first_entry(&cwq->delayed_works,
15701e19ffc6STejun Heo 						    struct work_struct, entry);
1571*649027d7STejun Heo 	struct list_head *pos = gcwq_determine_ins_pos(cwq->gcwq, cwq);
15721e19ffc6STejun Heo 
1573*649027d7STejun Heo 	move_linked_works(work, pos, NULL);
15741e19ffc6STejun Heo 	cwq->nr_active++;
15751e19ffc6STejun Heo }
15761e19ffc6STejun Heo 
1577affee4b2STejun Heo /**
157873f53c4aSTejun Heo  * cwq_dec_nr_in_flight - decrement cwq's nr_in_flight
157973f53c4aSTejun Heo  * @cwq: cwq of interest
158073f53c4aSTejun Heo  * @color: color of work which left the queue
158173f53c4aSTejun Heo  *
158273f53c4aSTejun Heo  * A work either has completed or is removed from pending queue,
158373f53c4aSTejun Heo  * decrement nr_in_flight of its cwq and handle workqueue flushing.
158473f53c4aSTejun Heo  *
158573f53c4aSTejun Heo  * CONTEXT:
15868b03ae3cSTejun Heo  * spin_lock_irq(gcwq->lock).
158773f53c4aSTejun Heo  */
158873f53c4aSTejun Heo static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color)
158973f53c4aSTejun Heo {
159073f53c4aSTejun Heo 	/* ignore uncolored works */
159173f53c4aSTejun Heo 	if (color == WORK_NO_COLOR)
159273f53c4aSTejun Heo 		return;
159373f53c4aSTejun Heo 
159473f53c4aSTejun Heo 	cwq->nr_in_flight[color]--;
15951e19ffc6STejun Heo 	cwq->nr_active--;
15961e19ffc6STejun Heo 
1597502ca9d8STejun Heo 	if (!list_empty(&cwq->delayed_works)) {
15981e19ffc6STejun Heo 		/* one down, submit a delayed one */
1599502ca9d8STejun Heo 		if (cwq->nr_active < cwq->max_active)
16001e19ffc6STejun Heo 			cwq_activate_first_delayed(cwq);
1601502ca9d8STejun Heo 	} else if (!cwq->nr_active && cwq->wq->flags & WQ_SINGLE_CPU) {
1602502ca9d8STejun Heo 		/* this was the last work, unbind from single cpu */
1603502ca9d8STejun Heo 		cwq_unbind_single_cpu(cwq);
1604502ca9d8STejun Heo 	}
160573f53c4aSTejun Heo 
160673f53c4aSTejun Heo 	/* is flush in progress and are we at the flushing tip? */
160773f53c4aSTejun Heo 	if (likely(cwq->flush_color != color))
160873f53c4aSTejun Heo 		return;
160973f53c4aSTejun Heo 
161073f53c4aSTejun Heo 	/* are there still in-flight works? */
161173f53c4aSTejun Heo 	if (cwq->nr_in_flight[color])
161273f53c4aSTejun Heo 		return;
161373f53c4aSTejun Heo 
161473f53c4aSTejun Heo 	/* this cwq is done, clear flush_color */
161573f53c4aSTejun Heo 	cwq->flush_color = -1;
161673f53c4aSTejun Heo 
161773f53c4aSTejun Heo 	/*
161873f53c4aSTejun Heo 	 * If this was the last cwq, wake up the first flusher.  It
161973f53c4aSTejun Heo 	 * will handle the rest.
162073f53c4aSTejun Heo 	 */
162173f53c4aSTejun Heo 	if (atomic_dec_and_test(&cwq->wq->nr_cwqs_to_flush))
162273f53c4aSTejun Heo 		complete(&cwq->wq->first_flusher->done);
162373f53c4aSTejun Heo }
162473f53c4aSTejun Heo 
162573f53c4aSTejun Heo /**
1626a62428c0STejun Heo  * process_one_work - process single work
1627c34056a3STejun Heo  * @worker: self
1628a62428c0STejun Heo  * @work: work to process
1629a62428c0STejun Heo  *
1630a62428c0STejun Heo  * Process @work.  This function contains all the logics necessary to
1631a62428c0STejun Heo  * process a single work including synchronization against and
1632a62428c0STejun Heo  * interaction with other workers on the same cpu, queueing and
1633a62428c0STejun Heo  * flushing.  As long as context requirement is met, any worker can
1634a62428c0STejun Heo  * call this function to process a work.
1635a62428c0STejun Heo  *
1636a62428c0STejun Heo  * CONTEXT:
16378b03ae3cSTejun Heo  * spin_lock_irq(gcwq->lock) which is released and regrabbed.
1638a62428c0STejun Heo  */
1639c34056a3STejun Heo static void process_one_work(struct worker *worker, struct work_struct *work)
16401da177e4SLinus Torvalds {
16417e11629dSTejun Heo 	struct cpu_workqueue_struct *cwq = get_work_cwq(work);
16428b03ae3cSTejun Heo 	struct global_cwq *gcwq = cwq->gcwq;
1643c8e55f36STejun Heo 	struct hlist_head *bwh = busy_worker_head(gcwq, work);
16446bb49e59SDavid Howells 	work_func_t f = work->func;
164573f53c4aSTejun Heo 	int work_color;
16467e11629dSTejun Heo 	struct worker *collision;
16474e6045f1SJohannes Berg #ifdef CONFIG_LOCKDEP
16484e6045f1SJohannes Berg 	/*
1649a62428c0STejun Heo 	 * It is permissible to free the struct work_struct from
1650a62428c0STejun Heo 	 * inside the function that is called from it, this we need to
1651a62428c0STejun Heo 	 * take into account for lockdep too.  To avoid bogus "held
1652a62428c0STejun Heo 	 * lock freed" warnings as well as problems when looking into
1653a62428c0STejun Heo 	 * work->lockdep_map, make a copy and use that here.
16544e6045f1SJohannes Berg 	 */
16554e6045f1SJohannes Berg 	struct lockdep_map lockdep_map = work->lockdep_map;
16564e6045f1SJohannes Berg #endif
16577e11629dSTejun Heo 	/*
16587e11629dSTejun Heo 	 * A single work shouldn't be executed concurrently by
16597e11629dSTejun Heo 	 * multiple workers on a single cpu.  Check whether anyone is
16607e11629dSTejun Heo 	 * already processing the work.  If so, defer the work to the
16617e11629dSTejun Heo 	 * currently executing one.
16627e11629dSTejun Heo 	 */
16637e11629dSTejun Heo 	collision = __find_worker_executing_work(gcwq, bwh, work);
16647e11629dSTejun Heo 	if (unlikely(collision)) {
16657e11629dSTejun Heo 		move_linked_works(work, &collision->scheduled, NULL);
16667e11629dSTejun Heo 		return;
16677e11629dSTejun Heo 	}
16687e11629dSTejun Heo 
1669a62428c0STejun Heo 	/* claim and process */
1670dc186ad7SThomas Gleixner 	debug_work_deactivate(work);
1671c8e55f36STejun Heo 	hlist_add_head(&worker->hentry, bwh);
1672c34056a3STejun Heo 	worker->current_work = work;
16738cca0eeaSTejun Heo 	worker->current_cwq = cwq;
167473f53c4aSTejun Heo 	work_color = get_work_color(work);
16757a22ad75STejun Heo 
16767a22ad75STejun Heo 	/* record the current cpu number in the work data and dequeue */
16777a22ad75STejun Heo 	set_work_cpu(work, gcwq->cpu);
1678a62428c0STejun Heo 	list_del_init(&work->entry);
1679a62428c0STejun Heo 
1680*649027d7STejun Heo 	/*
1681*649027d7STejun Heo 	 * If HIGHPRI_PENDING, check the next work, and, if HIGHPRI,
1682*649027d7STejun Heo 	 * wake up another worker; otherwise, clear HIGHPRI_PENDING.
1683*649027d7STejun Heo 	 */
1684*649027d7STejun Heo 	if (unlikely(gcwq->flags & GCWQ_HIGHPRI_PENDING)) {
1685*649027d7STejun Heo 		struct work_struct *nwork = list_first_entry(&gcwq->worklist,
1686*649027d7STejun Heo 						struct work_struct, entry);
1687*649027d7STejun Heo 
1688*649027d7STejun Heo 		if (!list_empty(&gcwq->worklist) &&
1689*649027d7STejun Heo 		    get_work_cwq(nwork)->wq->flags & WQ_HIGHPRI)
1690*649027d7STejun Heo 			wake_up_worker(gcwq);
1691*649027d7STejun Heo 		else
1692*649027d7STejun Heo 			gcwq->flags &= ~GCWQ_HIGHPRI_PENDING;
1693*649027d7STejun Heo 	}
1694*649027d7STejun Heo 
16958b03ae3cSTejun Heo 	spin_unlock_irq(&gcwq->lock);
16961da177e4SLinus Torvalds 
169723b2e599SOleg Nesterov 	work_clear_pending(work);
16983295f0efSIngo Molnar 	lock_map_acquire(&cwq->wq->lockdep_map);
16993295f0efSIngo Molnar 	lock_map_acquire(&lockdep_map);
170065f27f38SDavid Howells 	f(work);
17013295f0efSIngo Molnar 	lock_map_release(&lockdep_map);
17023295f0efSIngo Molnar 	lock_map_release(&cwq->wq->lockdep_map);
17031da177e4SLinus Torvalds 
1704d5abe669SPeter Zijlstra 	if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
1705d5abe669SPeter Zijlstra 		printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
1706d5abe669SPeter Zijlstra 		       "%s/0x%08x/%d\n",
1707a62428c0STejun Heo 		       current->comm, preempt_count(), task_pid_nr(current));
1708d5abe669SPeter Zijlstra 		printk(KERN_ERR "    last function: ");
1709d5abe669SPeter Zijlstra 		print_symbol("%s\n", (unsigned long)f);
1710d5abe669SPeter Zijlstra 		debug_show_held_locks(current);
1711d5abe669SPeter Zijlstra 		dump_stack();
1712d5abe669SPeter Zijlstra 	}
1713d5abe669SPeter Zijlstra 
17148b03ae3cSTejun Heo 	spin_lock_irq(&gcwq->lock);
1715a62428c0STejun Heo 
1716a62428c0STejun Heo 	/* we're done with it, release */
1717c8e55f36STejun Heo 	hlist_del_init(&worker->hentry);
1718c34056a3STejun Heo 	worker->current_work = NULL;
17198cca0eeaSTejun Heo 	worker->current_cwq = NULL;
172073f53c4aSTejun Heo 	cwq_dec_nr_in_flight(cwq, work_color);
17211da177e4SLinus Torvalds }
1722a62428c0STejun Heo 
1723affee4b2STejun Heo /**
1724affee4b2STejun Heo  * process_scheduled_works - process scheduled works
1725affee4b2STejun Heo  * @worker: self
1726affee4b2STejun Heo  *
1727affee4b2STejun Heo  * Process all scheduled works.  Please note that the scheduled list
1728affee4b2STejun Heo  * may change while processing a work, so this function repeatedly
1729affee4b2STejun Heo  * fetches a work from the top and executes it.
1730affee4b2STejun Heo  *
1731affee4b2STejun Heo  * CONTEXT:
17328b03ae3cSTejun Heo  * spin_lock_irq(gcwq->lock) which may be released and regrabbed
1733affee4b2STejun Heo  * multiple times.
1734affee4b2STejun Heo  */
1735affee4b2STejun Heo static void process_scheduled_works(struct worker *worker)
1736a62428c0STejun Heo {
1737affee4b2STejun Heo 	while (!list_empty(&worker->scheduled)) {
1738affee4b2STejun Heo 		struct work_struct *work = list_first_entry(&worker->scheduled,
1739a62428c0STejun Heo 						struct work_struct, entry);
1740c34056a3STejun Heo 		process_one_work(worker, work);
1741a62428c0STejun Heo 	}
17421da177e4SLinus Torvalds }
17431da177e4SLinus Torvalds 
17444690c4abSTejun Heo /**
17454690c4abSTejun Heo  * worker_thread - the worker thread function
1746c34056a3STejun Heo  * @__worker: self
17474690c4abSTejun Heo  *
1748e22bee78STejun Heo  * The gcwq worker thread function.  There's a single dynamic pool of
1749e22bee78STejun Heo  * these per each cpu.  These workers process all works regardless of
1750e22bee78STejun Heo  * their specific target workqueue.  The only exception is works which
1751e22bee78STejun Heo  * belong to workqueues with a rescuer which will be explained in
1752e22bee78STejun Heo  * rescuer_thread().
17534690c4abSTejun Heo  */
1754c34056a3STejun Heo static int worker_thread(void *__worker)
17551da177e4SLinus Torvalds {
1756c34056a3STejun Heo 	struct worker *worker = __worker;
17578b03ae3cSTejun Heo 	struct global_cwq *gcwq = worker->gcwq;
17581da177e4SLinus Torvalds 
1759e22bee78STejun Heo 	/* tell the scheduler that this is a workqueue worker */
1760e22bee78STejun Heo 	worker->task->flags |= PF_WQ_WORKER;
1761c8e55f36STejun Heo woke_up:
17628b03ae3cSTejun Heo 	spin_lock_irq(&gcwq->lock);
1763affee4b2STejun Heo 
1764c8e55f36STejun Heo 	/* DIE can be set only while we're idle, checking here is enough */
1765c8e55f36STejun Heo 	if (worker->flags & WORKER_DIE) {
1766c8e55f36STejun Heo 		spin_unlock_irq(&gcwq->lock);
1767e22bee78STejun Heo 		worker->task->flags &= ~PF_WQ_WORKER;
1768c8e55f36STejun Heo 		return 0;
1769c8e55f36STejun Heo 	}
1770c8e55f36STejun Heo 
1771c8e55f36STejun Heo 	worker_leave_idle(worker);
1772db7bccf4STejun Heo recheck:
1773e22bee78STejun Heo 	/* no more worker necessary? */
1774e22bee78STejun Heo 	if (!need_more_worker(gcwq))
1775e22bee78STejun Heo 		goto sleep;
1776e22bee78STejun Heo 
1777e22bee78STejun Heo 	/* do we need to manage? */
1778e22bee78STejun Heo 	if (unlikely(!may_start_working(gcwq)) && manage_workers(worker))
1779e22bee78STejun Heo 		goto recheck;
1780e22bee78STejun Heo 
1781c8e55f36STejun Heo 	/*
1782c8e55f36STejun Heo 	 * ->scheduled list can only be filled while a worker is
1783c8e55f36STejun Heo 	 * preparing to process a work or actually processing it.
1784c8e55f36STejun Heo 	 * Make sure nobody diddled with it while I was sleeping.
1785c8e55f36STejun Heo 	 */
1786c8e55f36STejun Heo 	BUG_ON(!list_empty(&worker->scheduled));
1787c8e55f36STejun Heo 
1788e22bee78STejun Heo 	/*
1789e22bee78STejun Heo 	 * When control reaches this point, we're guaranteed to have
1790e22bee78STejun Heo 	 * at least one idle worker or that someone else has already
1791e22bee78STejun Heo 	 * assumed the manager role.
1792e22bee78STejun Heo 	 */
1793e22bee78STejun Heo 	worker_clr_flags(worker, WORKER_PREP);
1794e22bee78STejun Heo 
1795e22bee78STejun Heo 	do {
1796affee4b2STejun Heo 		struct work_struct *work =
17977e11629dSTejun Heo 			list_first_entry(&gcwq->worklist,
1798affee4b2STejun Heo 					 struct work_struct, entry);
1799affee4b2STejun Heo 
1800c8e55f36STejun Heo 		if (likely(!(*work_data_bits(work) & WORK_STRUCT_LINKED))) {
1801affee4b2STejun Heo 			/* optimization path, not strictly necessary */
1802affee4b2STejun Heo 			process_one_work(worker, work);
1803affee4b2STejun Heo 			if (unlikely(!list_empty(&worker->scheduled)))
1804affee4b2STejun Heo 				process_scheduled_works(worker);
1805affee4b2STejun Heo 		} else {
1806c8e55f36STejun Heo 			move_linked_works(work, &worker->scheduled, NULL);
1807affee4b2STejun Heo 			process_scheduled_works(worker);
1808affee4b2STejun Heo 		}
1809e22bee78STejun Heo 	} while (keep_working(gcwq));
1810affee4b2STejun Heo 
1811e22bee78STejun Heo 	worker_set_flags(worker, WORKER_PREP, false);
1812e22bee78STejun Heo 
1813e22bee78STejun Heo 	if (unlikely(need_to_manage_workers(gcwq)) && manage_workers(worker))
1814e22bee78STejun Heo 		goto recheck;
1815e22bee78STejun Heo sleep:
1816c8e55f36STejun Heo 	/*
1817e22bee78STejun Heo 	 * gcwq->lock is held and there's no work to process and no
1818e22bee78STejun Heo 	 * need to manage, sleep.  Workers are woken up only while
1819e22bee78STejun Heo 	 * holding gcwq->lock or from local cpu, so setting the
1820e22bee78STejun Heo 	 * current state before releasing gcwq->lock is enough to
1821e22bee78STejun Heo 	 * prevent losing any event.
1822c8e55f36STejun Heo 	 */
1823c8e55f36STejun Heo 	worker_enter_idle(worker);
1824c8e55f36STejun Heo 	__set_current_state(TASK_INTERRUPTIBLE);
18258b03ae3cSTejun Heo 	spin_unlock_irq(&gcwq->lock);
1826c8e55f36STejun Heo 	schedule();
1827c8e55f36STejun Heo 	goto woke_up;
18281da177e4SLinus Torvalds }
18291da177e4SLinus Torvalds 
1830e22bee78STejun Heo /**
1831e22bee78STejun Heo  * rescuer_thread - the rescuer thread function
1832e22bee78STejun Heo  * @__wq: the associated workqueue
1833e22bee78STejun Heo  *
1834e22bee78STejun Heo  * Workqueue rescuer thread function.  There's one rescuer for each
1835e22bee78STejun Heo  * workqueue which has WQ_RESCUER set.
1836e22bee78STejun Heo  *
1837e22bee78STejun Heo  * Regular work processing on a gcwq may block trying to create a new
1838e22bee78STejun Heo  * worker which uses GFP_KERNEL allocation which has slight chance of
1839e22bee78STejun Heo  * developing into deadlock if some works currently on the same queue
1840e22bee78STejun Heo  * need to be processed to satisfy the GFP_KERNEL allocation.  This is
1841e22bee78STejun Heo  * the problem rescuer solves.
1842e22bee78STejun Heo  *
1843e22bee78STejun Heo  * When such condition is possible, the gcwq summons rescuers of all
1844e22bee78STejun Heo  * workqueues which have works queued on the gcwq and let them process
1845e22bee78STejun Heo  * those works so that forward progress can be guaranteed.
1846e22bee78STejun Heo  *
1847e22bee78STejun Heo  * This should happen rarely.
1848e22bee78STejun Heo  */
1849e22bee78STejun Heo static int rescuer_thread(void *__wq)
1850e22bee78STejun Heo {
1851e22bee78STejun Heo 	struct workqueue_struct *wq = __wq;
1852e22bee78STejun Heo 	struct worker *rescuer = wq->rescuer;
1853e22bee78STejun Heo 	struct list_head *scheduled = &rescuer->scheduled;
1854e22bee78STejun Heo 	unsigned int cpu;
1855e22bee78STejun Heo 
1856e22bee78STejun Heo 	set_user_nice(current, RESCUER_NICE_LEVEL);
1857e22bee78STejun Heo repeat:
1858e22bee78STejun Heo 	set_current_state(TASK_INTERRUPTIBLE);
1859e22bee78STejun Heo 
1860e22bee78STejun Heo 	if (kthread_should_stop())
1861e22bee78STejun Heo 		return 0;
1862e22bee78STejun Heo 
1863e22bee78STejun Heo 	for_each_cpu(cpu, wq->mayday_mask) {
1864e22bee78STejun Heo 		struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
1865e22bee78STejun Heo 		struct global_cwq *gcwq = cwq->gcwq;
1866e22bee78STejun Heo 		struct work_struct *work, *n;
1867e22bee78STejun Heo 
1868e22bee78STejun Heo 		__set_current_state(TASK_RUNNING);
1869e22bee78STejun Heo 		cpumask_clear_cpu(cpu, wq->mayday_mask);
1870e22bee78STejun Heo 
1871e22bee78STejun Heo 		/* migrate to the target cpu if possible */
1872e22bee78STejun Heo 		rescuer->gcwq = gcwq;
1873e22bee78STejun Heo 		worker_maybe_bind_and_lock(rescuer);
1874e22bee78STejun Heo 
1875e22bee78STejun Heo 		/*
1876e22bee78STejun Heo 		 * Slurp in all works issued via this workqueue and
1877e22bee78STejun Heo 		 * process'em.
1878e22bee78STejun Heo 		 */
1879e22bee78STejun Heo 		BUG_ON(!list_empty(&rescuer->scheduled));
1880e22bee78STejun Heo 		list_for_each_entry_safe(work, n, &gcwq->worklist, entry)
1881e22bee78STejun Heo 			if (get_work_cwq(work) == cwq)
1882e22bee78STejun Heo 				move_linked_works(work, scheduled, &n);
1883e22bee78STejun Heo 
1884e22bee78STejun Heo 		process_scheduled_works(rescuer);
1885e22bee78STejun Heo 		spin_unlock_irq(&gcwq->lock);
1886e22bee78STejun Heo 	}
1887e22bee78STejun Heo 
1888e22bee78STejun Heo 	schedule();
1889e22bee78STejun Heo 	goto repeat;
1890e22bee78STejun Heo }
1891e22bee78STejun Heo 
1892fc2e4d70SOleg Nesterov struct wq_barrier {
1893fc2e4d70SOleg Nesterov 	struct work_struct	work;
1894fc2e4d70SOleg Nesterov 	struct completion	done;
1895fc2e4d70SOleg Nesterov };
1896fc2e4d70SOleg Nesterov 
1897fc2e4d70SOleg Nesterov static void wq_barrier_func(struct work_struct *work)
1898fc2e4d70SOleg Nesterov {
1899fc2e4d70SOleg Nesterov 	struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
1900fc2e4d70SOleg Nesterov 	complete(&barr->done);
1901fc2e4d70SOleg Nesterov }
1902fc2e4d70SOleg Nesterov 
19034690c4abSTejun Heo /**
19044690c4abSTejun Heo  * insert_wq_barrier - insert a barrier work
19054690c4abSTejun Heo  * @cwq: cwq to insert barrier into
19064690c4abSTejun Heo  * @barr: wq_barrier to insert
1907affee4b2STejun Heo  * @target: target work to attach @barr to
1908affee4b2STejun Heo  * @worker: worker currently executing @target, NULL if @target is not executing
19094690c4abSTejun Heo  *
1910affee4b2STejun Heo  * @barr is linked to @target such that @barr is completed only after
1911affee4b2STejun Heo  * @target finishes execution.  Please note that the ordering
1912affee4b2STejun Heo  * guarantee is observed only with respect to @target and on the local
1913affee4b2STejun Heo  * cpu.
1914affee4b2STejun Heo  *
1915affee4b2STejun Heo  * Currently, a queued barrier can't be canceled.  This is because
1916affee4b2STejun Heo  * try_to_grab_pending() can't determine whether the work to be
1917affee4b2STejun Heo  * grabbed is at the head of the queue and thus can't clear LINKED
1918affee4b2STejun Heo  * flag of the previous work while there must be a valid next work
1919affee4b2STejun Heo  * after a work with LINKED flag set.
1920affee4b2STejun Heo  *
1921affee4b2STejun Heo  * Note that when @worker is non-NULL, @target may be modified
1922affee4b2STejun Heo  * underneath us, so we can't reliably determine cwq from @target.
19234690c4abSTejun Heo  *
19244690c4abSTejun Heo  * CONTEXT:
19258b03ae3cSTejun Heo  * spin_lock_irq(gcwq->lock).
19264690c4abSTejun Heo  */
192783c22520SOleg Nesterov static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
1928affee4b2STejun Heo 			      struct wq_barrier *barr,
1929affee4b2STejun Heo 			      struct work_struct *target, struct worker *worker)
1930fc2e4d70SOleg Nesterov {
1931affee4b2STejun Heo 	struct list_head *head;
1932affee4b2STejun Heo 	unsigned int linked = 0;
1933affee4b2STejun Heo 
1934dc186ad7SThomas Gleixner 	/*
19358b03ae3cSTejun Heo 	 * debugobject calls are safe here even with gcwq->lock locked
1936dc186ad7SThomas Gleixner 	 * as we know for sure that this will not trigger any of the
1937dc186ad7SThomas Gleixner 	 * checks and call back into the fixup functions where we
1938dc186ad7SThomas Gleixner 	 * might deadlock.
1939dc186ad7SThomas Gleixner 	 */
1940dc186ad7SThomas Gleixner 	INIT_WORK_ON_STACK(&barr->work, wq_barrier_func);
194122df02bbSTejun Heo 	__set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work));
1942fc2e4d70SOleg Nesterov 	init_completion(&barr->done);
194383c22520SOleg Nesterov 
1944affee4b2STejun Heo 	/*
1945affee4b2STejun Heo 	 * If @target is currently being executed, schedule the
1946affee4b2STejun Heo 	 * barrier to the worker; otherwise, put it after @target.
1947affee4b2STejun Heo 	 */
1948affee4b2STejun Heo 	if (worker)
1949affee4b2STejun Heo 		head = worker->scheduled.next;
1950affee4b2STejun Heo 	else {
1951affee4b2STejun Heo 		unsigned long *bits = work_data_bits(target);
1952affee4b2STejun Heo 
1953affee4b2STejun Heo 		head = target->entry.next;
1954affee4b2STejun Heo 		/* there can already be other linked works, inherit and set */
1955affee4b2STejun Heo 		linked = *bits & WORK_STRUCT_LINKED;
1956affee4b2STejun Heo 		__set_bit(WORK_STRUCT_LINKED_BIT, bits);
1957affee4b2STejun Heo 	}
1958affee4b2STejun Heo 
1959dc186ad7SThomas Gleixner 	debug_work_activate(&barr->work);
1960affee4b2STejun Heo 	insert_work(cwq, &barr->work, head,
1961affee4b2STejun Heo 		    work_color_to_flags(WORK_NO_COLOR) | linked);
1962fc2e4d70SOleg Nesterov }
1963fc2e4d70SOleg Nesterov 
196473f53c4aSTejun Heo /**
196573f53c4aSTejun Heo  * flush_workqueue_prep_cwqs - prepare cwqs for workqueue flushing
196673f53c4aSTejun Heo  * @wq: workqueue being flushed
196773f53c4aSTejun Heo  * @flush_color: new flush color, < 0 for no-op
196873f53c4aSTejun Heo  * @work_color: new work color, < 0 for no-op
196973f53c4aSTejun Heo  *
197073f53c4aSTejun Heo  * Prepare cwqs for workqueue flushing.
197173f53c4aSTejun Heo  *
197273f53c4aSTejun Heo  * If @flush_color is non-negative, flush_color on all cwqs should be
197373f53c4aSTejun Heo  * -1.  If no cwq has in-flight commands at the specified color, all
197473f53c4aSTejun Heo  * cwq->flush_color's stay at -1 and %false is returned.  If any cwq
197573f53c4aSTejun Heo  * has in flight commands, its cwq->flush_color is set to
197673f53c4aSTejun Heo  * @flush_color, @wq->nr_cwqs_to_flush is updated accordingly, cwq
197773f53c4aSTejun Heo  * wakeup logic is armed and %true is returned.
197873f53c4aSTejun Heo  *
197973f53c4aSTejun Heo  * The caller should have initialized @wq->first_flusher prior to
198073f53c4aSTejun Heo  * calling this function with non-negative @flush_color.  If
198173f53c4aSTejun Heo  * @flush_color is negative, no flush color update is done and %false
198273f53c4aSTejun Heo  * is returned.
198373f53c4aSTejun Heo  *
198473f53c4aSTejun Heo  * If @work_color is non-negative, all cwqs should have the same
198573f53c4aSTejun Heo  * work_color which is previous to @work_color and all will be
198673f53c4aSTejun Heo  * advanced to @work_color.
198773f53c4aSTejun Heo  *
198873f53c4aSTejun Heo  * CONTEXT:
198973f53c4aSTejun Heo  * mutex_lock(wq->flush_mutex).
199073f53c4aSTejun Heo  *
199173f53c4aSTejun Heo  * RETURNS:
199273f53c4aSTejun Heo  * %true if @flush_color >= 0 and there's something to flush.  %false
199373f53c4aSTejun Heo  * otherwise.
199473f53c4aSTejun Heo  */
199573f53c4aSTejun Heo static bool flush_workqueue_prep_cwqs(struct workqueue_struct *wq,
199673f53c4aSTejun Heo 				      int flush_color, int work_color)
19971da177e4SLinus Torvalds {
199873f53c4aSTejun Heo 	bool wait = false;
199973f53c4aSTejun Heo 	unsigned int cpu;
20001da177e4SLinus Torvalds 
200173f53c4aSTejun Heo 	if (flush_color >= 0) {
200273f53c4aSTejun Heo 		BUG_ON(atomic_read(&wq->nr_cwqs_to_flush));
200373f53c4aSTejun Heo 		atomic_set(&wq->nr_cwqs_to_flush, 1);
200473f53c4aSTejun Heo 	}
200573f53c4aSTejun Heo 
200673f53c4aSTejun Heo 	for_each_possible_cpu(cpu) {
200773f53c4aSTejun Heo 		struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
20088b03ae3cSTejun Heo 		struct global_cwq *gcwq = cwq->gcwq;
20092355b70fSLai Jiangshan 
20108b03ae3cSTejun Heo 		spin_lock_irq(&gcwq->lock);
201173f53c4aSTejun Heo 
201273f53c4aSTejun Heo 		if (flush_color >= 0) {
201373f53c4aSTejun Heo 			BUG_ON(cwq->flush_color != -1);
201473f53c4aSTejun Heo 
201573f53c4aSTejun Heo 			if (cwq->nr_in_flight[flush_color]) {
201673f53c4aSTejun Heo 				cwq->flush_color = flush_color;
201773f53c4aSTejun Heo 				atomic_inc(&wq->nr_cwqs_to_flush);
201873f53c4aSTejun Heo 				wait = true;
201983c22520SOleg Nesterov 			}
202073f53c4aSTejun Heo 		}
202173f53c4aSTejun Heo 
202273f53c4aSTejun Heo 		if (work_color >= 0) {
202373f53c4aSTejun Heo 			BUG_ON(work_color != work_next_color(cwq->work_color));
202473f53c4aSTejun Heo 			cwq->work_color = work_color;
202573f53c4aSTejun Heo 		}
202673f53c4aSTejun Heo 
20278b03ae3cSTejun Heo 		spin_unlock_irq(&gcwq->lock);
2028dc186ad7SThomas Gleixner 	}
202914441960SOleg Nesterov 
203073f53c4aSTejun Heo 	if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_cwqs_to_flush))
203173f53c4aSTejun Heo 		complete(&wq->first_flusher->done);
203273f53c4aSTejun Heo 
203373f53c4aSTejun Heo 	return wait;
203483c22520SOleg Nesterov }
20351da177e4SLinus Torvalds 
20360fcb78c2SRolf Eike Beer /**
20371da177e4SLinus Torvalds  * flush_workqueue - ensure that any scheduled work has run to completion.
20380fcb78c2SRolf Eike Beer  * @wq: workqueue to flush
20391da177e4SLinus Torvalds  *
20401da177e4SLinus Torvalds  * Forces execution of the workqueue and blocks until its completion.
20411da177e4SLinus Torvalds  * This is typically used in driver shutdown handlers.
20421da177e4SLinus Torvalds  *
2043fc2e4d70SOleg Nesterov  * We sleep until all works which were queued on entry have been handled,
2044fc2e4d70SOleg Nesterov  * but we are not livelocked by new incoming ones.
20451da177e4SLinus Torvalds  */
20467ad5b3a5SHarvey Harrison void flush_workqueue(struct workqueue_struct *wq)
20471da177e4SLinus Torvalds {
204873f53c4aSTejun Heo 	struct wq_flusher this_flusher = {
204973f53c4aSTejun Heo 		.list = LIST_HEAD_INIT(this_flusher.list),
205073f53c4aSTejun Heo 		.flush_color = -1,
205173f53c4aSTejun Heo 		.done = COMPLETION_INITIALIZER_ONSTACK(this_flusher.done),
205273f53c4aSTejun Heo 	};
205373f53c4aSTejun Heo 	int next_color;
2054b1f4ec17SOleg Nesterov 
20553295f0efSIngo Molnar 	lock_map_acquire(&wq->lockdep_map);
20563295f0efSIngo Molnar 	lock_map_release(&wq->lockdep_map);
205773f53c4aSTejun Heo 
205873f53c4aSTejun Heo 	mutex_lock(&wq->flush_mutex);
205973f53c4aSTejun Heo 
206073f53c4aSTejun Heo 	/*
206173f53c4aSTejun Heo 	 * Start-to-wait phase
206273f53c4aSTejun Heo 	 */
206373f53c4aSTejun Heo 	next_color = work_next_color(wq->work_color);
206473f53c4aSTejun Heo 
206573f53c4aSTejun Heo 	if (next_color != wq->flush_color) {
206673f53c4aSTejun Heo 		/*
206773f53c4aSTejun Heo 		 * Color space is not full.  The current work_color
206873f53c4aSTejun Heo 		 * becomes our flush_color and work_color is advanced
206973f53c4aSTejun Heo 		 * by one.
207073f53c4aSTejun Heo 		 */
207173f53c4aSTejun Heo 		BUG_ON(!list_empty(&wq->flusher_overflow));
207273f53c4aSTejun Heo 		this_flusher.flush_color = wq->work_color;
207373f53c4aSTejun Heo 		wq->work_color = next_color;
207473f53c4aSTejun Heo 
207573f53c4aSTejun Heo 		if (!wq->first_flusher) {
207673f53c4aSTejun Heo 			/* no flush in progress, become the first flusher */
207773f53c4aSTejun Heo 			BUG_ON(wq->flush_color != this_flusher.flush_color);
207873f53c4aSTejun Heo 
207973f53c4aSTejun Heo 			wq->first_flusher = &this_flusher;
208073f53c4aSTejun Heo 
208173f53c4aSTejun Heo 			if (!flush_workqueue_prep_cwqs(wq, wq->flush_color,
208273f53c4aSTejun Heo 						       wq->work_color)) {
208373f53c4aSTejun Heo 				/* nothing to flush, done */
208473f53c4aSTejun Heo 				wq->flush_color = next_color;
208573f53c4aSTejun Heo 				wq->first_flusher = NULL;
208673f53c4aSTejun Heo 				goto out_unlock;
208773f53c4aSTejun Heo 			}
208873f53c4aSTejun Heo 		} else {
208973f53c4aSTejun Heo 			/* wait in queue */
209073f53c4aSTejun Heo 			BUG_ON(wq->flush_color == this_flusher.flush_color);
209173f53c4aSTejun Heo 			list_add_tail(&this_flusher.list, &wq->flusher_queue);
209273f53c4aSTejun Heo 			flush_workqueue_prep_cwqs(wq, -1, wq->work_color);
209373f53c4aSTejun Heo 		}
209473f53c4aSTejun Heo 	} else {
209573f53c4aSTejun Heo 		/*
209673f53c4aSTejun Heo 		 * Oops, color space is full, wait on overflow queue.
209773f53c4aSTejun Heo 		 * The next flush completion will assign us
209873f53c4aSTejun Heo 		 * flush_color and transfer to flusher_queue.
209973f53c4aSTejun Heo 		 */
210073f53c4aSTejun Heo 		list_add_tail(&this_flusher.list, &wq->flusher_overflow);
210173f53c4aSTejun Heo 	}
210273f53c4aSTejun Heo 
210373f53c4aSTejun Heo 	mutex_unlock(&wq->flush_mutex);
210473f53c4aSTejun Heo 
210573f53c4aSTejun Heo 	wait_for_completion(&this_flusher.done);
210673f53c4aSTejun Heo 
210773f53c4aSTejun Heo 	/*
210873f53c4aSTejun Heo 	 * Wake-up-and-cascade phase
210973f53c4aSTejun Heo 	 *
211073f53c4aSTejun Heo 	 * First flushers are responsible for cascading flushes and
211173f53c4aSTejun Heo 	 * handling overflow.  Non-first flushers can simply return.
211273f53c4aSTejun Heo 	 */
211373f53c4aSTejun Heo 	if (wq->first_flusher != &this_flusher)
211473f53c4aSTejun Heo 		return;
211573f53c4aSTejun Heo 
211673f53c4aSTejun Heo 	mutex_lock(&wq->flush_mutex);
211773f53c4aSTejun Heo 
211873f53c4aSTejun Heo 	wq->first_flusher = NULL;
211973f53c4aSTejun Heo 
212073f53c4aSTejun Heo 	BUG_ON(!list_empty(&this_flusher.list));
212173f53c4aSTejun Heo 	BUG_ON(wq->flush_color != this_flusher.flush_color);
212273f53c4aSTejun Heo 
212373f53c4aSTejun Heo 	while (true) {
212473f53c4aSTejun Heo 		struct wq_flusher *next, *tmp;
212573f53c4aSTejun Heo 
212673f53c4aSTejun Heo 		/* complete all the flushers sharing the current flush color */
212773f53c4aSTejun Heo 		list_for_each_entry_safe(next, tmp, &wq->flusher_queue, list) {
212873f53c4aSTejun Heo 			if (next->flush_color != wq->flush_color)
212973f53c4aSTejun Heo 				break;
213073f53c4aSTejun Heo 			list_del_init(&next->list);
213173f53c4aSTejun Heo 			complete(&next->done);
213273f53c4aSTejun Heo 		}
213373f53c4aSTejun Heo 
213473f53c4aSTejun Heo 		BUG_ON(!list_empty(&wq->flusher_overflow) &&
213573f53c4aSTejun Heo 		       wq->flush_color != work_next_color(wq->work_color));
213673f53c4aSTejun Heo 
213773f53c4aSTejun Heo 		/* this flush_color is finished, advance by one */
213873f53c4aSTejun Heo 		wq->flush_color = work_next_color(wq->flush_color);
213973f53c4aSTejun Heo 
214073f53c4aSTejun Heo 		/* one color has been freed, handle overflow queue */
214173f53c4aSTejun Heo 		if (!list_empty(&wq->flusher_overflow)) {
214273f53c4aSTejun Heo 			/*
214373f53c4aSTejun Heo 			 * Assign the same color to all overflowed
214473f53c4aSTejun Heo 			 * flushers, advance work_color and append to
214573f53c4aSTejun Heo 			 * flusher_queue.  This is the start-to-wait
214673f53c4aSTejun Heo 			 * phase for these overflowed flushers.
214773f53c4aSTejun Heo 			 */
214873f53c4aSTejun Heo 			list_for_each_entry(tmp, &wq->flusher_overflow, list)
214973f53c4aSTejun Heo 				tmp->flush_color = wq->work_color;
215073f53c4aSTejun Heo 
215173f53c4aSTejun Heo 			wq->work_color = work_next_color(wq->work_color);
215273f53c4aSTejun Heo 
215373f53c4aSTejun Heo 			list_splice_tail_init(&wq->flusher_overflow,
215473f53c4aSTejun Heo 					      &wq->flusher_queue);
215573f53c4aSTejun Heo 			flush_workqueue_prep_cwqs(wq, -1, wq->work_color);
215673f53c4aSTejun Heo 		}
215773f53c4aSTejun Heo 
215873f53c4aSTejun Heo 		if (list_empty(&wq->flusher_queue)) {
215973f53c4aSTejun Heo 			BUG_ON(wq->flush_color != wq->work_color);
216073f53c4aSTejun Heo 			break;
216173f53c4aSTejun Heo 		}
216273f53c4aSTejun Heo 
216373f53c4aSTejun Heo 		/*
216473f53c4aSTejun Heo 		 * Need to flush more colors.  Make the next flusher
216573f53c4aSTejun Heo 		 * the new first flusher and arm cwqs.
216673f53c4aSTejun Heo 		 */
216773f53c4aSTejun Heo 		BUG_ON(wq->flush_color == wq->work_color);
216873f53c4aSTejun Heo 		BUG_ON(wq->flush_color != next->flush_color);
216973f53c4aSTejun Heo 
217073f53c4aSTejun Heo 		list_del_init(&next->list);
217173f53c4aSTejun Heo 		wq->first_flusher = next;
217273f53c4aSTejun Heo 
217373f53c4aSTejun Heo 		if (flush_workqueue_prep_cwqs(wq, wq->flush_color, -1))
217473f53c4aSTejun Heo 			break;
217573f53c4aSTejun Heo 
217673f53c4aSTejun Heo 		/*
217773f53c4aSTejun Heo 		 * Meh... this color is already done, clear first
217873f53c4aSTejun Heo 		 * flusher and repeat cascading.
217973f53c4aSTejun Heo 		 */
218073f53c4aSTejun Heo 		wq->first_flusher = NULL;
218173f53c4aSTejun Heo 	}
218273f53c4aSTejun Heo 
218373f53c4aSTejun Heo out_unlock:
218473f53c4aSTejun Heo 	mutex_unlock(&wq->flush_mutex);
21851da177e4SLinus Torvalds }
2186ae90dd5dSDave Jones EXPORT_SYMBOL_GPL(flush_workqueue);
21871da177e4SLinus Torvalds 
2188db700897SOleg Nesterov /**
2189db700897SOleg Nesterov  * flush_work - block until a work_struct's callback has terminated
2190db700897SOleg Nesterov  * @work: the work which is to be flushed
2191db700897SOleg Nesterov  *
2192a67da70dSOleg Nesterov  * Returns false if @work has already terminated.
2193a67da70dSOleg Nesterov  *
2194db700897SOleg Nesterov  * It is expected that, prior to calling flush_work(), the caller has
2195db700897SOleg Nesterov  * arranged for the work to not be requeued, otherwise it doesn't make
2196db700897SOleg Nesterov  * sense to use this function.
2197db700897SOleg Nesterov  */
2198db700897SOleg Nesterov int flush_work(struct work_struct *work)
2199db700897SOleg Nesterov {
2200affee4b2STejun Heo 	struct worker *worker = NULL;
22018b03ae3cSTejun Heo 	struct global_cwq *gcwq;
22027a22ad75STejun Heo 	struct cpu_workqueue_struct *cwq;
2203db700897SOleg Nesterov 	struct wq_barrier barr;
2204db700897SOleg Nesterov 
2205db700897SOleg Nesterov 	might_sleep();
22067a22ad75STejun Heo 	gcwq = get_work_gcwq(work);
22077a22ad75STejun Heo 	if (!gcwq)
2208db700897SOleg Nesterov 		return 0;
2209a67da70dSOleg Nesterov 
22108b03ae3cSTejun Heo 	spin_lock_irq(&gcwq->lock);
2211db700897SOleg Nesterov 	if (!list_empty(&work->entry)) {
2212db700897SOleg Nesterov 		/*
2213db700897SOleg Nesterov 		 * See the comment near try_to_grab_pending()->smp_rmb().
22147a22ad75STejun Heo 		 * If it was re-queued to a different gcwq under us, we
22157a22ad75STejun Heo 		 * are not going to wait.
2216db700897SOleg Nesterov 		 */
2217db700897SOleg Nesterov 		smp_rmb();
22187a22ad75STejun Heo 		cwq = get_work_cwq(work);
22197a22ad75STejun Heo 		if (unlikely(!cwq || gcwq != cwq->gcwq))
22204690c4abSTejun Heo 			goto already_gone;
2221db700897SOleg Nesterov 	} else {
22227a22ad75STejun Heo 		worker = find_worker_executing_work(gcwq, work);
2223affee4b2STejun Heo 		if (!worker)
22244690c4abSTejun Heo 			goto already_gone;
22257a22ad75STejun Heo 		cwq = worker->current_cwq;
2226db700897SOleg Nesterov 	}
2227db700897SOleg Nesterov 
2228affee4b2STejun Heo 	insert_wq_barrier(cwq, &barr, work, worker);
22298b03ae3cSTejun Heo 	spin_unlock_irq(&gcwq->lock);
22307a22ad75STejun Heo 
22317a22ad75STejun Heo 	lock_map_acquire(&cwq->wq->lockdep_map);
22327a22ad75STejun Heo 	lock_map_release(&cwq->wq->lockdep_map);
22337a22ad75STejun Heo 
2234db700897SOleg Nesterov 	wait_for_completion(&barr.done);
2235dc186ad7SThomas Gleixner 	destroy_work_on_stack(&barr.work);
2236db700897SOleg Nesterov 	return 1;
22374690c4abSTejun Heo already_gone:
22388b03ae3cSTejun Heo 	spin_unlock_irq(&gcwq->lock);
22394690c4abSTejun Heo 	return 0;
2240db700897SOleg Nesterov }
2241db700897SOleg Nesterov EXPORT_SYMBOL_GPL(flush_work);
2242db700897SOleg Nesterov 
22436e84d644SOleg Nesterov /*
22441f1f642eSOleg Nesterov  * Upon a successful return (>= 0), the caller "owns" WORK_STRUCT_PENDING bit,
22456e84d644SOleg Nesterov  * so this work can't be re-armed in any way.
22466e84d644SOleg Nesterov  */
22476e84d644SOleg Nesterov static int try_to_grab_pending(struct work_struct *work)
22486e84d644SOleg Nesterov {
22498b03ae3cSTejun Heo 	struct global_cwq *gcwq;
22501f1f642eSOleg Nesterov 	int ret = -1;
22516e84d644SOleg Nesterov 
225222df02bbSTejun Heo 	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)))
22531f1f642eSOleg Nesterov 		return 0;
22546e84d644SOleg Nesterov 
22556e84d644SOleg Nesterov 	/*
22566e84d644SOleg Nesterov 	 * The queueing is in progress, or it is already queued. Try to
22576e84d644SOleg Nesterov 	 * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
22586e84d644SOleg Nesterov 	 */
22597a22ad75STejun Heo 	gcwq = get_work_gcwq(work);
22607a22ad75STejun Heo 	if (!gcwq)
22616e84d644SOleg Nesterov 		return ret;
22626e84d644SOleg Nesterov 
22638b03ae3cSTejun Heo 	spin_lock_irq(&gcwq->lock);
22646e84d644SOleg Nesterov 	if (!list_empty(&work->entry)) {
22656e84d644SOleg Nesterov 		/*
22667a22ad75STejun Heo 		 * This work is queued, but perhaps we locked the wrong gcwq.
22676e84d644SOleg Nesterov 		 * In that case we must see the new value after rmb(), see
22686e84d644SOleg Nesterov 		 * insert_work()->wmb().
22696e84d644SOleg Nesterov 		 */
22706e84d644SOleg Nesterov 		smp_rmb();
22717a22ad75STejun Heo 		if (gcwq == get_work_gcwq(work)) {
2272dc186ad7SThomas Gleixner 			debug_work_deactivate(work);
22736e84d644SOleg Nesterov 			list_del_init(&work->entry);
22747a22ad75STejun Heo 			cwq_dec_nr_in_flight(get_work_cwq(work),
22757a22ad75STejun Heo 					     get_work_color(work));
22766e84d644SOleg Nesterov 			ret = 1;
22776e84d644SOleg Nesterov 		}
22786e84d644SOleg Nesterov 	}
22798b03ae3cSTejun Heo 	spin_unlock_irq(&gcwq->lock);
22806e84d644SOleg Nesterov 
22816e84d644SOleg Nesterov 	return ret;
22826e84d644SOleg Nesterov }
22836e84d644SOleg Nesterov 
22847a22ad75STejun Heo static void wait_on_cpu_work(struct global_cwq *gcwq, struct work_struct *work)
2285b89deed3SOleg Nesterov {
2286b89deed3SOleg Nesterov 	struct wq_barrier barr;
2287affee4b2STejun Heo 	struct worker *worker;
2288b89deed3SOleg Nesterov 
22898b03ae3cSTejun Heo 	spin_lock_irq(&gcwq->lock);
2290affee4b2STejun Heo 
22917a22ad75STejun Heo 	worker = find_worker_executing_work(gcwq, work);
22927a22ad75STejun Heo 	if (unlikely(worker))
22937a22ad75STejun Heo 		insert_wq_barrier(worker->current_cwq, &barr, work, worker);
2294affee4b2STejun Heo 
22958b03ae3cSTejun Heo 	spin_unlock_irq(&gcwq->lock);
2296b89deed3SOleg Nesterov 
2297affee4b2STejun Heo 	if (unlikely(worker)) {
2298b89deed3SOleg Nesterov 		wait_for_completion(&barr.done);
2299dc186ad7SThomas Gleixner 		destroy_work_on_stack(&barr.work);
2300dc186ad7SThomas Gleixner 	}
2301b89deed3SOleg Nesterov }
2302b89deed3SOleg Nesterov 
23036e84d644SOleg Nesterov static void wait_on_work(struct work_struct *work)
2304b89deed3SOleg Nesterov {
2305b1f4ec17SOleg Nesterov 	int cpu;
2306b89deed3SOleg Nesterov 
2307f293ea92SOleg Nesterov 	might_sleep();
2308f293ea92SOleg Nesterov 
23093295f0efSIngo Molnar 	lock_map_acquire(&work->lockdep_map);
23103295f0efSIngo Molnar 	lock_map_release(&work->lockdep_map);
23114e6045f1SJohannes Berg 
23121537663fSTejun Heo 	for_each_possible_cpu(cpu)
23137a22ad75STejun Heo 		wait_on_cpu_work(get_gcwq(cpu), work);
23146e84d644SOleg Nesterov }
23156e84d644SOleg Nesterov 
23161f1f642eSOleg Nesterov static int __cancel_work_timer(struct work_struct *work,
23171f1f642eSOleg Nesterov 				struct timer_list* timer)
23181f1f642eSOleg Nesterov {
23191f1f642eSOleg Nesterov 	int ret;
23201f1f642eSOleg Nesterov 
23211f1f642eSOleg Nesterov 	do {
23221f1f642eSOleg Nesterov 		ret = (timer && likely(del_timer(timer)));
23231f1f642eSOleg Nesterov 		if (!ret)
23241f1f642eSOleg Nesterov 			ret = try_to_grab_pending(work);
23251f1f642eSOleg Nesterov 		wait_on_work(work);
23261f1f642eSOleg Nesterov 	} while (unlikely(ret < 0));
23271f1f642eSOleg Nesterov 
23287a22ad75STejun Heo 	clear_work_data(work);
23291f1f642eSOleg Nesterov 	return ret;
23301f1f642eSOleg Nesterov }
23311f1f642eSOleg Nesterov 
23326e84d644SOleg Nesterov /**
23336e84d644SOleg Nesterov  * cancel_work_sync - block until a work_struct's callback has terminated
23346e84d644SOleg Nesterov  * @work: the work which is to be flushed
23356e84d644SOleg Nesterov  *
23361f1f642eSOleg Nesterov  * Returns true if @work was pending.
23371f1f642eSOleg Nesterov  *
23386e84d644SOleg Nesterov  * cancel_work_sync() will cancel the work if it is queued. If the work's
23396e84d644SOleg Nesterov  * callback appears to be running, cancel_work_sync() will block until it
23406e84d644SOleg Nesterov  * has completed.
23416e84d644SOleg Nesterov  *
23426e84d644SOleg Nesterov  * It is possible to use this function if the work re-queues itself. It can
23436e84d644SOleg Nesterov  * cancel the work even if it migrates to another workqueue, however in that
23446e84d644SOleg Nesterov  * case it only guarantees that work->func() has completed on the last queued
23456e84d644SOleg Nesterov  * workqueue.
23466e84d644SOleg Nesterov  *
23476e84d644SOleg Nesterov  * cancel_work_sync(&delayed_work->work) should be used only if ->timer is not
23486e84d644SOleg Nesterov  * pending, otherwise it goes into a busy-wait loop until the timer expires.
23496e84d644SOleg Nesterov  *
23506e84d644SOleg Nesterov  * The caller must ensure that workqueue_struct on which this work was last
23516e84d644SOleg Nesterov  * queued can't be destroyed before this function returns.
23526e84d644SOleg Nesterov  */
23531f1f642eSOleg Nesterov int cancel_work_sync(struct work_struct *work)
23546e84d644SOleg Nesterov {
23551f1f642eSOleg Nesterov 	return __cancel_work_timer(work, NULL);
2356b89deed3SOleg Nesterov }
235728e53bddSOleg Nesterov EXPORT_SYMBOL_GPL(cancel_work_sync);
2358b89deed3SOleg Nesterov 
23596e84d644SOleg Nesterov /**
2360f5a421a4SOleg Nesterov  * cancel_delayed_work_sync - reliably kill off a delayed work.
23616e84d644SOleg Nesterov  * @dwork: the delayed work struct
23626e84d644SOleg Nesterov  *
23631f1f642eSOleg Nesterov  * Returns true if @dwork was pending.
23641f1f642eSOleg Nesterov  *
23656e84d644SOleg Nesterov  * It is possible to use this function if @dwork rearms itself via queue_work()
23666e84d644SOleg Nesterov  * or queue_delayed_work(). See also the comment for cancel_work_sync().
23676e84d644SOleg Nesterov  */
23681f1f642eSOleg Nesterov int cancel_delayed_work_sync(struct delayed_work *dwork)
23696e84d644SOleg Nesterov {
23701f1f642eSOleg Nesterov 	return __cancel_work_timer(&dwork->work, &dwork->timer);
23716e84d644SOleg Nesterov }
2372f5a421a4SOleg Nesterov EXPORT_SYMBOL(cancel_delayed_work_sync);
23731da177e4SLinus Torvalds 
23740fcb78c2SRolf Eike Beer /**
23750fcb78c2SRolf Eike Beer  * schedule_work - put work task in global workqueue
23760fcb78c2SRolf Eike Beer  * @work: job to be done
23770fcb78c2SRolf Eike Beer  *
23785b0f437dSBart Van Assche  * Returns zero if @work was already on the kernel-global workqueue and
23795b0f437dSBart Van Assche  * non-zero otherwise.
23805b0f437dSBart Van Assche  *
23815b0f437dSBart Van Assche  * This puts a job in the kernel-global workqueue if it was not already
23825b0f437dSBart Van Assche  * queued and leaves it in the same position on the kernel-global
23835b0f437dSBart Van Assche  * workqueue otherwise.
23840fcb78c2SRolf Eike Beer  */
23857ad5b3a5SHarvey Harrison int schedule_work(struct work_struct *work)
23861da177e4SLinus Torvalds {
2387d320c038STejun Heo 	return queue_work(system_wq, work);
23881da177e4SLinus Torvalds }
2389ae90dd5dSDave Jones EXPORT_SYMBOL(schedule_work);
23901da177e4SLinus Torvalds 
2391c1a220e7SZhang Rui /*
2392c1a220e7SZhang Rui  * schedule_work_on - put work task on a specific cpu
2393c1a220e7SZhang Rui  * @cpu: cpu to put the work task on
2394c1a220e7SZhang Rui  * @work: job to be done
2395c1a220e7SZhang Rui  *
2396c1a220e7SZhang Rui  * This puts a job on a specific cpu
2397c1a220e7SZhang Rui  */
2398c1a220e7SZhang Rui int schedule_work_on(int cpu, struct work_struct *work)
2399c1a220e7SZhang Rui {
2400d320c038STejun Heo 	return queue_work_on(cpu, system_wq, work);
2401c1a220e7SZhang Rui }
2402c1a220e7SZhang Rui EXPORT_SYMBOL(schedule_work_on);
2403c1a220e7SZhang Rui 
24040fcb78c2SRolf Eike Beer /**
24050fcb78c2SRolf Eike Beer  * schedule_delayed_work - put work task in global workqueue after delay
240652bad64dSDavid Howells  * @dwork: job to be done
240752bad64dSDavid Howells  * @delay: number of jiffies to wait or 0 for immediate execution
24080fcb78c2SRolf Eike Beer  *
24090fcb78c2SRolf Eike Beer  * After waiting for a given time this puts a job in the kernel-global
24100fcb78c2SRolf Eike Beer  * workqueue.
24110fcb78c2SRolf Eike Beer  */
24127ad5b3a5SHarvey Harrison int schedule_delayed_work(struct delayed_work *dwork,
241382f67cd9SIngo Molnar 					unsigned long delay)
24141da177e4SLinus Torvalds {
2415d320c038STejun Heo 	return queue_delayed_work(system_wq, dwork, delay);
24161da177e4SLinus Torvalds }
2417ae90dd5dSDave Jones EXPORT_SYMBOL(schedule_delayed_work);
24181da177e4SLinus Torvalds 
24190fcb78c2SRolf Eike Beer /**
24208c53e463SLinus Torvalds  * flush_delayed_work - block until a dwork_struct's callback has terminated
24218c53e463SLinus Torvalds  * @dwork: the delayed work which is to be flushed
24228c53e463SLinus Torvalds  *
24238c53e463SLinus Torvalds  * Any timeout is cancelled, and any pending work is run immediately.
24248c53e463SLinus Torvalds  */
24258c53e463SLinus Torvalds void flush_delayed_work(struct delayed_work *dwork)
24268c53e463SLinus Torvalds {
24278c53e463SLinus Torvalds 	if (del_timer_sync(&dwork->timer)) {
24287a22ad75STejun Heo 		__queue_work(get_cpu(), get_work_cwq(&dwork->work)->wq,
24294690c4abSTejun Heo 			     &dwork->work);
24308c53e463SLinus Torvalds 		put_cpu();
24318c53e463SLinus Torvalds 	}
24328c53e463SLinus Torvalds 	flush_work(&dwork->work);
24338c53e463SLinus Torvalds }
24348c53e463SLinus Torvalds EXPORT_SYMBOL(flush_delayed_work);
24358c53e463SLinus Torvalds 
24368c53e463SLinus Torvalds /**
24370fcb78c2SRolf Eike Beer  * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
24380fcb78c2SRolf Eike Beer  * @cpu: cpu to use
243952bad64dSDavid Howells  * @dwork: job to be done
24400fcb78c2SRolf Eike Beer  * @delay: number of jiffies to wait
24410fcb78c2SRolf Eike Beer  *
24420fcb78c2SRolf Eike Beer  * After waiting for a given time this puts a job in the kernel-global
24430fcb78c2SRolf Eike Beer  * workqueue on the specified CPU.
24440fcb78c2SRolf Eike Beer  */
24451da177e4SLinus Torvalds int schedule_delayed_work_on(int cpu,
244652bad64dSDavid Howells 			struct delayed_work *dwork, unsigned long delay)
24471da177e4SLinus Torvalds {
2448d320c038STejun Heo 	return queue_delayed_work_on(cpu, system_wq, dwork, delay);
24491da177e4SLinus Torvalds }
2450ae90dd5dSDave Jones EXPORT_SYMBOL(schedule_delayed_work_on);
24511da177e4SLinus Torvalds 
2452b6136773SAndrew Morton /**
2453b6136773SAndrew Morton  * schedule_on_each_cpu - call a function on each online CPU from keventd
2454b6136773SAndrew Morton  * @func: the function to call
2455b6136773SAndrew Morton  *
2456b6136773SAndrew Morton  * Returns zero on success.
2457b6136773SAndrew Morton  * Returns -ve errno on failure.
2458b6136773SAndrew Morton  *
2459b6136773SAndrew Morton  * schedule_on_each_cpu() is very slow.
2460b6136773SAndrew Morton  */
246165f27f38SDavid Howells int schedule_on_each_cpu(work_func_t func)
246215316ba8SChristoph Lameter {
246315316ba8SChristoph Lameter 	int cpu;
2464b6136773SAndrew Morton 	struct work_struct *works;
246515316ba8SChristoph Lameter 
2466b6136773SAndrew Morton 	works = alloc_percpu(struct work_struct);
2467b6136773SAndrew Morton 	if (!works)
246815316ba8SChristoph Lameter 		return -ENOMEM;
2469b6136773SAndrew Morton 
247095402b38SGautham R Shenoy 	get_online_cpus();
247193981800STejun Heo 
247215316ba8SChristoph Lameter 	for_each_online_cpu(cpu) {
24739bfb1839SIngo Molnar 		struct work_struct *work = per_cpu_ptr(works, cpu);
24749bfb1839SIngo Molnar 
24759bfb1839SIngo Molnar 		INIT_WORK(work, func);
24768de6d308SOleg Nesterov 		schedule_work_on(cpu, work);
247715316ba8SChristoph Lameter 	}
247893981800STejun Heo 
247993981800STejun Heo 	for_each_online_cpu(cpu)
24808616a89aSOleg Nesterov 		flush_work(per_cpu_ptr(works, cpu));
248193981800STejun Heo 
248295402b38SGautham R Shenoy 	put_online_cpus();
2483b6136773SAndrew Morton 	free_percpu(works);
248415316ba8SChristoph Lameter 	return 0;
248515316ba8SChristoph Lameter }
248615316ba8SChristoph Lameter 
2487eef6a7d5SAlan Stern /**
2488eef6a7d5SAlan Stern  * flush_scheduled_work - ensure that any scheduled work has run to completion.
2489eef6a7d5SAlan Stern  *
2490eef6a7d5SAlan Stern  * Forces execution of the kernel-global workqueue and blocks until its
2491eef6a7d5SAlan Stern  * completion.
2492eef6a7d5SAlan Stern  *
2493eef6a7d5SAlan Stern  * Think twice before calling this function!  It's very easy to get into
2494eef6a7d5SAlan Stern  * trouble if you don't take great care.  Either of the following situations
2495eef6a7d5SAlan Stern  * will lead to deadlock:
2496eef6a7d5SAlan Stern  *
2497eef6a7d5SAlan Stern  *	One of the work items currently on the workqueue needs to acquire
2498eef6a7d5SAlan Stern  *	a lock held by your code or its caller.
2499eef6a7d5SAlan Stern  *
2500eef6a7d5SAlan Stern  *	Your code is running in the context of a work routine.
2501eef6a7d5SAlan Stern  *
2502eef6a7d5SAlan Stern  * They will be detected by lockdep when they occur, but the first might not
2503eef6a7d5SAlan Stern  * occur very often.  It depends on what work items are on the workqueue and
2504eef6a7d5SAlan Stern  * what locks they need, which you have no control over.
2505eef6a7d5SAlan Stern  *
2506eef6a7d5SAlan Stern  * In most situations flushing the entire workqueue is overkill; you merely
2507eef6a7d5SAlan Stern  * need to know that a particular work item isn't queued and isn't running.
2508eef6a7d5SAlan Stern  * In such cases you should use cancel_delayed_work_sync() or
2509eef6a7d5SAlan Stern  * cancel_work_sync() instead.
2510eef6a7d5SAlan Stern  */
25111da177e4SLinus Torvalds void flush_scheduled_work(void)
25121da177e4SLinus Torvalds {
2513d320c038STejun Heo 	flush_workqueue(system_wq);
25141da177e4SLinus Torvalds }
2515ae90dd5dSDave Jones EXPORT_SYMBOL(flush_scheduled_work);
25161da177e4SLinus Torvalds 
25171da177e4SLinus Torvalds /**
25181fa44ecaSJames Bottomley  * execute_in_process_context - reliably execute the routine with user context
25191fa44ecaSJames Bottomley  * @fn:		the function to execute
25201fa44ecaSJames Bottomley  * @ew:		guaranteed storage for the execute work structure (must
25211fa44ecaSJames Bottomley  *		be available when the work executes)
25221fa44ecaSJames Bottomley  *
25231fa44ecaSJames Bottomley  * Executes the function immediately if process context is available,
25241fa44ecaSJames Bottomley  * otherwise schedules the function for delayed execution.
25251fa44ecaSJames Bottomley  *
25261fa44ecaSJames Bottomley  * Returns:	0 - function was executed
25271fa44ecaSJames Bottomley  *		1 - function was scheduled for execution
25281fa44ecaSJames Bottomley  */
252965f27f38SDavid Howells int execute_in_process_context(work_func_t fn, struct execute_work *ew)
25301fa44ecaSJames Bottomley {
25311fa44ecaSJames Bottomley 	if (!in_interrupt()) {
253265f27f38SDavid Howells 		fn(&ew->work);
25331fa44ecaSJames Bottomley 		return 0;
25341fa44ecaSJames Bottomley 	}
25351fa44ecaSJames Bottomley 
253665f27f38SDavid Howells 	INIT_WORK(&ew->work, fn);
25371fa44ecaSJames Bottomley 	schedule_work(&ew->work);
25381fa44ecaSJames Bottomley 
25391fa44ecaSJames Bottomley 	return 1;
25401fa44ecaSJames Bottomley }
25411fa44ecaSJames Bottomley EXPORT_SYMBOL_GPL(execute_in_process_context);
25421fa44ecaSJames Bottomley 
25431da177e4SLinus Torvalds int keventd_up(void)
25441da177e4SLinus Torvalds {
2545d320c038STejun Heo 	return system_wq != NULL;
25461da177e4SLinus Torvalds }
25471da177e4SLinus Torvalds 
25480f900049STejun Heo static struct cpu_workqueue_struct *alloc_cwqs(void)
25490f900049STejun Heo {
25500f900049STejun Heo 	/*
25510f900049STejun Heo 	 * cwqs are forced aligned according to WORK_STRUCT_FLAG_BITS.
25520f900049STejun Heo 	 * Make sure that the alignment isn't lower than that of
25530f900049STejun Heo 	 * unsigned long long.
25540f900049STejun Heo 	 */
25550f900049STejun Heo 	const size_t size = sizeof(struct cpu_workqueue_struct);
25560f900049STejun Heo 	const size_t align = max_t(size_t, 1 << WORK_STRUCT_FLAG_BITS,
25570f900049STejun Heo 				   __alignof__(unsigned long long));
25580f900049STejun Heo 	struct cpu_workqueue_struct *cwqs;
25590f900049STejun Heo #ifndef CONFIG_SMP
25600f900049STejun Heo 	void *ptr;
25610f900049STejun Heo 
25620f900049STejun Heo 	/*
25630f900049STejun Heo 	 * On UP, percpu allocator doesn't honor alignment parameter
25640f900049STejun Heo 	 * and simply uses arch-dependent default.  Allocate enough
25650f900049STejun Heo 	 * room to align cwq and put an extra pointer at the end
25660f900049STejun Heo 	 * pointing back to the originally allocated pointer which
25670f900049STejun Heo 	 * will be used for free.
25680f900049STejun Heo 	 *
25690f900049STejun Heo 	 * FIXME: This really belongs to UP percpu code.  Update UP
25700f900049STejun Heo 	 * percpu code to honor alignment and remove this ugliness.
25710f900049STejun Heo 	 */
25720f900049STejun Heo 	ptr = __alloc_percpu(size + align + sizeof(void *), 1);
25730f900049STejun Heo 	cwqs = PTR_ALIGN(ptr, align);
25740f900049STejun Heo 	*(void **)per_cpu_ptr(cwqs + 1, 0) = ptr;
25750f900049STejun Heo #else
25760f900049STejun Heo 	/* On SMP, percpu allocator can do it itself */
25770f900049STejun Heo 	cwqs = __alloc_percpu(size, align);
25780f900049STejun Heo #endif
25790f900049STejun Heo 	/* just in case, make sure it's actually aligned */
25800f900049STejun Heo 	BUG_ON(!IS_ALIGNED((unsigned long)cwqs, align));
25810f900049STejun Heo 	return cwqs;
25820f900049STejun Heo }
25830f900049STejun Heo 
25840f900049STejun Heo static void free_cwqs(struct cpu_workqueue_struct *cwqs)
25850f900049STejun Heo {
25860f900049STejun Heo #ifndef CONFIG_SMP
25870f900049STejun Heo 	/* on UP, the pointer to free is stored right after the cwq */
25880f900049STejun Heo 	if (cwqs)
25890f900049STejun Heo 		free_percpu(*(void **)per_cpu_ptr(cwqs + 1, 0));
25900f900049STejun Heo #else
25910f900049STejun Heo 	free_percpu(cwqs);
25920f900049STejun Heo #endif
25930f900049STejun Heo }
25940f900049STejun Heo 
2595b71ab8c2STejun Heo static int wq_clamp_max_active(int max_active, const char *name)
2596b71ab8c2STejun Heo {
2597b71ab8c2STejun Heo 	if (max_active < 1 || max_active > WQ_MAX_ACTIVE)
2598b71ab8c2STejun Heo 		printk(KERN_WARNING "workqueue: max_active %d requested for %s "
2599b71ab8c2STejun Heo 		       "is out of range, clamping between %d and %d\n",
2600b71ab8c2STejun Heo 		       max_active, name, 1, WQ_MAX_ACTIVE);
2601b71ab8c2STejun Heo 
2602b71ab8c2STejun Heo 	return clamp_val(max_active, 1, WQ_MAX_ACTIVE);
2603b71ab8c2STejun Heo }
2604b71ab8c2STejun Heo 
2605d320c038STejun Heo struct workqueue_struct *__alloc_workqueue_key(const char *name,
260697e37d7bSTejun Heo 					       unsigned int flags,
26071e19ffc6STejun Heo 					       int max_active,
2608eb13ba87SJohannes Berg 					       struct lock_class_key *key,
2609eb13ba87SJohannes Berg 					       const char *lock_name)
26103af24433SOleg Nesterov {
26113af24433SOleg Nesterov 	struct workqueue_struct *wq;
2612c34056a3STejun Heo 	unsigned int cpu;
26133af24433SOleg Nesterov 
2614d320c038STejun Heo 	max_active = max_active ?: WQ_DFL_ACTIVE;
2615b71ab8c2STejun Heo 	max_active = wq_clamp_max_active(max_active, name);
26161e19ffc6STejun Heo 
26173af24433SOleg Nesterov 	wq = kzalloc(sizeof(*wq), GFP_KERNEL);
26183af24433SOleg Nesterov 	if (!wq)
26194690c4abSTejun Heo 		goto err;
26203af24433SOleg Nesterov 
26210f900049STejun Heo 	wq->cpu_wq = alloc_cwqs();
26224690c4abSTejun Heo 	if (!wq->cpu_wq)
26234690c4abSTejun Heo 		goto err;
26243af24433SOleg Nesterov 
262597e37d7bSTejun Heo 	wq->flags = flags;
2626a0a1a5fdSTejun Heo 	wq->saved_max_active = max_active;
262773f53c4aSTejun Heo 	mutex_init(&wq->flush_mutex);
262873f53c4aSTejun Heo 	atomic_set(&wq->nr_cwqs_to_flush, 0);
262973f53c4aSTejun Heo 	INIT_LIST_HEAD(&wq->flusher_queue);
263073f53c4aSTejun Heo 	INIT_LIST_HEAD(&wq->flusher_overflow);
2631502ca9d8STejun Heo 	wq->single_cpu = NR_CPUS;
2632502ca9d8STejun Heo 
26333af24433SOleg Nesterov 	wq->name = name;
2634eb13ba87SJohannes Berg 	lockdep_init_map(&wq->lockdep_map, lock_name, key, 0);
2635cce1a165SOleg Nesterov 	INIT_LIST_HEAD(&wq->list);
26363af24433SOleg Nesterov 
26373af24433SOleg Nesterov 	for_each_possible_cpu(cpu) {
26381537663fSTejun Heo 		struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
26398b03ae3cSTejun Heo 		struct global_cwq *gcwq = get_gcwq(cpu);
26401537663fSTejun Heo 
26410f900049STejun Heo 		BUG_ON((unsigned long)cwq & WORK_STRUCT_FLAG_MASK);
26428b03ae3cSTejun Heo 		cwq->gcwq = gcwq;
2643c34056a3STejun Heo 		cwq->wq = wq;
264473f53c4aSTejun Heo 		cwq->flush_color = -1;
26451e19ffc6STejun Heo 		cwq->max_active = max_active;
26461e19ffc6STejun Heo 		INIT_LIST_HEAD(&cwq->delayed_works);
2647e22bee78STejun Heo 	}
26481537663fSTejun Heo 
2649e22bee78STejun Heo 	if (flags & WQ_RESCUER) {
2650e22bee78STejun Heo 		struct worker *rescuer;
2651e22bee78STejun Heo 
2652e22bee78STejun Heo 		if (!alloc_cpumask_var(&wq->mayday_mask, GFP_KERNEL))
2653e22bee78STejun Heo 			goto err;
2654e22bee78STejun Heo 
2655e22bee78STejun Heo 		wq->rescuer = rescuer = alloc_worker();
2656e22bee78STejun Heo 		if (!rescuer)
2657e22bee78STejun Heo 			goto err;
2658e22bee78STejun Heo 
2659e22bee78STejun Heo 		rescuer->task = kthread_create(rescuer_thread, wq, "%s", name);
2660e22bee78STejun Heo 		if (IS_ERR(rescuer->task))
2661e22bee78STejun Heo 			goto err;
2662e22bee78STejun Heo 
2663e22bee78STejun Heo 		wq->rescuer = rescuer;
2664e22bee78STejun Heo 		rescuer->task->flags |= PF_THREAD_BOUND;
2665e22bee78STejun Heo 		wake_up_process(rescuer->task);
26663af24433SOleg Nesterov 	}
26671537663fSTejun Heo 
2668a0a1a5fdSTejun Heo 	/*
2669a0a1a5fdSTejun Heo 	 * workqueue_lock protects global freeze state and workqueues
2670a0a1a5fdSTejun Heo 	 * list.  Grab it, set max_active accordingly and add the new
2671a0a1a5fdSTejun Heo 	 * workqueue to workqueues list.
2672a0a1a5fdSTejun Heo 	 */
26731537663fSTejun Heo 	spin_lock(&workqueue_lock);
2674a0a1a5fdSTejun Heo 
2675a0a1a5fdSTejun Heo 	if (workqueue_freezing && wq->flags & WQ_FREEZEABLE)
2676a0a1a5fdSTejun Heo 		for_each_possible_cpu(cpu)
2677a0a1a5fdSTejun Heo 			get_cwq(cpu, wq)->max_active = 0;
2678a0a1a5fdSTejun Heo 
26791537663fSTejun Heo 	list_add(&wq->list, &workqueues);
2680a0a1a5fdSTejun Heo 
26811537663fSTejun Heo 	spin_unlock(&workqueue_lock);
26821537663fSTejun Heo 
26833af24433SOleg Nesterov 	return wq;
26844690c4abSTejun Heo err:
26854690c4abSTejun Heo 	if (wq) {
26860f900049STejun Heo 		free_cwqs(wq->cpu_wq);
2687e22bee78STejun Heo 		free_cpumask_var(wq->mayday_mask);
2688e22bee78STejun Heo 		kfree(wq->rescuer);
26894690c4abSTejun Heo 		kfree(wq);
26904690c4abSTejun Heo 	}
26914690c4abSTejun Heo 	return NULL;
26923af24433SOleg Nesterov }
2693d320c038STejun Heo EXPORT_SYMBOL_GPL(__alloc_workqueue_key);
26943af24433SOleg Nesterov 
26953af24433SOleg Nesterov /**
26963af24433SOleg Nesterov  * destroy_workqueue - safely terminate a workqueue
26973af24433SOleg Nesterov  * @wq: target workqueue
26983af24433SOleg Nesterov  *
26993af24433SOleg Nesterov  * Safely destroy a workqueue. All work currently pending will be done first.
27003af24433SOleg Nesterov  */
27013af24433SOleg Nesterov void destroy_workqueue(struct workqueue_struct *wq)
27023af24433SOleg Nesterov {
2703c8e55f36STejun Heo 	unsigned int cpu;
27043af24433SOleg Nesterov 
2705a0a1a5fdSTejun Heo 	flush_workqueue(wq);
2706a0a1a5fdSTejun Heo 
2707a0a1a5fdSTejun Heo 	/*
2708a0a1a5fdSTejun Heo 	 * wq list is used to freeze wq, remove from list after
2709a0a1a5fdSTejun Heo 	 * flushing is complete in case freeze races us.
2710a0a1a5fdSTejun Heo 	 */
271195402b38SGautham R Shenoy 	spin_lock(&workqueue_lock);
27123af24433SOleg Nesterov 	list_del(&wq->list);
271395402b38SGautham R Shenoy 	spin_unlock(&workqueue_lock);
27143af24433SOleg Nesterov 
2715e22bee78STejun Heo 	/* sanity check */
271673f53c4aSTejun Heo 	for_each_possible_cpu(cpu) {
271773f53c4aSTejun Heo 		struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
271873f53c4aSTejun Heo 		int i;
271973f53c4aSTejun Heo 
272073f53c4aSTejun Heo 		for (i = 0; i < WORK_NR_COLORS; i++)
272173f53c4aSTejun Heo 			BUG_ON(cwq->nr_in_flight[i]);
27221e19ffc6STejun Heo 		BUG_ON(cwq->nr_active);
27231e19ffc6STejun Heo 		BUG_ON(!list_empty(&cwq->delayed_works));
272473f53c4aSTejun Heo 	}
27251537663fSTejun Heo 
2726e22bee78STejun Heo 	if (wq->flags & WQ_RESCUER) {
2727e22bee78STejun Heo 		kthread_stop(wq->rescuer->task);
2728e22bee78STejun Heo 		free_cpumask_var(wq->mayday_mask);
2729e22bee78STejun Heo 	}
2730e22bee78STejun Heo 
27310f900049STejun Heo 	free_cwqs(wq->cpu_wq);
27323af24433SOleg Nesterov 	kfree(wq);
27333af24433SOleg Nesterov }
27343af24433SOleg Nesterov EXPORT_SYMBOL_GPL(destroy_workqueue);
27353af24433SOleg Nesterov 
2736dcd989cbSTejun Heo /**
2737dcd989cbSTejun Heo  * workqueue_set_max_active - adjust max_active of a workqueue
2738dcd989cbSTejun Heo  * @wq: target workqueue
2739dcd989cbSTejun Heo  * @max_active: new max_active value.
2740dcd989cbSTejun Heo  *
2741dcd989cbSTejun Heo  * Set max_active of @wq to @max_active.
2742dcd989cbSTejun Heo  *
2743dcd989cbSTejun Heo  * CONTEXT:
2744dcd989cbSTejun Heo  * Don't call from IRQ context.
2745dcd989cbSTejun Heo  */
2746dcd989cbSTejun Heo void workqueue_set_max_active(struct workqueue_struct *wq, int max_active)
2747dcd989cbSTejun Heo {
2748dcd989cbSTejun Heo 	unsigned int cpu;
2749dcd989cbSTejun Heo 
2750dcd989cbSTejun Heo 	max_active = wq_clamp_max_active(max_active, wq->name);
2751dcd989cbSTejun Heo 
2752dcd989cbSTejun Heo 	spin_lock(&workqueue_lock);
2753dcd989cbSTejun Heo 
2754dcd989cbSTejun Heo 	wq->saved_max_active = max_active;
2755dcd989cbSTejun Heo 
2756dcd989cbSTejun Heo 	for_each_possible_cpu(cpu) {
2757dcd989cbSTejun Heo 		struct global_cwq *gcwq = get_gcwq(cpu);
2758dcd989cbSTejun Heo 
2759dcd989cbSTejun Heo 		spin_lock_irq(&gcwq->lock);
2760dcd989cbSTejun Heo 
2761dcd989cbSTejun Heo 		if (!(wq->flags & WQ_FREEZEABLE) ||
2762dcd989cbSTejun Heo 		    !(gcwq->flags & GCWQ_FREEZING))
2763dcd989cbSTejun Heo 			get_cwq(gcwq->cpu, wq)->max_active = max_active;
2764dcd989cbSTejun Heo 
2765dcd989cbSTejun Heo 		spin_unlock_irq(&gcwq->lock);
2766dcd989cbSTejun Heo 	}
2767dcd989cbSTejun Heo 
2768dcd989cbSTejun Heo 	spin_unlock(&workqueue_lock);
2769dcd989cbSTejun Heo }
2770dcd989cbSTejun Heo EXPORT_SYMBOL_GPL(workqueue_set_max_active);
2771dcd989cbSTejun Heo 
2772dcd989cbSTejun Heo /**
2773dcd989cbSTejun Heo  * workqueue_congested - test whether a workqueue is congested
2774dcd989cbSTejun Heo  * @cpu: CPU in question
2775dcd989cbSTejun Heo  * @wq: target workqueue
2776dcd989cbSTejun Heo  *
2777dcd989cbSTejun Heo  * Test whether @wq's cpu workqueue for @cpu is congested.  There is
2778dcd989cbSTejun Heo  * no synchronization around this function and the test result is
2779dcd989cbSTejun Heo  * unreliable and only useful as advisory hints or for debugging.
2780dcd989cbSTejun Heo  *
2781dcd989cbSTejun Heo  * RETURNS:
2782dcd989cbSTejun Heo  * %true if congested, %false otherwise.
2783dcd989cbSTejun Heo  */
2784dcd989cbSTejun Heo bool workqueue_congested(unsigned int cpu, struct workqueue_struct *wq)
2785dcd989cbSTejun Heo {
2786dcd989cbSTejun Heo 	struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
2787dcd989cbSTejun Heo 
2788dcd989cbSTejun Heo 	return !list_empty(&cwq->delayed_works);
2789dcd989cbSTejun Heo }
2790dcd989cbSTejun Heo EXPORT_SYMBOL_GPL(workqueue_congested);
2791dcd989cbSTejun Heo 
2792dcd989cbSTejun Heo /**
2793dcd989cbSTejun Heo  * work_cpu - return the last known associated cpu for @work
2794dcd989cbSTejun Heo  * @work: the work of interest
2795dcd989cbSTejun Heo  *
2796dcd989cbSTejun Heo  * RETURNS:
2797dcd989cbSTejun Heo  * CPU number if @work was ever queued.  NR_CPUS otherwise.
2798dcd989cbSTejun Heo  */
2799dcd989cbSTejun Heo unsigned int work_cpu(struct work_struct *work)
2800dcd989cbSTejun Heo {
2801dcd989cbSTejun Heo 	struct global_cwq *gcwq = get_work_gcwq(work);
2802dcd989cbSTejun Heo 
2803dcd989cbSTejun Heo 	return gcwq ? gcwq->cpu : NR_CPUS;
2804dcd989cbSTejun Heo }
2805dcd989cbSTejun Heo EXPORT_SYMBOL_GPL(work_cpu);
2806dcd989cbSTejun Heo 
2807dcd989cbSTejun Heo /**
2808dcd989cbSTejun Heo  * work_busy - test whether a work is currently pending or running
2809dcd989cbSTejun Heo  * @work: the work to be tested
2810dcd989cbSTejun Heo  *
2811dcd989cbSTejun Heo  * Test whether @work is currently pending or running.  There is no
2812dcd989cbSTejun Heo  * synchronization around this function and the test result is
2813dcd989cbSTejun Heo  * unreliable and only useful as advisory hints or for debugging.
2814dcd989cbSTejun Heo  * Especially for reentrant wqs, the pending state might hide the
2815dcd989cbSTejun Heo  * running state.
2816dcd989cbSTejun Heo  *
2817dcd989cbSTejun Heo  * RETURNS:
2818dcd989cbSTejun Heo  * OR'd bitmask of WORK_BUSY_* bits.
2819dcd989cbSTejun Heo  */
2820dcd989cbSTejun Heo unsigned int work_busy(struct work_struct *work)
2821dcd989cbSTejun Heo {
2822dcd989cbSTejun Heo 	struct global_cwq *gcwq = get_work_gcwq(work);
2823dcd989cbSTejun Heo 	unsigned long flags;
2824dcd989cbSTejun Heo 	unsigned int ret = 0;
2825dcd989cbSTejun Heo 
2826dcd989cbSTejun Heo 	if (!gcwq)
2827dcd989cbSTejun Heo 		return false;
2828dcd989cbSTejun Heo 
2829dcd989cbSTejun Heo 	spin_lock_irqsave(&gcwq->lock, flags);
2830dcd989cbSTejun Heo 
2831dcd989cbSTejun Heo 	if (work_pending(work))
2832dcd989cbSTejun Heo 		ret |= WORK_BUSY_PENDING;
2833dcd989cbSTejun Heo 	if (find_worker_executing_work(gcwq, work))
2834dcd989cbSTejun Heo 		ret |= WORK_BUSY_RUNNING;
2835dcd989cbSTejun Heo 
2836dcd989cbSTejun Heo 	spin_unlock_irqrestore(&gcwq->lock, flags);
2837dcd989cbSTejun Heo 
2838dcd989cbSTejun Heo 	return ret;
2839dcd989cbSTejun Heo }
2840dcd989cbSTejun Heo EXPORT_SYMBOL_GPL(work_busy);
2841dcd989cbSTejun Heo 
2842db7bccf4STejun Heo /*
2843db7bccf4STejun Heo  * CPU hotplug.
2844db7bccf4STejun Heo  *
2845e22bee78STejun Heo  * There are two challenges in supporting CPU hotplug.  Firstly, there
2846e22bee78STejun Heo  * are a lot of assumptions on strong associations among work, cwq and
2847e22bee78STejun Heo  * gcwq which make migrating pending and scheduled works very
2848e22bee78STejun Heo  * difficult to implement without impacting hot paths.  Secondly,
2849e22bee78STejun Heo  * gcwqs serve mix of short, long and very long running works making
2850e22bee78STejun Heo  * blocked draining impractical.
2851e22bee78STejun Heo  *
2852e22bee78STejun Heo  * This is solved by allowing a gcwq to be detached from CPU, running
2853e22bee78STejun Heo  * it with unbound (rogue) workers and allowing it to be reattached
2854e22bee78STejun Heo  * later if the cpu comes back online.  A separate thread is created
2855e22bee78STejun Heo  * to govern a gcwq in such state and is called the trustee of the
2856e22bee78STejun Heo  * gcwq.
2857db7bccf4STejun Heo  *
2858db7bccf4STejun Heo  * Trustee states and their descriptions.
2859db7bccf4STejun Heo  *
2860db7bccf4STejun Heo  * START	Command state used on startup.  On CPU_DOWN_PREPARE, a
2861db7bccf4STejun Heo  *		new trustee is started with this state.
2862db7bccf4STejun Heo  *
2863db7bccf4STejun Heo  * IN_CHARGE	Once started, trustee will enter this state after
2864e22bee78STejun Heo  *		assuming the manager role and making all existing
2865e22bee78STejun Heo  *		workers rogue.  DOWN_PREPARE waits for trustee to
2866e22bee78STejun Heo  *		enter this state.  After reaching IN_CHARGE, trustee
2867e22bee78STejun Heo  *		tries to execute the pending worklist until it's empty
2868e22bee78STejun Heo  *		and the state is set to BUTCHER, or the state is set
2869e22bee78STejun Heo  *		to RELEASE.
2870db7bccf4STejun Heo  *
2871db7bccf4STejun Heo  * BUTCHER	Command state which is set by the cpu callback after
2872db7bccf4STejun Heo  *		the cpu has went down.  Once this state is set trustee
2873db7bccf4STejun Heo  *		knows that there will be no new works on the worklist
2874db7bccf4STejun Heo  *		and once the worklist is empty it can proceed to
2875db7bccf4STejun Heo  *		killing idle workers.
2876db7bccf4STejun Heo  *
2877db7bccf4STejun Heo  * RELEASE	Command state which is set by the cpu callback if the
2878db7bccf4STejun Heo  *		cpu down has been canceled or it has come online
2879db7bccf4STejun Heo  *		again.  After recognizing this state, trustee stops
2880e22bee78STejun Heo  *		trying to drain or butcher and clears ROGUE, rebinds
2881e22bee78STejun Heo  *		all remaining workers back to the cpu and releases
2882e22bee78STejun Heo  *		manager role.
2883db7bccf4STejun Heo  *
2884db7bccf4STejun Heo  * DONE		Trustee will enter this state after BUTCHER or RELEASE
2885db7bccf4STejun Heo  *		is complete.
2886db7bccf4STejun Heo  *
2887db7bccf4STejun Heo  *          trustee                 CPU                draining
2888db7bccf4STejun Heo  *         took over                down               complete
2889db7bccf4STejun Heo  * START -----------> IN_CHARGE -----------> BUTCHER -----------> DONE
2890db7bccf4STejun Heo  *                        |                     |                  ^
2891db7bccf4STejun Heo  *                        | CPU is back online  v   return workers |
2892db7bccf4STejun Heo  *                         ----------------> RELEASE --------------
2893db7bccf4STejun Heo  */
2894db7bccf4STejun Heo 
2895db7bccf4STejun Heo /**
2896db7bccf4STejun Heo  * trustee_wait_event_timeout - timed event wait for trustee
2897db7bccf4STejun Heo  * @cond: condition to wait for
2898db7bccf4STejun Heo  * @timeout: timeout in jiffies
2899db7bccf4STejun Heo  *
2900db7bccf4STejun Heo  * wait_event_timeout() for trustee to use.  Handles locking and
2901db7bccf4STejun Heo  * checks for RELEASE request.
2902db7bccf4STejun Heo  *
2903db7bccf4STejun Heo  * CONTEXT:
2904db7bccf4STejun Heo  * spin_lock_irq(gcwq->lock) which may be released and regrabbed
2905db7bccf4STejun Heo  * multiple times.  To be used by trustee.
2906db7bccf4STejun Heo  *
2907db7bccf4STejun Heo  * RETURNS:
2908db7bccf4STejun Heo  * Positive indicating left time if @cond is satisfied, 0 if timed
2909db7bccf4STejun Heo  * out, -1 if canceled.
2910db7bccf4STejun Heo  */
2911db7bccf4STejun Heo #define trustee_wait_event_timeout(cond, timeout) ({			\
2912db7bccf4STejun Heo 	long __ret = (timeout);						\
2913db7bccf4STejun Heo 	while (!((cond) || (gcwq->trustee_state == TRUSTEE_RELEASE)) &&	\
2914db7bccf4STejun Heo 	       __ret) {							\
2915db7bccf4STejun Heo 		spin_unlock_irq(&gcwq->lock);				\
2916db7bccf4STejun Heo 		__wait_event_timeout(gcwq->trustee_wait, (cond) ||	\
2917db7bccf4STejun Heo 			(gcwq->trustee_state == TRUSTEE_RELEASE),	\
2918db7bccf4STejun Heo 			__ret);						\
2919db7bccf4STejun Heo 		spin_lock_irq(&gcwq->lock);				\
2920db7bccf4STejun Heo 	}								\
2921db7bccf4STejun Heo 	gcwq->trustee_state == TRUSTEE_RELEASE ? -1 : (__ret);		\
2922db7bccf4STejun Heo })
2923db7bccf4STejun Heo 
2924db7bccf4STejun Heo /**
2925db7bccf4STejun Heo  * trustee_wait_event - event wait for trustee
2926db7bccf4STejun Heo  * @cond: condition to wait for
2927db7bccf4STejun Heo  *
2928db7bccf4STejun Heo  * wait_event() for trustee to use.  Automatically handles locking and
2929db7bccf4STejun Heo  * checks for CANCEL request.
2930db7bccf4STejun Heo  *
2931db7bccf4STejun Heo  * CONTEXT:
2932db7bccf4STejun Heo  * spin_lock_irq(gcwq->lock) which may be released and regrabbed
2933db7bccf4STejun Heo  * multiple times.  To be used by trustee.
2934db7bccf4STejun Heo  *
2935db7bccf4STejun Heo  * RETURNS:
2936db7bccf4STejun Heo  * 0 if @cond is satisfied, -1 if canceled.
2937db7bccf4STejun Heo  */
2938db7bccf4STejun Heo #define trustee_wait_event(cond) ({					\
2939db7bccf4STejun Heo 	long __ret1;							\
2940db7bccf4STejun Heo 	__ret1 = trustee_wait_event_timeout(cond, MAX_SCHEDULE_TIMEOUT);\
2941db7bccf4STejun Heo 	__ret1 < 0 ? -1 : 0;						\
2942db7bccf4STejun Heo })
2943db7bccf4STejun Heo 
2944db7bccf4STejun Heo static int __cpuinit trustee_thread(void *__gcwq)
2945db7bccf4STejun Heo {
2946db7bccf4STejun Heo 	struct global_cwq *gcwq = __gcwq;
2947db7bccf4STejun Heo 	struct worker *worker;
2948e22bee78STejun Heo 	struct work_struct *work;
2949db7bccf4STejun Heo 	struct hlist_node *pos;
2950e22bee78STejun Heo 	long rc;
2951db7bccf4STejun Heo 	int i;
2952db7bccf4STejun Heo 
2953db7bccf4STejun Heo 	BUG_ON(gcwq->cpu != smp_processor_id());
2954db7bccf4STejun Heo 
2955db7bccf4STejun Heo 	spin_lock_irq(&gcwq->lock);
2956db7bccf4STejun Heo 	/*
2957e22bee78STejun Heo 	 * Claim the manager position and make all workers rogue.
2958e22bee78STejun Heo 	 * Trustee must be bound to the target cpu and can't be
2959e22bee78STejun Heo 	 * cancelled.
2960db7bccf4STejun Heo 	 */
2961db7bccf4STejun Heo 	BUG_ON(gcwq->cpu != smp_processor_id());
2962e22bee78STejun Heo 	rc = trustee_wait_event(!(gcwq->flags & GCWQ_MANAGING_WORKERS));
2963e22bee78STejun Heo 	BUG_ON(rc < 0);
2964e22bee78STejun Heo 
2965e22bee78STejun Heo 	gcwq->flags |= GCWQ_MANAGING_WORKERS;
2966db7bccf4STejun Heo 
2967db7bccf4STejun Heo 	list_for_each_entry(worker, &gcwq->idle_list, entry)
2968d302f017STejun Heo 		worker_set_flags(worker, WORKER_ROGUE, false);
2969db7bccf4STejun Heo 
2970db7bccf4STejun Heo 	for_each_busy_worker(worker, i, pos, gcwq)
2971d302f017STejun Heo 		worker_set_flags(worker, WORKER_ROGUE, false);
2972db7bccf4STejun Heo 
2973db7bccf4STejun Heo 	/*
2974e22bee78STejun Heo 	 * Call schedule() so that we cross rq->lock and thus can
2975e22bee78STejun Heo 	 * guarantee sched callbacks see the rogue flag.  This is
2976e22bee78STejun Heo 	 * necessary as scheduler callbacks may be invoked from other
2977e22bee78STejun Heo 	 * cpus.
2978e22bee78STejun Heo 	 */
2979e22bee78STejun Heo 	spin_unlock_irq(&gcwq->lock);
2980e22bee78STejun Heo 	schedule();
2981e22bee78STejun Heo 	spin_lock_irq(&gcwq->lock);
2982e22bee78STejun Heo 
2983e22bee78STejun Heo 	/*
2984e22bee78STejun Heo 	 * Sched callbacks are disabled now.  gcwq->nr_running should
2985e22bee78STejun Heo 	 * be zero and will stay that way, making need_more_worker()
2986e22bee78STejun Heo 	 * and keep_working() always return true as long as the
2987e22bee78STejun Heo 	 * worklist is not empty.
2988e22bee78STejun Heo 	 */
2989e22bee78STejun Heo 	WARN_ON_ONCE(atomic_read(get_gcwq_nr_running(gcwq->cpu)) != 0);
2990e22bee78STejun Heo 
2991e22bee78STejun Heo 	spin_unlock_irq(&gcwq->lock);
2992e22bee78STejun Heo 	del_timer_sync(&gcwq->idle_timer);
2993e22bee78STejun Heo 	spin_lock_irq(&gcwq->lock);
2994e22bee78STejun Heo 
2995e22bee78STejun Heo 	/*
2996db7bccf4STejun Heo 	 * We're now in charge.  Notify and proceed to drain.  We need
2997db7bccf4STejun Heo 	 * to keep the gcwq running during the whole CPU down
2998db7bccf4STejun Heo 	 * procedure as other cpu hotunplug callbacks may need to
2999db7bccf4STejun Heo 	 * flush currently running tasks.
3000db7bccf4STejun Heo 	 */
3001db7bccf4STejun Heo 	gcwq->trustee_state = TRUSTEE_IN_CHARGE;
3002db7bccf4STejun Heo 	wake_up_all(&gcwq->trustee_wait);
3003db7bccf4STejun Heo 
3004db7bccf4STejun Heo 	/*
3005db7bccf4STejun Heo 	 * The original cpu is in the process of dying and may go away
3006db7bccf4STejun Heo 	 * anytime now.  When that happens, we and all workers would
3007e22bee78STejun Heo 	 * be migrated to other cpus.  Try draining any left work.  We
3008e22bee78STejun Heo 	 * want to get it over with ASAP - spam rescuers, wake up as
3009e22bee78STejun Heo 	 * many idlers as necessary and create new ones till the
3010e22bee78STejun Heo 	 * worklist is empty.  Note that if the gcwq is frozen, there
3011e22bee78STejun Heo 	 * may be frozen works in freezeable cwqs.  Don't declare
3012e22bee78STejun Heo 	 * completion while frozen.
3013db7bccf4STejun Heo 	 */
3014db7bccf4STejun Heo 	while (gcwq->nr_workers != gcwq->nr_idle ||
3015db7bccf4STejun Heo 	       gcwq->flags & GCWQ_FREEZING ||
3016db7bccf4STejun Heo 	       gcwq->trustee_state == TRUSTEE_IN_CHARGE) {
3017e22bee78STejun Heo 		int nr_works = 0;
3018e22bee78STejun Heo 
3019e22bee78STejun Heo 		list_for_each_entry(work, &gcwq->worklist, entry) {
3020e22bee78STejun Heo 			send_mayday(work);
3021e22bee78STejun Heo 			nr_works++;
3022e22bee78STejun Heo 		}
3023e22bee78STejun Heo 
3024e22bee78STejun Heo 		list_for_each_entry(worker, &gcwq->idle_list, entry) {
3025e22bee78STejun Heo 			if (!nr_works--)
3026e22bee78STejun Heo 				break;
3027e22bee78STejun Heo 			wake_up_process(worker->task);
3028e22bee78STejun Heo 		}
3029e22bee78STejun Heo 
3030e22bee78STejun Heo 		if (need_to_create_worker(gcwq)) {
3031e22bee78STejun Heo 			spin_unlock_irq(&gcwq->lock);
3032e22bee78STejun Heo 			worker = create_worker(gcwq, false);
3033e22bee78STejun Heo 			spin_lock_irq(&gcwq->lock);
3034e22bee78STejun Heo 			if (worker) {
3035e22bee78STejun Heo 				worker_set_flags(worker, WORKER_ROGUE, false);
3036e22bee78STejun Heo 				start_worker(worker);
3037e22bee78STejun Heo 			}
3038e22bee78STejun Heo 		}
3039e22bee78STejun Heo 
3040db7bccf4STejun Heo 		/* give a breather */
3041db7bccf4STejun Heo 		if (trustee_wait_event_timeout(false, TRUSTEE_COOLDOWN) < 0)
3042db7bccf4STejun Heo 			break;
3043db7bccf4STejun Heo 	}
3044db7bccf4STejun Heo 
3045e22bee78STejun Heo 	/*
3046e22bee78STejun Heo 	 * Either all works have been scheduled and cpu is down, or
3047e22bee78STejun Heo 	 * cpu down has already been canceled.  Wait for and butcher
3048e22bee78STejun Heo 	 * all workers till we're canceled.
3049e22bee78STejun Heo 	 */
3050e22bee78STejun Heo 	do {
3051e22bee78STejun Heo 		rc = trustee_wait_event(!list_empty(&gcwq->idle_list));
3052e22bee78STejun Heo 		while (!list_empty(&gcwq->idle_list))
3053e22bee78STejun Heo 			destroy_worker(list_first_entry(&gcwq->idle_list,
3054e22bee78STejun Heo 							struct worker, entry));
3055e22bee78STejun Heo 	} while (gcwq->nr_workers && rc >= 0);
3056e22bee78STejun Heo 
3057e22bee78STejun Heo 	/*
3058e22bee78STejun Heo 	 * At this point, either draining has completed and no worker
3059e22bee78STejun Heo 	 * is left, or cpu down has been canceled or the cpu is being
3060e22bee78STejun Heo 	 * brought back up.  There shouldn't be any idle one left.
3061e22bee78STejun Heo 	 * Tell the remaining busy ones to rebind once it finishes the
3062e22bee78STejun Heo 	 * currently scheduled works by scheduling the rebind_work.
3063e22bee78STejun Heo 	 */
3064e22bee78STejun Heo 	WARN_ON(!list_empty(&gcwq->idle_list));
3065e22bee78STejun Heo 
3066e22bee78STejun Heo 	for_each_busy_worker(worker, i, pos, gcwq) {
3067e22bee78STejun Heo 		struct work_struct *rebind_work = &worker->rebind_work;
3068e22bee78STejun Heo 
3069e22bee78STejun Heo 		/*
3070e22bee78STejun Heo 		 * Rebind_work may race with future cpu hotplug
3071e22bee78STejun Heo 		 * operations.  Use a separate flag to mark that
3072e22bee78STejun Heo 		 * rebinding is scheduled.
3073e22bee78STejun Heo 		 */
3074e22bee78STejun Heo 		worker_set_flags(worker, WORKER_REBIND, false);
3075e22bee78STejun Heo 		worker_clr_flags(worker, WORKER_ROGUE);
3076e22bee78STejun Heo 
3077e22bee78STejun Heo 		/* queue rebind_work, wq doesn't matter, use the default one */
3078e22bee78STejun Heo 		if (test_and_set_bit(WORK_STRUCT_PENDING_BIT,
3079e22bee78STejun Heo 				     work_data_bits(rebind_work)))
3080e22bee78STejun Heo 			continue;
3081e22bee78STejun Heo 
3082e22bee78STejun Heo 		debug_work_activate(rebind_work);
3083d320c038STejun Heo 		insert_work(get_cwq(gcwq->cpu, system_wq), rebind_work,
3084e22bee78STejun Heo 			    worker->scheduled.next,
3085e22bee78STejun Heo 			    work_color_to_flags(WORK_NO_COLOR));
3086e22bee78STejun Heo 	}
3087e22bee78STejun Heo 
3088e22bee78STejun Heo 	/* relinquish manager role */
3089e22bee78STejun Heo 	gcwq->flags &= ~GCWQ_MANAGING_WORKERS;
3090e22bee78STejun Heo 
3091db7bccf4STejun Heo 	/* notify completion */
3092db7bccf4STejun Heo 	gcwq->trustee = NULL;
3093db7bccf4STejun Heo 	gcwq->trustee_state = TRUSTEE_DONE;
3094db7bccf4STejun Heo 	wake_up_all(&gcwq->trustee_wait);
3095db7bccf4STejun Heo 	spin_unlock_irq(&gcwq->lock);
3096db7bccf4STejun Heo 	return 0;
3097db7bccf4STejun Heo }
3098db7bccf4STejun Heo 
3099db7bccf4STejun Heo /**
3100db7bccf4STejun Heo  * wait_trustee_state - wait for trustee to enter the specified state
3101db7bccf4STejun Heo  * @gcwq: gcwq the trustee of interest belongs to
3102db7bccf4STejun Heo  * @state: target state to wait for
3103db7bccf4STejun Heo  *
3104db7bccf4STejun Heo  * Wait for the trustee to reach @state.  DONE is already matched.
3105db7bccf4STejun Heo  *
3106db7bccf4STejun Heo  * CONTEXT:
3107db7bccf4STejun Heo  * spin_lock_irq(gcwq->lock) which may be released and regrabbed
3108db7bccf4STejun Heo  * multiple times.  To be used by cpu_callback.
3109db7bccf4STejun Heo  */
3110db7bccf4STejun Heo static void __cpuinit wait_trustee_state(struct global_cwq *gcwq, int state)
3111db7bccf4STejun Heo {
3112db7bccf4STejun Heo 	if (!(gcwq->trustee_state == state ||
3113db7bccf4STejun Heo 	      gcwq->trustee_state == TRUSTEE_DONE)) {
3114db7bccf4STejun Heo 		spin_unlock_irq(&gcwq->lock);
3115db7bccf4STejun Heo 		__wait_event(gcwq->trustee_wait,
3116db7bccf4STejun Heo 			     gcwq->trustee_state == state ||
3117db7bccf4STejun Heo 			     gcwq->trustee_state == TRUSTEE_DONE);
3118db7bccf4STejun Heo 		spin_lock_irq(&gcwq->lock);
3119db7bccf4STejun Heo 	}
3120db7bccf4STejun Heo }
3121db7bccf4STejun Heo 
31229c7b216dSChandra Seetharaman static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
31231da177e4SLinus Torvalds 						unsigned long action,
31241da177e4SLinus Torvalds 						void *hcpu)
31251da177e4SLinus Torvalds {
31263af24433SOleg Nesterov 	unsigned int cpu = (unsigned long)hcpu;
3127db7bccf4STejun Heo 	struct global_cwq *gcwq = get_gcwq(cpu);
3128db7bccf4STejun Heo 	struct task_struct *new_trustee = NULL;
3129e22bee78STejun Heo 	struct worker *uninitialized_var(new_worker);
3130db7bccf4STejun Heo 	unsigned long flags;
31311da177e4SLinus Torvalds 
31328bb78442SRafael J. Wysocki 	action &= ~CPU_TASKS_FROZEN;
31338bb78442SRafael J. Wysocki 
3134db7bccf4STejun Heo 	switch (action) {
3135db7bccf4STejun Heo 	case CPU_DOWN_PREPARE:
3136db7bccf4STejun Heo 		new_trustee = kthread_create(trustee_thread, gcwq,
3137db7bccf4STejun Heo 					     "workqueue_trustee/%d\n", cpu);
3138db7bccf4STejun Heo 		if (IS_ERR(new_trustee))
3139db7bccf4STejun Heo 			return notifier_from_errno(PTR_ERR(new_trustee));
3140db7bccf4STejun Heo 		kthread_bind(new_trustee, cpu);
3141e22bee78STejun Heo 		/* fall through */
3142e22bee78STejun Heo 	case CPU_UP_PREPARE:
3143e22bee78STejun Heo 		BUG_ON(gcwq->first_idle);
3144e22bee78STejun Heo 		new_worker = create_worker(gcwq, false);
3145e22bee78STejun Heo 		if (!new_worker) {
3146e22bee78STejun Heo 			if (new_trustee)
3147e22bee78STejun Heo 				kthread_stop(new_trustee);
3148e22bee78STejun Heo 			return NOTIFY_BAD;
3149e22bee78STejun Heo 		}
3150db7bccf4STejun Heo 	}
31511537663fSTejun Heo 
3152db7bccf4STejun Heo 	/* some are called w/ irq disabled, don't disturb irq status */
3153db7bccf4STejun Heo 	spin_lock_irqsave(&gcwq->lock, flags);
31543af24433SOleg Nesterov 
31553af24433SOleg Nesterov 	switch (action) {
3156db7bccf4STejun Heo 	case CPU_DOWN_PREPARE:
3157db7bccf4STejun Heo 		/* initialize trustee and tell it to acquire the gcwq */
3158db7bccf4STejun Heo 		BUG_ON(gcwq->trustee || gcwq->trustee_state != TRUSTEE_DONE);
3159db7bccf4STejun Heo 		gcwq->trustee = new_trustee;
3160db7bccf4STejun Heo 		gcwq->trustee_state = TRUSTEE_START;
3161db7bccf4STejun Heo 		wake_up_process(gcwq->trustee);
3162db7bccf4STejun Heo 		wait_trustee_state(gcwq, TRUSTEE_IN_CHARGE);
3163e22bee78STejun Heo 		/* fall through */
3164e22bee78STejun Heo 	case CPU_UP_PREPARE:
3165e22bee78STejun Heo 		BUG_ON(gcwq->first_idle);
3166e22bee78STejun Heo 		gcwq->first_idle = new_worker;
3167e22bee78STejun Heo 		break;
3168e22bee78STejun Heo 
3169e22bee78STejun Heo 	case CPU_DYING:
3170e22bee78STejun Heo 		/*
3171e22bee78STejun Heo 		 * Before this, the trustee and all workers except for
3172e22bee78STejun Heo 		 * the ones which are still executing works from
3173e22bee78STejun Heo 		 * before the last CPU down must be on the cpu.  After
3174e22bee78STejun Heo 		 * this, they'll all be diasporas.
3175e22bee78STejun Heo 		 */
3176e22bee78STejun Heo 		gcwq->flags |= GCWQ_DISASSOCIATED;
3177db7bccf4STejun Heo 		break;
3178db7bccf4STejun Heo 
31793da1c84cSOleg Nesterov 	case CPU_POST_DEAD:
3180db7bccf4STejun Heo 		gcwq->trustee_state = TRUSTEE_BUTCHER;
3181e22bee78STejun Heo 		/* fall through */
3182e22bee78STejun Heo 	case CPU_UP_CANCELED:
3183e22bee78STejun Heo 		destroy_worker(gcwq->first_idle);
3184e22bee78STejun Heo 		gcwq->first_idle = NULL;
3185db7bccf4STejun Heo 		break;
3186db7bccf4STejun Heo 
3187db7bccf4STejun Heo 	case CPU_DOWN_FAILED:
3188db7bccf4STejun Heo 	case CPU_ONLINE:
3189e22bee78STejun Heo 		gcwq->flags &= ~GCWQ_DISASSOCIATED;
3190db7bccf4STejun Heo 		if (gcwq->trustee_state != TRUSTEE_DONE) {
3191db7bccf4STejun Heo 			gcwq->trustee_state = TRUSTEE_RELEASE;
3192db7bccf4STejun Heo 			wake_up_process(gcwq->trustee);
3193db7bccf4STejun Heo 			wait_trustee_state(gcwq, TRUSTEE_DONE);
3194db7bccf4STejun Heo 		}
3195db7bccf4STejun Heo 
3196e22bee78STejun Heo 		/*
3197e22bee78STejun Heo 		 * Trustee is done and there might be no worker left.
3198e22bee78STejun Heo 		 * Put the first_idle in and request a real manager to
3199e22bee78STejun Heo 		 * take a look.
3200e22bee78STejun Heo 		 */
3201e22bee78STejun Heo 		spin_unlock_irq(&gcwq->lock);
3202e22bee78STejun Heo 		kthread_bind(gcwq->first_idle->task, cpu);
3203e22bee78STejun Heo 		spin_lock_irq(&gcwq->lock);
3204e22bee78STejun Heo 		gcwq->flags |= GCWQ_MANAGE_WORKERS;
3205e22bee78STejun Heo 		start_worker(gcwq->first_idle);
3206e22bee78STejun Heo 		gcwq->first_idle = NULL;
32071da177e4SLinus Torvalds 		break;
32081da177e4SLinus Torvalds 	}
3209db7bccf4STejun Heo 
3210db7bccf4STejun Heo 	spin_unlock_irqrestore(&gcwq->lock, flags);
32111da177e4SLinus Torvalds 
32121537663fSTejun Heo 	return notifier_from_errno(0);
32131da177e4SLinus Torvalds }
32141da177e4SLinus Torvalds 
32152d3854a3SRusty Russell #ifdef CONFIG_SMP
32168ccad40dSRusty Russell 
32172d3854a3SRusty Russell struct work_for_cpu {
32186b44003eSAndrew Morton 	struct completion completion;
32192d3854a3SRusty Russell 	long (*fn)(void *);
32202d3854a3SRusty Russell 	void *arg;
32212d3854a3SRusty Russell 	long ret;
32222d3854a3SRusty Russell };
32232d3854a3SRusty Russell 
32246b44003eSAndrew Morton static int do_work_for_cpu(void *_wfc)
32252d3854a3SRusty Russell {
32266b44003eSAndrew Morton 	struct work_for_cpu *wfc = _wfc;
32272d3854a3SRusty Russell 	wfc->ret = wfc->fn(wfc->arg);
32286b44003eSAndrew Morton 	complete(&wfc->completion);
32296b44003eSAndrew Morton 	return 0;
32302d3854a3SRusty Russell }
32312d3854a3SRusty Russell 
32322d3854a3SRusty Russell /**
32332d3854a3SRusty Russell  * work_on_cpu - run a function in user context on a particular cpu
32342d3854a3SRusty Russell  * @cpu: the cpu to run on
32352d3854a3SRusty Russell  * @fn: the function to run
32362d3854a3SRusty Russell  * @arg: the function arg
32372d3854a3SRusty Russell  *
323831ad9081SRusty Russell  * This will return the value @fn returns.
323931ad9081SRusty Russell  * It is up to the caller to ensure that the cpu doesn't go offline.
32406b44003eSAndrew Morton  * The caller must not hold any locks which would prevent @fn from completing.
32412d3854a3SRusty Russell  */
32422d3854a3SRusty Russell long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
32432d3854a3SRusty Russell {
32446b44003eSAndrew Morton 	struct task_struct *sub_thread;
32456b44003eSAndrew Morton 	struct work_for_cpu wfc = {
32466b44003eSAndrew Morton 		.completion = COMPLETION_INITIALIZER_ONSTACK(wfc.completion),
32476b44003eSAndrew Morton 		.fn = fn,
32486b44003eSAndrew Morton 		.arg = arg,
32496b44003eSAndrew Morton 	};
32502d3854a3SRusty Russell 
32516b44003eSAndrew Morton 	sub_thread = kthread_create(do_work_for_cpu, &wfc, "work_for_cpu");
32526b44003eSAndrew Morton 	if (IS_ERR(sub_thread))
32536b44003eSAndrew Morton 		return PTR_ERR(sub_thread);
32546b44003eSAndrew Morton 	kthread_bind(sub_thread, cpu);
32556b44003eSAndrew Morton 	wake_up_process(sub_thread);
32566b44003eSAndrew Morton 	wait_for_completion(&wfc.completion);
32572d3854a3SRusty Russell 	return wfc.ret;
32582d3854a3SRusty Russell }
32592d3854a3SRusty Russell EXPORT_SYMBOL_GPL(work_on_cpu);
32602d3854a3SRusty Russell #endif /* CONFIG_SMP */
32612d3854a3SRusty Russell 
3262a0a1a5fdSTejun Heo #ifdef CONFIG_FREEZER
3263a0a1a5fdSTejun Heo 
3264a0a1a5fdSTejun Heo /**
3265a0a1a5fdSTejun Heo  * freeze_workqueues_begin - begin freezing workqueues
3266a0a1a5fdSTejun Heo  *
3267a0a1a5fdSTejun Heo  * Start freezing workqueues.  After this function returns, all
3268a0a1a5fdSTejun Heo  * freezeable workqueues will queue new works to their frozen_works
32697e11629dSTejun Heo  * list instead of gcwq->worklist.
3270a0a1a5fdSTejun Heo  *
3271a0a1a5fdSTejun Heo  * CONTEXT:
32728b03ae3cSTejun Heo  * Grabs and releases workqueue_lock and gcwq->lock's.
3273a0a1a5fdSTejun Heo  */
3274a0a1a5fdSTejun Heo void freeze_workqueues_begin(void)
3275a0a1a5fdSTejun Heo {
3276a0a1a5fdSTejun Heo 	struct workqueue_struct *wq;
3277a0a1a5fdSTejun Heo 	unsigned int cpu;
3278a0a1a5fdSTejun Heo 
3279a0a1a5fdSTejun Heo 	spin_lock(&workqueue_lock);
3280a0a1a5fdSTejun Heo 
3281a0a1a5fdSTejun Heo 	BUG_ON(workqueue_freezing);
3282a0a1a5fdSTejun Heo 	workqueue_freezing = true;
3283a0a1a5fdSTejun Heo 
3284a0a1a5fdSTejun Heo 	for_each_possible_cpu(cpu) {
32858b03ae3cSTejun Heo 		struct global_cwq *gcwq = get_gcwq(cpu);
32868b03ae3cSTejun Heo 
32878b03ae3cSTejun Heo 		spin_lock_irq(&gcwq->lock);
32888b03ae3cSTejun Heo 
3289db7bccf4STejun Heo 		BUG_ON(gcwq->flags & GCWQ_FREEZING);
3290db7bccf4STejun Heo 		gcwq->flags |= GCWQ_FREEZING;
3291db7bccf4STejun Heo 
3292a0a1a5fdSTejun Heo 		list_for_each_entry(wq, &workqueues, list) {
3293a0a1a5fdSTejun Heo 			struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
3294a0a1a5fdSTejun Heo 
3295a0a1a5fdSTejun Heo 			if (wq->flags & WQ_FREEZEABLE)
3296a0a1a5fdSTejun Heo 				cwq->max_active = 0;
3297a0a1a5fdSTejun Heo 		}
32988b03ae3cSTejun Heo 
32998b03ae3cSTejun Heo 		spin_unlock_irq(&gcwq->lock);
3300a0a1a5fdSTejun Heo 	}
3301a0a1a5fdSTejun Heo 
3302a0a1a5fdSTejun Heo 	spin_unlock(&workqueue_lock);
3303a0a1a5fdSTejun Heo }
3304a0a1a5fdSTejun Heo 
3305a0a1a5fdSTejun Heo /**
3306a0a1a5fdSTejun Heo  * freeze_workqueues_busy - are freezeable workqueues still busy?
3307a0a1a5fdSTejun Heo  *
3308a0a1a5fdSTejun Heo  * Check whether freezing is complete.  This function must be called
3309a0a1a5fdSTejun Heo  * between freeze_workqueues_begin() and thaw_workqueues().
3310a0a1a5fdSTejun Heo  *
3311a0a1a5fdSTejun Heo  * CONTEXT:
3312a0a1a5fdSTejun Heo  * Grabs and releases workqueue_lock.
3313a0a1a5fdSTejun Heo  *
3314a0a1a5fdSTejun Heo  * RETURNS:
3315a0a1a5fdSTejun Heo  * %true if some freezeable workqueues are still busy.  %false if
3316a0a1a5fdSTejun Heo  * freezing is complete.
3317a0a1a5fdSTejun Heo  */
3318a0a1a5fdSTejun Heo bool freeze_workqueues_busy(void)
3319a0a1a5fdSTejun Heo {
3320a0a1a5fdSTejun Heo 	struct workqueue_struct *wq;
3321a0a1a5fdSTejun Heo 	unsigned int cpu;
3322a0a1a5fdSTejun Heo 	bool busy = false;
3323a0a1a5fdSTejun Heo 
3324a0a1a5fdSTejun Heo 	spin_lock(&workqueue_lock);
3325a0a1a5fdSTejun Heo 
3326a0a1a5fdSTejun Heo 	BUG_ON(!workqueue_freezing);
3327a0a1a5fdSTejun Heo 
3328a0a1a5fdSTejun Heo 	for_each_possible_cpu(cpu) {
3329a0a1a5fdSTejun Heo 		/*
3330a0a1a5fdSTejun Heo 		 * nr_active is monotonically decreasing.  It's safe
3331a0a1a5fdSTejun Heo 		 * to peek without lock.
3332a0a1a5fdSTejun Heo 		 */
3333a0a1a5fdSTejun Heo 		list_for_each_entry(wq, &workqueues, list) {
3334a0a1a5fdSTejun Heo 			struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
3335a0a1a5fdSTejun Heo 
3336a0a1a5fdSTejun Heo 			if (!(wq->flags & WQ_FREEZEABLE))
3337a0a1a5fdSTejun Heo 				continue;
3338a0a1a5fdSTejun Heo 
3339a0a1a5fdSTejun Heo 			BUG_ON(cwq->nr_active < 0);
3340a0a1a5fdSTejun Heo 			if (cwq->nr_active) {
3341a0a1a5fdSTejun Heo 				busy = true;
3342a0a1a5fdSTejun Heo 				goto out_unlock;
3343a0a1a5fdSTejun Heo 			}
3344a0a1a5fdSTejun Heo 		}
3345a0a1a5fdSTejun Heo 	}
3346a0a1a5fdSTejun Heo out_unlock:
3347a0a1a5fdSTejun Heo 	spin_unlock(&workqueue_lock);
3348a0a1a5fdSTejun Heo 	return busy;
3349a0a1a5fdSTejun Heo }
3350a0a1a5fdSTejun Heo 
3351a0a1a5fdSTejun Heo /**
3352a0a1a5fdSTejun Heo  * thaw_workqueues - thaw workqueues
3353a0a1a5fdSTejun Heo  *
3354a0a1a5fdSTejun Heo  * Thaw workqueues.  Normal queueing is restored and all collected
33557e11629dSTejun Heo  * frozen works are transferred to their respective gcwq worklists.
3356a0a1a5fdSTejun Heo  *
3357a0a1a5fdSTejun Heo  * CONTEXT:
33588b03ae3cSTejun Heo  * Grabs and releases workqueue_lock and gcwq->lock's.
3359a0a1a5fdSTejun Heo  */
3360a0a1a5fdSTejun Heo void thaw_workqueues(void)
3361a0a1a5fdSTejun Heo {
3362a0a1a5fdSTejun Heo 	struct workqueue_struct *wq;
3363a0a1a5fdSTejun Heo 	unsigned int cpu;
3364a0a1a5fdSTejun Heo 
3365a0a1a5fdSTejun Heo 	spin_lock(&workqueue_lock);
3366a0a1a5fdSTejun Heo 
3367a0a1a5fdSTejun Heo 	if (!workqueue_freezing)
3368a0a1a5fdSTejun Heo 		goto out_unlock;
3369a0a1a5fdSTejun Heo 
3370a0a1a5fdSTejun Heo 	for_each_possible_cpu(cpu) {
33718b03ae3cSTejun Heo 		struct global_cwq *gcwq = get_gcwq(cpu);
33728b03ae3cSTejun Heo 
33738b03ae3cSTejun Heo 		spin_lock_irq(&gcwq->lock);
33748b03ae3cSTejun Heo 
3375db7bccf4STejun Heo 		BUG_ON(!(gcwq->flags & GCWQ_FREEZING));
3376db7bccf4STejun Heo 		gcwq->flags &= ~GCWQ_FREEZING;
3377db7bccf4STejun Heo 
3378a0a1a5fdSTejun Heo 		list_for_each_entry(wq, &workqueues, list) {
3379a0a1a5fdSTejun Heo 			struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
3380a0a1a5fdSTejun Heo 
3381a0a1a5fdSTejun Heo 			if (!(wq->flags & WQ_FREEZEABLE))
3382a0a1a5fdSTejun Heo 				continue;
3383a0a1a5fdSTejun Heo 
3384a0a1a5fdSTejun Heo 			/* restore max_active and repopulate worklist */
3385a0a1a5fdSTejun Heo 			cwq->max_active = wq->saved_max_active;
3386a0a1a5fdSTejun Heo 
3387a0a1a5fdSTejun Heo 			while (!list_empty(&cwq->delayed_works) &&
3388a0a1a5fdSTejun Heo 			       cwq->nr_active < cwq->max_active)
3389a0a1a5fdSTejun Heo 				cwq_activate_first_delayed(cwq);
3390a0a1a5fdSTejun Heo 
3391502ca9d8STejun Heo 			/* perform delayed unbind from single cpu if empty */
3392502ca9d8STejun Heo 			if (wq->single_cpu == gcwq->cpu &&
3393502ca9d8STejun Heo 			    !cwq->nr_active && list_empty(&cwq->delayed_works))
3394502ca9d8STejun Heo 				cwq_unbind_single_cpu(cwq);
3395a0a1a5fdSTejun Heo 		}
33968b03ae3cSTejun Heo 
3397e22bee78STejun Heo 		wake_up_worker(gcwq);
3398e22bee78STejun Heo 
33998b03ae3cSTejun Heo 		spin_unlock_irq(&gcwq->lock);
3400a0a1a5fdSTejun Heo 	}
3401a0a1a5fdSTejun Heo 
3402a0a1a5fdSTejun Heo 	workqueue_freezing = false;
3403a0a1a5fdSTejun Heo out_unlock:
3404a0a1a5fdSTejun Heo 	spin_unlock(&workqueue_lock);
3405a0a1a5fdSTejun Heo }
3406a0a1a5fdSTejun Heo #endif /* CONFIG_FREEZER */
3407a0a1a5fdSTejun Heo 
3408c12920d1SOleg Nesterov void __init init_workqueues(void)
34091da177e4SLinus Torvalds {
3410c34056a3STejun Heo 	unsigned int cpu;
3411c8e55f36STejun Heo 	int i;
3412c34056a3STejun Heo 
34137a22ad75STejun Heo 	/*
34147a22ad75STejun Heo 	 * The pointer part of work->data is either pointing to the
34157a22ad75STejun Heo 	 * cwq or contains the cpu number the work ran last on.  Make
34167a22ad75STejun Heo 	 * sure cpu number won't overflow into kernel pointer area so
34177a22ad75STejun Heo 	 * that they can be distinguished.
34187a22ad75STejun Heo 	 */
34197a22ad75STejun Heo 	BUILD_BUG_ON(NR_CPUS << WORK_STRUCT_FLAG_BITS >= PAGE_OFFSET);
34207a22ad75STejun Heo 
3421db7bccf4STejun Heo 	hotcpu_notifier(workqueue_cpu_callback, CPU_PRI_WORKQUEUE);
34228b03ae3cSTejun Heo 
34238b03ae3cSTejun Heo 	/* initialize gcwqs */
34248b03ae3cSTejun Heo 	for_each_possible_cpu(cpu) {
34258b03ae3cSTejun Heo 		struct global_cwq *gcwq = get_gcwq(cpu);
34268b03ae3cSTejun Heo 
34278b03ae3cSTejun Heo 		spin_lock_init(&gcwq->lock);
34287e11629dSTejun Heo 		INIT_LIST_HEAD(&gcwq->worklist);
34298b03ae3cSTejun Heo 		gcwq->cpu = cpu;
34308b03ae3cSTejun Heo 
3431c8e55f36STejun Heo 		INIT_LIST_HEAD(&gcwq->idle_list);
3432c8e55f36STejun Heo 		for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++)
3433c8e55f36STejun Heo 			INIT_HLIST_HEAD(&gcwq->busy_hash[i]);
3434c8e55f36STejun Heo 
3435e22bee78STejun Heo 		init_timer_deferrable(&gcwq->idle_timer);
3436e22bee78STejun Heo 		gcwq->idle_timer.function = idle_worker_timeout;
3437e22bee78STejun Heo 		gcwq->idle_timer.data = (unsigned long)gcwq;
3438e22bee78STejun Heo 
3439e22bee78STejun Heo 		setup_timer(&gcwq->mayday_timer, gcwq_mayday_timeout,
3440e22bee78STejun Heo 			    (unsigned long)gcwq);
3441e22bee78STejun Heo 
34428b03ae3cSTejun Heo 		ida_init(&gcwq->worker_ida);
3443db7bccf4STejun Heo 
3444db7bccf4STejun Heo 		gcwq->trustee_state = TRUSTEE_DONE;
3445db7bccf4STejun Heo 		init_waitqueue_head(&gcwq->trustee_wait);
34468b03ae3cSTejun Heo 	}
34478b03ae3cSTejun Heo 
3448e22bee78STejun Heo 	/* create the initial worker */
3449e22bee78STejun Heo 	for_each_online_cpu(cpu) {
3450e22bee78STejun Heo 		struct global_cwq *gcwq = get_gcwq(cpu);
3451e22bee78STejun Heo 		struct worker *worker;
3452e22bee78STejun Heo 
3453e22bee78STejun Heo 		worker = create_worker(gcwq, true);
3454e22bee78STejun Heo 		BUG_ON(!worker);
3455e22bee78STejun Heo 		spin_lock_irq(&gcwq->lock);
3456e22bee78STejun Heo 		start_worker(worker);
3457e22bee78STejun Heo 		spin_unlock_irq(&gcwq->lock);
3458e22bee78STejun Heo 	}
3459e22bee78STejun Heo 
3460d320c038STejun Heo 	system_wq = alloc_workqueue("events", 0, 0);
3461d320c038STejun Heo 	system_long_wq = alloc_workqueue("events_long", 0, 0);
3462d320c038STejun Heo 	system_nrt_wq = alloc_workqueue("events_nrt", WQ_NON_REENTRANT, 0);
3463d320c038STejun Heo 	BUG_ON(!system_wq || !system_long_wq || !system_nrt_wq);
34641da177e4SLinus Torvalds }
3465