xref: /linux-6.15/kernel/workqueue.c (revision 7a22ad75)
11da177e4SLinus Torvalds /*
21da177e4SLinus Torvalds  * linux/kernel/workqueue.c
31da177e4SLinus Torvalds  *
41da177e4SLinus Torvalds  * Generic mechanism for defining kernel helper threads for running
51da177e4SLinus Torvalds  * arbitrary tasks in process context.
61da177e4SLinus Torvalds  *
71da177e4SLinus Torvalds  * Started by Ingo Molnar, Copyright (C) 2002
81da177e4SLinus Torvalds  *
91da177e4SLinus Torvalds  * Derived from the taskqueue/keventd code by:
101da177e4SLinus Torvalds  *
111da177e4SLinus Torvalds  *   David Woodhouse <[email protected]>
12e1f8e874SFrancois Cami  *   Andrew Morton
131da177e4SLinus Torvalds  *   Kai Petzke <[email protected]>
141da177e4SLinus Torvalds  *   Theodore Ts'o <[email protected]>
1589ada679SChristoph Lameter  *
16cde53535SChristoph Lameter  * Made to use alloc_percpu by Christoph Lameter.
171da177e4SLinus Torvalds  */
181da177e4SLinus Torvalds 
191da177e4SLinus Torvalds #include <linux/module.h>
201da177e4SLinus Torvalds #include <linux/kernel.h>
211da177e4SLinus Torvalds #include <linux/sched.h>
221da177e4SLinus Torvalds #include <linux/init.h>
231da177e4SLinus Torvalds #include <linux/signal.h>
241da177e4SLinus Torvalds #include <linux/completion.h>
251da177e4SLinus Torvalds #include <linux/workqueue.h>
261da177e4SLinus Torvalds #include <linux/slab.h>
271da177e4SLinus Torvalds #include <linux/cpu.h>
281da177e4SLinus Torvalds #include <linux/notifier.h>
291da177e4SLinus Torvalds #include <linux/kthread.h>
301fa44ecaSJames Bottomley #include <linux/hardirq.h>
3146934023SChristoph Lameter #include <linux/mempolicy.h>
32341a5958SRafael J. Wysocki #include <linux/freezer.h>
33d5abe669SPeter Zijlstra #include <linux/kallsyms.h>
34d5abe669SPeter Zijlstra #include <linux/debug_locks.h>
354e6045f1SJohannes Berg #include <linux/lockdep.h>
36c34056a3STejun Heo #include <linux/idr.h>
371da177e4SLinus Torvalds 
38c8e55f36STejun Heo enum {
39db7bccf4STejun Heo 	/* global_cwq flags */
40db7bccf4STejun Heo 	GCWQ_FREEZING		= 1 << 3,	/* freeze in progress */
41db7bccf4STejun Heo 
42c8e55f36STejun Heo 	/* worker flags */
43c8e55f36STejun Heo 	WORKER_STARTED		= 1 << 0,	/* started */
44c8e55f36STejun Heo 	WORKER_DIE		= 1 << 1,	/* die die die */
45c8e55f36STejun Heo 	WORKER_IDLE		= 1 << 2,	/* is idle */
46db7bccf4STejun Heo 	WORKER_ROGUE		= 1 << 4,	/* not bound to any cpu */
47db7bccf4STejun Heo 
48db7bccf4STejun Heo 	/* gcwq->trustee_state */
49db7bccf4STejun Heo 	TRUSTEE_START		= 0,		/* start */
50db7bccf4STejun Heo 	TRUSTEE_IN_CHARGE	= 1,		/* trustee in charge of gcwq */
51db7bccf4STejun Heo 	TRUSTEE_BUTCHER		= 2,		/* butcher workers */
52db7bccf4STejun Heo 	TRUSTEE_RELEASE		= 3,		/* release workers */
53db7bccf4STejun Heo 	TRUSTEE_DONE		= 4,		/* trustee is done */
54c8e55f36STejun Heo 
55c8e55f36STejun Heo 	BUSY_WORKER_HASH_ORDER	= 6,		/* 64 pointers */
56c8e55f36STejun Heo 	BUSY_WORKER_HASH_SIZE	= 1 << BUSY_WORKER_HASH_ORDER,
57c8e55f36STejun Heo 	BUSY_WORKER_HASH_MASK	= BUSY_WORKER_HASH_SIZE - 1,
58db7bccf4STejun Heo 
59db7bccf4STejun Heo 	TRUSTEE_COOLDOWN	= HZ / 10,	/* for trustee draining */
60c8e55f36STejun Heo };
61c8e55f36STejun Heo 
621da177e4SLinus Torvalds /*
634690c4abSTejun Heo  * Structure fields follow one of the following exclusion rules.
644690c4abSTejun Heo  *
654690c4abSTejun Heo  * I: Set during initialization and read-only afterwards.
664690c4abSTejun Heo  *
678b03ae3cSTejun Heo  * L: gcwq->lock protected.  Access with gcwq->lock held.
684690c4abSTejun Heo  *
6973f53c4aSTejun Heo  * F: wq->flush_mutex protected.
7073f53c4aSTejun Heo  *
714690c4abSTejun Heo  * W: workqueue_lock protected.
724690c4abSTejun Heo  */
734690c4abSTejun Heo 
748b03ae3cSTejun Heo struct global_cwq;
75c34056a3STejun Heo struct cpu_workqueue_struct;
76c34056a3STejun Heo 
77c34056a3STejun Heo struct worker {
78c8e55f36STejun Heo 	/* on idle list while idle, on busy hash table while busy */
79c8e55f36STejun Heo 	union {
80c8e55f36STejun Heo 		struct list_head	entry;	/* L: while idle */
81c8e55f36STejun Heo 		struct hlist_node	hentry;	/* L: while busy */
82c8e55f36STejun Heo 	};
83c8e55f36STejun Heo 
84c34056a3STejun Heo 	struct work_struct	*current_work;	/* L: work being processed */
858cca0eeaSTejun Heo 	struct cpu_workqueue_struct *current_cwq; /* L: current_work's cwq */
86affee4b2STejun Heo 	struct list_head	scheduled;	/* L: scheduled works */
87c34056a3STejun Heo 	struct task_struct	*task;		/* I: worker task */
888b03ae3cSTejun Heo 	struct global_cwq	*gcwq;		/* I: the associated gcwq */
89c34056a3STejun Heo 	struct cpu_workqueue_struct *cwq;	/* I: the associated cwq */
90c8e55f36STejun Heo 	unsigned int		flags;		/* L: flags */
91c34056a3STejun Heo 	int			id;		/* I: worker id */
92c34056a3STejun Heo };
93c34056a3STejun Heo 
944690c4abSTejun Heo /*
958b03ae3cSTejun Heo  * Global per-cpu workqueue.
968b03ae3cSTejun Heo  */
978b03ae3cSTejun Heo struct global_cwq {
988b03ae3cSTejun Heo 	spinlock_t		lock;		/* the gcwq lock */
998b03ae3cSTejun Heo 	unsigned int		cpu;		/* I: the associated cpu */
100db7bccf4STejun Heo 	unsigned int		flags;		/* L: GCWQ_* flags */
101c8e55f36STejun Heo 
102c8e55f36STejun Heo 	int			nr_workers;	/* L: total number of workers */
103c8e55f36STejun Heo 	int			nr_idle;	/* L: currently idle ones */
104c8e55f36STejun Heo 
105c8e55f36STejun Heo 	/* workers are chained either in the idle_list or busy_hash */
106c8e55f36STejun Heo 	struct list_head	idle_list;	/* L: list of idle workers */
107c8e55f36STejun Heo 	struct hlist_head	busy_hash[BUSY_WORKER_HASH_SIZE];
108c8e55f36STejun Heo 						/* L: hash of busy workers */
109c8e55f36STejun Heo 
1108b03ae3cSTejun Heo 	struct ida		worker_ida;	/* L: for worker IDs */
111db7bccf4STejun Heo 
112db7bccf4STejun Heo 	struct task_struct	*trustee;	/* L: for gcwq shutdown */
113db7bccf4STejun Heo 	unsigned int		trustee_state;	/* L: trustee state */
114db7bccf4STejun Heo 	wait_queue_head_t	trustee_wait;	/* trustee wait */
1158b03ae3cSTejun Heo } ____cacheline_aligned_in_smp;
1168b03ae3cSTejun Heo 
1178b03ae3cSTejun Heo /*
118502ca9d8STejun Heo  * The per-CPU workqueue.  The lower WORK_STRUCT_FLAG_BITS of
1190f900049STejun Heo  * work_struct->data are used for flags and thus cwqs need to be
1200f900049STejun Heo  * aligned on a (1 << WORK_STRUCT_FLAG_BITS) boundary.
1211da177e4SLinus Torvalds  */
1221da177e4SLinus Torvalds struct cpu_workqueue_struct {
1238b03ae3cSTejun Heo 	struct global_cwq	*gcwq;		/* I: the associated gcwq */
1241da177e4SLinus Torvalds 	struct list_head worklist;
125c34056a3STejun Heo 	struct worker		*worker;
1264690c4abSTejun Heo 	struct workqueue_struct *wq;		/* I: the owning workqueue */
12773f53c4aSTejun Heo 	int			work_color;	/* L: current color */
12873f53c4aSTejun Heo 	int			flush_color;	/* L: flushing color */
12973f53c4aSTejun Heo 	int			nr_in_flight[WORK_NR_COLORS];
13073f53c4aSTejun Heo 						/* L: nr of in_flight works */
1311e19ffc6STejun Heo 	int			nr_active;	/* L: nr of active works */
132a0a1a5fdSTejun Heo 	int			max_active;	/* L: max active works */
1331e19ffc6STejun Heo 	struct list_head	delayed_works;	/* L: delayed works */
1340f900049STejun Heo };
1351da177e4SLinus Torvalds 
1361da177e4SLinus Torvalds /*
13773f53c4aSTejun Heo  * Structure used to wait for workqueue flush.
13873f53c4aSTejun Heo  */
13973f53c4aSTejun Heo struct wq_flusher {
14073f53c4aSTejun Heo 	struct list_head	list;		/* F: list of flushers */
14173f53c4aSTejun Heo 	int			flush_color;	/* F: flush color waiting for */
14273f53c4aSTejun Heo 	struct completion	done;		/* flush completion */
14373f53c4aSTejun Heo };
14473f53c4aSTejun Heo 
14573f53c4aSTejun Heo /*
1461da177e4SLinus Torvalds  * The externally visible workqueue abstraction is an array of
1471da177e4SLinus Torvalds  * per-CPU workqueues:
1481da177e4SLinus Torvalds  */
1491da177e4SLinus Torvalds struct workqueue_struct {
15097e37d7bSTejun Heo 	unsigned int		flags;		/* I: WQ_* flags */
1514690c4abSTejun Heo 	struct cpu_workqueue_struct *cpu_wq;	/* I: cwq's */
1524690c4abSTejun Heo 	struct list_head	list;		/* W: list of all workqueues */
15373f53c4aSTejun Heo 
15473f53c4aSTejun Heo 	struct mutex		flush_mutex;	/* protects wq flushing */
15573f53c4aSTejun Heo 	int			work_color;	/* F: current work color */
15673f53c4aSTejun Heo 	int			flush_color;	/* F: current flush color */
15773f53c4aSTejun Heo 	atomic_t		nr_cwqs_to_flush; /* flush in progress */
15873f53c4aSTejun Heo 	struct wq_flusher	*first_flusher;	/* F: first flusher */
15973f53c4aSTejun Heo 	struct list_head	flusher_queue;	/* F: flush waiters */
16073f53c4aSTejun Heo 	struct list_head	flusher_overflow; /* F: flush overflow list */
16173f53c4aSTejun Heo 
162502ca9d8STejun Heo 	unsigned long		single_cpu;	/* cpu for single cpu wq */
163502ca9d8STejun Heo 
164a0a1a5fdSTejun Heo 	int			saved_max_active; /* I: saved cwq max_active */
1654690c4abSTejun Heo 	const char		*name;		/* I: workqueue name */
1664e6045f1SJohannes Berg #ifdef CONFIG_LOCKDEP
1674e6045f1SJohannes Berg 	struct lockdep_map	lockdep_map;
1684e6045f1SJohannes Berg #endif
1691da177e4SLinus Torvalds };
1701da177e4SLinus Torvalds 
171db7bccf4STejun Heo #define for_each_busy_worker(worker, i, pos, gcwq)			\
172db7bccf4STejun Heo 	for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++)			\
173db7bccf4STejun Heo 		hlist_for_each_entry(worker, pos, &gcwq->busy_hash[i], hentry)
174db7bccf4STejun Heo 
175dc186ad7SThomas Gleixner #ifdef CONFIG_DEBUG_OBJECTS_WORK
176dc186ad7SThomas Gleixner 
177dc186ad7SThomas Gleixner static struct debug_obj_descr work_debug_descr;
178dc186ad7SThomas Gleixner 
179dc186ad7SThomas Gleixner /*
180dc186ad7SThomas Gleixner  * fixup_init is called when:
181dc186ad7SThomas Gleixner  * - an active object is initialized
182dc186ad7SThomas Gleixner  */
183dc186ad7SThomas Gleixner static int work_fixup_init(void *addr, enum debug_obj_state state)
184dc186ad7SThomas Gleixner {
185dc186ad7SThomas Gleixner 	struct work_struct *work = addr;
186dc186ad7SThomas Gleixner 
187dc186ad7SThomas Gleixner 	switch (state) {
188dc186ad7SThomas Gleixner 	case ODEBUG_STATE_ACTIVE:
189dc186ad7SThomas Gleixner 		cancel_work_sync(work);
190dc186ad7SThomas Gleixner 		debug_object_init(work, &work_debug_descr);
191dc186ad7SThomas Gleixner 		return 1;
192dc186ad7SThomas Gleixner 	default:
193dc186ad7SThomas Gleixner 		return 0;
194dc186ad7SThomas Gleixner 	}
195dc186ad7SThomas Gleixner }
196dc186ad7SThomas Gleixner 
197dc186ad7SThomas Gleixner /*
198dc186ad7SThomas Gleixner  * fixup_activate is called when:
199dc186ad7SThomas Gleixner  * - an active object is activated
200dc186ad7SThomas Gleixner  * - an unknown object is activated (might be a statically initialized object)
201dc186ad7SThomas Gleixner  */
202dc186ad7SThomas Gleixner static int work_fixup_activate(void *addr, enum debug_obj_state state)
203dc186ad7SThomas Gleixner {
204dc186ad7SThomas Gleixner 	struct work_struct *work = addr;
205dc186ad7SThomas Gleixner 
206dc186ad7SThomas Gleixner 	switch (state) {
207dc186ad7SThomas Gleixner 
208dc186ad7SThomas Gleixner 	case ODEBUG_STATE_NOTAVAILABLE:
209dc186ad7SThomas Gleixner 		/*
210dc186ad7SThomas Gleixner 		 * This is not really a fixup. The work struct was
211dc186ad7SThomas Gleixner 		 * statically initialized. We just make sure that it
212dc186ad7SThomas Gleixner 		 * is tracked in the object tracker.
213dc186ad7SThomas Gleixner 		 */
21422df02bbSTejun Heo 		if (test_bit(WORK_STRUCT_STATIC_BIT, work_data_bits(work))) {
215dc186ad7SThomas Gleixner 			debug_object_init(work, &work_debug_descr);
216dc186ad7SThomas Gleixner 			debug_object_activate(work, &work_debug_descr);
217dc186ad7SThomas Gleixner 			return 0;
218dc186ad7SThomas Gleixner 		}
219dc186ad7SThomas Gleixner 		WARN_ON_ONCE(1);
220dc186ad7SThomas Gleixner 		return 0;
221dc186ad7SThomas Gleixner 
222dc186ad7SThomas Gleixner 	case ODEBUG_STATE_ACTIVE:
223dc186ad7SThomas Gleixner 		WARN_ON(1);
224dc186ad7SThomas Gleixner 
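		/* fall through to "default" and return 0 */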
225dc186ad7SThomas Gleixner 	default:
226dc186ad7SThomas Gleixner 		return 0;
227dc186ad7SThomas Gleixner 	}
228dc186ad7SThomas Gleixner }
229dc186ad7SThomas Gleixner 
230dc186ad7SThomas Gleixner /*
231dc186ad7SThomas Gleixner  * fixup_free is called when:
232dc186ad7SThomas Gleixner  * - an active object is freed
233dc186ad7SThomas Gleixner  */
234dc186ad7SThomas Gleixner static int work_fixup_free(void *addr, enum debug_obj_state state)
235dc186ad7SThomas Gleixner {
236dc186ad7SThomas Gleixner 	struct work_struct *work = addr;
237dc186ad7SThomas Gleixner 
238dc186ad7SThomas Gleixner 	switch (state) {
239dc186ad7SThomas Gleixner 	case ODEBUG_STATE_ACTIVE:
240dc186ad7SThomas Gleixner 		cancel_work_sync(work);
241dc186ad7SThomas Gleixner 		debug_object_free(work, &work_debug_descr);
242dc186ad7SThomas Gleixner 		return 1;
243dc186ad7SThomas Gleixner 	default:
244dc186ad7SThomas Gleixner 		return 0;
245dc186ad7SThomas Gleixner 	}
246dc186ad7SThomas Gleixner }
247dc186ad7SThomas Gleixner 
248dc186ad7SThomas Gleixner static struct debug_obj_descr work_debug_descr = {
249dc186ad7SThomas Gleixner 	.name		= "work_struct",
250dc186ad7SThomas Gleixner 	.fixup_init	= work_fixup_init,
251dc186ad7SThomas Gleixner 	.fixup_activate	= work_fixup_activate,
252dc186ad7SThomas Gleixner 	.fixup_free	= work_fixup_free,
253dc186ad7SThomas Gleixner };
254dc186ad7SThomas Gleixner 
255dc186ad7SThomas Gleixner static inline void debug_work_activate(struct work_struct *work)
256dc186ad7SThomas Gleixner {
257dc186ad7SThomas Gleixner 	debug_object_activate(work, &work_debug_descr);
258dc186ad7SThomas Gleixner }
259dc186ad7SThomas Gleixner 
260dc186ad7SThomas Gleixner static inline void debug_work_deactivate(struct work_struct *work)
261dc186ad7SThomas Gleixner {
262dc186ad7SThomas Gleixner 	debug_object_deactivate(work, &work_debug_descr);
263dc186ad7SThomas Gleixner }
264dc186ad7SThomas Gleixner 
265dc186ad7SThomas Gleixner void __init_work(struct work_struct *work, int onstack)
266dc186ad7SThomas Gleixner {
267dc186ad7SThomas Gleixner 	if (onstack)
268dc186ad7SThomas Gleixner 		debug_object_init_on_stack(work, &work_debug_descr);
269dc186ad7SThomas Gleixner 	else
270dc186ad7SThomas Gleixner 		debug_object_init(work, &work_debug_descr);
271dc186ad7SThomas Gleixner }
272dc186ad7SThomas Gleixner EXPORT_SYMBOL_GPL(__init_work);
273dc186ad7SThomas Gleixner 
274dc186ad7SThomas Gleixner void destroy_work_on_stack(struct work_struct *work)
275dc186ad7SThomas Gleixner {
276dc186ad7SThomas Gleixner 	debug_object_free(work, &work_debug_descr);
277dc186ad7SThomas Gleixner }
278dc186ad7SThomas Gleixner EXPORT_SYMBOL_GPL(destroy_work_on_stack);
279dc186ad7SThomas Gleixner 
280dc186ad7SThomas Gleixner #else
281dc186ad7SThomas Gleixner static inline void debug_work_activate(struct work_struct *work) { }
282dc186ad7SThomas Gleixner static inline void debug_work_deactivate(struct work_struct *work) { }
283dc186ad7SThomas Gleixner #endif
284dc186ad7SThomas Gleixner 
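/*
 * Editorial sketch, not part of this file: how the on-stack debug
 * hooks above are typically used.  my_work_fn is an illustrative
 * name; the init macro is spelled INIT_WORK_ON_STACK in trees of
 * this vintage (INIT_WORK_ONSTACK in later ones).
 *
 *	struct work_struct work;
 *
 *	INIT_WORK_ON_STACK(&work, my_work_fn);
 *	schedule_work(&work);
 *	flush_work(&work);		(wait before the frame goes away)
 *	destroy_work_on_stack(&work);	(pairs with debug_object_free())
 */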
28595402b38SGautham R Shenoy /* Serializes the accesses to the list of workqueues. */
28695402b38SGautham R Shenoy static DEFINE_SPINLOCK(workqueue_lock);
2871da177e4SLinus Torvalds static LIST_HEAD(workqueues);
288a0a1a5fdSTejun Heo static bool workqueue_freezing;		/* W: have wqs started freezing? */
289c34056a3STejun Heo 
2908b03ae3cSTejun Heo static DEFINE_PER_CPU(struct global_cwq, global_cwq);
2918b03ae3cSTejun Heo 
292c34056a3STejun Heo static int worker_thread(void *__worker);
2931da177e4SLinus Torvalds 
2948b03ae3cSTejun Heo static struct global_cwq *get_gcwq(unsigned int cpu)
2958b03ae3cSTejun Heo {
2968b03ae3cSTejun Heo 	return &per_cpu(global_cwq, cpu);
2978b03ae3cSTejun Heo }
2988b03ae3cSTejun Heo 
2994690c4abSTejun Heo static struct cpu_workqueue_struct *get_cwq(unsigned int cpu,
3004690c4abSTejun Heo 					    struct workqueue_struct *wq)
301a848e3b6SOleg Nesterov {
302a848e3b6SOleg Nesterov 	return per_cpu_ptr(wq->cpu_wq, cpu);
303a848e3b6SOleg Nesterov }
304a848e3b6SOleg Nesterov 
30573f53c4aSTejun Heo static unsigned int work_color_to_flags(int color)
30673f53c4aSTejun Heo {
30773f53c4aSTejun Heo 	return color << WORK_STRUCT_COLOR_SHIFT;
30873f53c4aSTejun Heo }
30973f53c4aSTejun Heo 
31073f53c4aSTejun Heo static int get_work_color(struct work_struct *work)
31173f53c4aSTejun Heo {
31273f53c4aSTejun Heo 	return (*work_data_bits(work) >> WORK_STRUCT_COLOR_SHIFT) &
31373f53c4aSTejun Heo 		((1 << WORK_STRUCT_COLOR_BITS) - 1);
31473f53c4aSTejun Heo }
31573f53c4aSTejun Heo 
31673f53c4aSTejun Heo static int work_next_color(int color)
31773f53c4aSTejun Heo {
31873f53c4aSTejun Heo 	return (color + 1) % WORK_NR_COLORS;
31973f53c4aSTejun Heo }
32073f53c4aSTejun Heo 
3214594bf15SDavid Howells /*
322*7a22ad75STejun Heo  * Work data points to the cwq while a work is on queue.  Once
323*7a22ad75STejun Heo  * execution starts, it points to the cpu the work was last on.  This
324*7a22ad75STejun Heo  * can be distinguished by comparing the data value against
325*7a22ad75STejun Heo  * PAGE_OFFSET.
326*7a22ad75STejun Heo  *
327*7a22ad75STejun Heo  * set_work_{cwq|cpu}() and clear_work_data() can be used to set the
328*7a22ad75STejun Heo  * cwq, cpu or clear work->data.  These functions should only be
329*7a22ad75STejun Heo  * called while the work is owned - i.e. while the PENDING bit is set.
330*7a22ad75STejun Heo  *
331*7a22ad75STejun Heo  * get_work_[g]cwq() can be used to obtain the gcwq or cwq
332*7a22ad75STejun Heo  * corresponding to a work.  gcwq is available once the work has been
333*7a22ad75STejun Heo  * queued anywhere after initialization.  cwq is available only from
334*7a22ad75STejun Heo  * queueing until execution starts.
3354594bf15SDavid Howells  */
336*7a22ad75STejun Heo static inline void set_work_data(struct work_struct *work, unsigned long data,
337*7a22ad75STejun Heo 				 unsigned long flags)
338*7a22ad75STejun Heo {
339*7a22ad75STejun Heo 	BUG_ON(!work_pending(work));
340*7a22ad75STejun Heo 	atomic_long_set(&work->data, data | flags | work_static(work));
341*7a22ad75STejun Heo }
342*7a22ad75STejun Heo 
343*7a22ad75STejun Heo static void set_work_cwq(struct work_struct *work,
3444690c4abSTejun Heo 			 struct cpu_workqueue_struct *cwq,
3454690c4abSTejun Heo 			 unsigned long extra_flags)
346365970a1SDavid Howells {
347*7a22ad75STejun Heo 	set_work_data(work, (unsigned long)cwq,
34822df02bbSTejun Heo 		      WORK_STRUCT_PENDING | extra_flags);
349365970a1SDavid Howells }
350365970a1SDavid Howells 
351*7a22ad75STejun Heo static void set_work_cpu(struct work_struct *work, unsigned int cpu)
3524d707b9fSOleg Nesterov {
353*7a22ad75STejun Heo 	set_work_data(work, cpu << WORK_STRUCT_FLAG_BITS, WORK_STRUCT_PENDING);
3544d707b9fSOleg Nesterov }
3554d707b9fSOleg Nesterov 
356*7a22ad75STejun Heo static void clear_work_data(struct work_struct *work)
357365970a1SDavid Howells {
358*7a22ad75STejun Heo 	set_work_data(work, WORK_STRUCT_NO_CPU, 0);
359*7a22ad75STejun Heo }
360*7a22ad75STejun Heo 
361*7a22ad75STejun Heo static inline unsigned long get_work_data(struct work_struct *work)
362*7a22ad75STejun Heo {
363*7a22ad75STejun Heo 	return atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK;
364*7a22ad75STejun Heo }
365*7a22ad75STejun Heo 
366*7a22ad75STejun Heo static struct cpu_workqueue_struct *get_work_cwq(struct work_struct *work)
367*7a22ad75STejun Heo {
368*7a22ad75STejun Heo 	unsigned long data = get_work_data(work);
369*7a22ad75STejun Heo 
370*7a22ad75STejun Heo 	return data >= PAGE_OFFSET ? (void *)data : NULL;
371*7a22ad75STejun Heo }
372*7a22ad75STejun Heo 
373*7a22ad75STejun Heo static struct global_cwq *get_work_gcwq(struct work_struct *work)
374*7a22ad75STejun Heo {
375*7a22ad75STejun Heo 	unsigned long data = get_work_data(work);
376*7a22ad75STejun Heo 	unsigned int cpu;
377*7a22ad75STejun Heo 
378*7a22ad75STejun Heo 	if (data >= PAGE_OFFSET)
379*7a22ad75STejun Heo 		return ((struct cpu_workqueue_struct *)data)->gcwq;
380*7a22ad75STejun Heo 
381*7a22ad75STejun Heo 	cpu = data >> WORK_STRUCT_FLAG_BITS;
382*7a22ad75STejun Heo 	if (cpu == NR_CPUS)
383*7a22ad75STejun Heo 		return NULL;
384*7a22ad75STejun Heo 
385*7a22ad75STejun Heo 	BUG_ON(cpu >= num_possible_cpus());
386*7a22ad75STejun Heo 	return get_gcwq(cpu);
387365970a1SDavid Howells }
388365970a1SDavid Howells 
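/*
 * Editorial example of the encoding implemented above (values are
 * illustrative).  A cwq is always a kernel pointer and thus compares
 * >= PAGE_OFFSET, while a shifted cpu number stays well below it:
 *
 *	queued:		data = (unsigned long)cwq | flags
 *	executing:	data = (cpu << WORK_STRUCT_FLAG_BITS) | flags
 *	cleared:	data = WORK_STRUCT_NO_CPU (NR_CPUS shifted up)
 *
 * so get_work_cwq() and get_work_gcwq() can disambiguate these
 * states with a single comparison against PAGE_OFFSET.
 */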
3894690c4abSTejun Heo /**
390c8e55f36STejun Heo  * busy_worker_head - return the busy hash head for a work
391c8e55f36STejun Heo  * @gcwq: gcwq of interest
392c8e55f36STejun Heo  * @work: work to be hashed
393c8e55f36STejun Heo  *
394c8e55f36STejun Heo  * Return hash head of @gcwq for @work.
395c8e55f36STejun Heo  *
396c8e55f36STejun Heo  * CONTEXT:
397c8e55f36STejun Heo  * spin_lock_irq(gcwq->lock).
398c8e55f36STejun Heo  *
399c8e55f36STejun Heo  * RETURNS:
400c8e55f36STejun Heo  * Pointer to the hash head.
401c8e55f36STejun Heo  */
402c8e55f36STejun Heo static struct hlist_head *busy_worker_head(struct global_cwq *gcwq,
403c8e55f36STejun Heo 					   struct work_struct *work)
404c8e55f36STejun Heo {
405c8e55f36STejun Heo 	const int base_shift = ilog2(sizeof(struct work_struct));
406c8e55f36STejun Heo 	unsigned long v = (unsigned long)work;
407c8e55f36STejun Heo 
408c8e55f36STejun Heo 	/* simple shift and fold hash, do we need something better? */
409c8e55f36STejun Heo 	v >>= base_shift;
410c8e55f36STejun Heo 	v += v >> BUSY_WORKER_HASH_ORDER;
411c8e55f36STejun Heo 	v &= BUSY_WORKER_HASH_MASK;
412c8e55f36STejun Heo 
413c8e55f36STejun Heo 	return &gcwq->busy_hash[v];
414c8e55f36STejun Heo }
415c8e55f36STejun Heo 
416c8e55f36STejun Heo /**
4178cca0eeaSTejun Heo  * __find_worker_executing_work - find worker which is executing a work
4188cca0eeaSTejun Heo  * @gcwq: gcwq of interest
4198cca0eeaSTejun Heo  * @bwh: hash head as returned by busy_worker_head()
4208cca0eeaSTejun Heo  * @work: work to find worker for
4218cca0eeaSTejun Heo  *
4228cca0eeaSTejun Heo  * Find a worker which is executing @work on @gcwq.  @bwh should be
4238cca0eeaSTejun Heo  * the hash head obtained by calling busy_worker_head() with the same
4248cca0eeaSTejun Heo  * work.
4258cca0eeaSTejun Heo  *
4268cca0eeaSTejun Heo  * CONTEXT:
4278cca0eeaSTejun Heo  * spin_lock_irq(gcwq->lock).
4288cca0eeaSTejun Heo  *
4298cca0eeaSTejun Heo  * RETURNS:
4308cca0eeaSTejun Heo  * Pointer to worker which is executing @work if found, NULL
4318cca0eeaSTejun Heo  * otherwise.
4328cca0eeaSTejun Heo  */
4338cca0eeaSTejun Heo static struct worker *__find_worker_executing_work(struct global_cwq *gcwq,
4348cca0eeaSTejun Heo 						   struct hlist_head *bwh,
4358cca0eeaSTejun Heo 						   struct work_struct *work)
4368cca0eeaSTejun Heo {
4378cca0eeaSTejun Heo 	struct worker *worker;
4388cca0eeaSTejun Heo 	struct hlist_node *tmp;
4398cca0eeaSTejun Heo 
4408cca0eeaSTejun Heo 	hlist_for_each_entry(worker, tmp, bwh, hentry)
4418cca0eeaSTejun Heo 		if (worker->current_work == work)
4428cca0eeaSTejun Heo 			return worker;
4438cca0eeaSTejun Heo 	return NULL;
4448cca0eeaSTejun Heo }
4458cca0eeaSTejun Heo 
4468cca0eeaSTejun Heo /**
4478cca0eeaSTejun Heo  * find_worker_executing_work - find worker which is executing a work
4488cca0eeaSTejun Heo  * @gcwq: gcwq of interest
4498cca0eeaSTejun Heo  * @work: work to find worker for
4508cca0eeaSTejun Heo  *
4518cca0eeaSTejun Heo  * Find a worker which is executing @work on @gcwq.  This function is
4528cca0eeaSTejun Heo  * identical to __find_worker_executing_work() except that this
4538cca0eeaSTejun Heo  * function calculates @bwh itself.
4548cca0eeaSTejun Heo  *
4558cca0eeaSTejun Heo  * CONTEXT:
4568cca0eeaSTejun Heo  * spin_lock_irq(gcwq->lock).
4578cca0eeaSTejun Heo  *
4588cca0eeaSTejun Heo  * RETURNS:
4598cca0eeaSTejun Heo  * Pointer to worker which is executing @work if found, NULL
4608cca0eeaSTejun Heo  * otherwise.
4618cca0eeaSTejun Heo  */
4628cca0eeaSTejun Heo static struct worker *find_worker_executing_work(struct global_cwq *gcwq,
4638cca0eeaSTejun Heo 						 struct work_struct *work)
4648cca0eeaSTejun Heo {
4658cca0eeaSTejun Heo 	return __find_worker_executing_work(gcwq, busy_worker_head(gcwq, work),
4668cca0eeaSTejun Heo 					    work);
4678cca0eeaSTejun Heo }
4688cca0eeaSTejun Heo 
4698cca0eeaSTejun Heo /**
4704690c4abSTejun Heo  * insert_work - insert a work into cwq
4714690c4abSTejun Heo  * @cwq: cwq @work belongs to
4724690c4abSTejun Heo  * @work: work to insert
4734690c4abSTejun Heo  * @head: insertion point
4744690c4abSTejun Heo  * @extra_flags: extra WORK_STRUCT_* flags to set
4754690c4abSTejun Heo  *
4764690c4abSTejun Heo  * Insert @work into @cwq after @head.
4774690c4abSTejun Heo  *
4784690c4abSTejun Heo  * CONTEXT:
4798b03ae3cSTejun Heo  * spin_lock_irq(gcwq->lock).
4804690c4abSTejun Heo  */
481b89deed3SOleg Nesterov static void insert_work(struct cpu_workqueue_struct *cwq,
4824690c4abSTejun Heo 			struct work_struct *work, struct list_head *head,
4834690c4abSTejun Heo 			unsigned int extra_flags)
484b89deed3SOleg Nesterov {
4854690c4abSTejun Heo 	/* we own @work, set data and link */
486*7a22ad75STejun Heo 	set_work_cwq(work, cwq, extra_flags);
4874690c4abSTejun Heo 
4886e84d644SOleg Nesterov 	/*
4896e84d644SOleg Nesterov 	 * Ensure that we get the right work->data if we see the
4906e84d644SOleg Nesterov 	 * result of list_add() below, see try_to_grab_pending().
4916e84d644SOleg Nesterov 	 */
4926e84d644SOleg Nesterov 	smp_wmb();
4934690c4abSTejun Heo 
4941a4d9b0aSOleg Nesterov 	list_add_tail(&work->entry, head);
495c8e55f36STejun Heo 	wake_up_process(cwq->worker->task);
496b89deed3SOleg Nesterov }
497b89deed3SOleg Nesterov 
498502ca9d8STejun Heo /**
499502ca9d8STejun Heo  * cwq_unbind_single_cpu - unbind cwq from single cpu workqueue processing
500502ca9d8STejun Heo  * @cwq: cwq to unbind
501502ca9d8STejun Heo  *
502502ca9d8STejun Heo  * Try to unbind @cwq from single cpu workqueue processing.  If
503502ca9d8STejun Heo  * @cwq->wq is frozen, unbind is delayed till the workqueue is thawed.
504502ca9d8STejun Heo  *
505502ca9d8STejun Heo  * CONTEXT:
506502ca9d8STejun Heo  * spin_lock_irq(gcwq->lock).
507502ca9d8STejun Heo  */
508502ca9d8STejun Heo static void cwq_unbind_single_cpu(struct cpu_workqueue_struct *cwq)
509502ca9d8STejun Heo {
510502ca9d8STejun Heo 	struct workqueue_struct *wq = cwq->wq;
511502ca9d8STejun Heo 	struct global_cwq *gcwq = cwq->gcwq;
512502ca9d8STejun Heo 
513502ca9d8STejun Heo 	BUG_ON(wq->single_cpu != gcwq->cpu);
514502ca9d8STejun Heo 	/*
515502ca9d8STejun Heo 	 * Unbind from workqueue if @cwq is not frozen.  If frozen,
516502ca9d8STejun Heo 	 * thaw_workqueues() will either restart processing on this
517502ca9d8STejun Heo 	 * cpu or unbind if empty.  This keeps works queued while
518502ca9d8STejun Heo 	 * frozen fully ordered and flushable.
519502ca9d8STejun Heo 	 */
520502ca9d8STejun Heo 	if (likely(!(gcwq->flags & GCWQ_FREEZING))) {
521502ca9d8STejun Heo 		smp_wmb();	/* paired with cmpxchg() in __queue_work() */
522502ca9d8STejun Heo 		wq->single_cpu = NR_CPUS;
523502ca9d8STejun Heo 	}
524502ca9d8STejun Heo }
525502ca9d8STejun Heo 
5264690c4abSTejun Heo static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
5271da177e4SLinus Torvalds 			 struct work_struct *work)
5281da177e4SLinus Torvalds {
529502ca9d8STejun Heo 	struct global_cwq *gcwq;
530502ca9d8STejun Heo 	struct cpu_workqueue_struct *cwq;
5311e19ffc6STejun Heo 	struct list_head *worklist;
5321da177e4SLinus Torvalds 	unsigned long flags;
533502ca9d8STejun Heo 	bool arbitrate;
5341da177e4SLinus Torvalds 
535dc186ad7SThomas Gleixner 	debug_work_activate(work);
5361e19ffc6STejun Heo 
537502ca9d8STejun Heo 	/* determine gcwq to use */
538502ca9d8STejun Heo 	if (!(wq->flags & WQ_SINGLE_CPU)) {
539502ca9d8STejun Heo 		/* just use the requested cpu for multicpu workqueues */
540502ca9d8STejun Heo 		gcwq = get_gcwq(cpu);
5418b03ae3cSTejun Heo 		spin_lock_irqsave(&gcwq->lock, flags);
542502ca9d8STejun Heo 	} else {
543502ca9d8STejun Heo 		unsigned int req_cpu = cpu;
544502ca9d8STejun Heo 
545502ca9d8STejun Heo 		/*
546502ca9d8STejun Heo 		 * It's a bit more complex for single cpu workqueues.
547502ca9d8STejun Heo 		 * We first need to determine which cpu is going to be
548502ca9d8STejun Heo 		 * used.  If no cpu is currently serving this
549502ca9d8STejun Heo 		 * workqueue, arbitrate using atomic accesses to
550502ca9d8STejun Heo 		 * wq->single_cpu; otherwise, use the current one.
551502ca9d8STejun Heo 		 */
552502ca9d8STejun Heo 	retry:
553502ca9d8STejun Heo 		cpu = wq->single_cpu;
554502ca9d8STejun Heo 		arbitrate = cpu == NR_CPUS;
555502ca9d8STejun Heo 		if (arbitrate)
556502ca9d8STejun Heo 			cpu = req_cpu;
557502ca9d8STejun Heo 
558502ca9d8STejun Heo 		gcwq = get_gcwq(cpu);
559502ca9d8STejun Heo 		spin_lock_irqsave(&gcwq->lock, flags);
560502ca9d8STejun Heo 
561502ca9d8STejun Heo 		/*
562502ca9d8STejun Heo 		 * The following cmpxchg() is a full barrier paired
563502ca9d8STejun Heo 		 * with smp_wmb() in cwq_unbind_single_cpu() and
564502ca9d8STejun Heo 		 * guarantees that all changes to wq->single_cpu are
565502ca9d8STejun Heo 		 * visible on the new cpu after this point.
566502ca9d8STejun Heo 		 */
567502ca9d8STejun Heo 		if (arbitrate)
568502ca9d8STejun Heo 			cmpxchg(&wq->single_cpu, NR_CPUS, cpu);
569502ca9d8STejun Heo 
570502ca9d8STejun Heo 		if (unlikely(wq->single_cpu != cpu)) {
571502ca9d8STejun Heo 			spin_unlock_irqrestore(&gcwq->lock, flags);
572502ca9d8STejun Heo 			goto retry;
573502ca9d8STejun Heo 		}
574502ca9d8STejun Heo 	}
575502ca9d8STejun Heo 
576502ca9d8STejun Heo 	/* gcwq determined, get cwq and queue */
577502ca9d8STejun Heo 	cwq = get_cwq(gcwq->cpu, wq);
578502ca9d8STejun Heo 
5794690c4abSTejun Heo 	BUG_ON(!list_empty(&work->entry));
5801e19ffc6STejun Heo 
58173f53c4aSTejun Heo 	cwq->nr_in_flight[cwq->work_color]++;
5821e19ffc6STejun Heo 
5831e19ffc6STejun Heo 	if (likely(cwq->nr_active < cwq->max_active)) {
5841e19ffc6STejun Heo 		cwq->nr_active++;
5851e19ffc6STejun Heo 		worklist = &cwq->worklist;
5861e19ffc6STejun Heo 	} else
5871e19ffc6STejun Heo 		worklist = &cwq->delayed_works;
5881e19ffc6STejun Heo 
5891e19ffc6STejun Heo 	insert_work(cwq, work, worklist, work_color_to_flags(cwq->work_color));
5901e19ffc6STejun Heo 
5918b03ae3cSTejun Heo 	spin_unlock_irqrestore(&gcwq->lock, flags);
5921da177e4SLinus Torvalds }
5931da177e4SLinus Torvalds 
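/*
 * Editorial walk-through of the WQ_SINGLE_CPU arbitration above,
 * with two hypothetical CPUs A and B queueing concurrently:
 *
 *	A: sees wq->single_cpu == NR_CPUS, arbitrates;
 *	   cmpxchg(NR_CPUS -> A) wins, proceeds to queue on gcwq A.
 *	B: also sees NR_CPUS and arbitrates, but its cmpxchg loses;
 *	   wq->single_cpu != B, so it unlocks and retries, and the
 *	   second pass queues on gcwq A as well.
 *
 * cwq_unbind_single_cpu() resets single_cpu to NR_CPUS once the cwq
 * runs empty, letting the next queuer re-arbitrate.
 */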
5940fcb78c2SRolf Eike Beer /**
5950fcb78c2SRolf Eike Beer  * queue_work - queue work on a workqueue
5960fcb78c2SRolf Eike Beer  * @wq: workqueue to use
5970fcb78c2SRolf Eike Beer  * @work: work to queue
5980fcb78c2SRolf Eike Beer  *
599057647fcSAlan Stern  * Returns 0 if @work was already on a queue, non-zero otherwise.
6001da177e4SLinus Torvalds  *
60100dfcaf7SOleg Nesterov  * We queue the work to the CPU on which it was submitted, but if the CPU dies
60200dfcaf7SOleg Nesterov  * it can be processed by another CPU.
6031da177e4SLinus Torvalds  */
6047ad5b3a5SHarvey Harrison int queue_work(struct workqueue_struct *wq, struct work_struct *work)
6051da177e4SLinus Torvalds {
606ef1ca236SOleg Nesterov 	int ret;
6071da177e4SLinus Torvalds 
608ef1ca236SOleg Nesterov 	ret = queue_work_on(get_cpu(), wq, work);
609a848e3b6SOleg Nesterov 	put_cpu();
610ef1ca236SOleg Nesterov 
6111da177e4SLinus Torvalds 	return ret;
6121da177e4SLinus Torvalds }
613ae90dd5dSDave Jones EXPORT_SYMBOL_GPL(queue_work);
6141da177e4SLinus Torvalds 
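/*
 * Editorial sketch, not part of this file: minimal queue_work()
 * usage from a hypothetical caller.  my_wq, my_work and my_work_fn
 * are illustrative names, not kernel symbols.
 *
 *	static struct workqueue_struct *my_wq;
 *	static void my_work_fn(struct work_struct *work);
 *	static DECLARE_WORK(my_work, my_work_fn);
 *
 *	my_wq = create_workqueue("my_wq");
 *	queue_work(my_wq, &my_work);	(returns 0 if already pending)
 */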
615c1a220e7SZhang Rui /**
616c1a220e7SZhang Rui  * queue_work_on - queue work on specific cpu
617c1a220e7SZhang Rui  * @cpu: CPU number to execute work on
618c1a220e7SZhang Rui  * @wq: workqueue to use
619c1a220e7SZhang Rui  * @work: work to queue
620c1a220e7SZhang Rui  *
621c1a220e7SZhang Rui  * Returns 0 if @work was already on a queue, non-zero otherwise.
622c1a220e7SZhang Rui  *
623c1a220e7SZhang Rui  * We queue the work to a specific CPU, the caller must ensure it
624c1a220e7SZhang Rui  * can't go away.
625c1a220e7SZhang Rui  */
626c1a220e7SZhang Rui int
627c1a220e7SZhang Rui queue_work_on(int cpu, struct workqueue_struct *wq, struct work_struct *work)
628c1a220e7SZhang Rui {
629c1a220e7SZhang Rui 	int ret = 0;
630c1a220e7SZhang Rui 
63122df02bbSTejun Heo 	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
6324690c4abSTejun Heo 		__queue_work(cpu, wq, work);
633c1a220e7SZhang Rui 		ret = 1;
634c1a220e7SZhang Rui 	}
635c1a220e7SZhang Rui 	return ret;
636c1a220e7SZhang Rui }
637c1a220e7SZhang Rui EXPORT_SYMBOL_GPL(queue_work_on);
638c1a220e7SZhang Rui 
6396d141c3fSLi Zefan static void delayed_work_timer_fn(unsigned long __data)
6401da177e4SLinus Torvalds {
64152bad64dSDavid Howells 	struct delayed_work *dwork = (struct delayed_work *)__data;
642*7a22ad75STejun Heo 	struct cpu_workqueue_struct *cwq = get_work_cwq(&dwork->work);
6431da177e4SLinus Torvalds 
6444690c4abSTejun Heo 	__queue_work(smp_processor_id(), cwq->wq, &dwork->work);
6451da177e4SLinus Torvalds }
6461da177e4SLinus Torvalds 
6470fcb78c2SRolf Eike Beer /**
6480fcb78c2SRolf Eike Beer  * queue_delayed_work - queue work on a workqueue after delay
6490fcb78c2SRolf Eike Beer  * @wq: workqueue to use
650af9997e4SRandy Dunlap  * @dwork: delayable work to queue
6510fcb78c2SRolf Eike Beer  * @delay: number of jiffies to wait before queueing
6520fcb78c2SRolf Eike Beer  *
653057647fcSAlan Stern  * Returns 0 if @dwork was already on a queue, non-zero otherwise.
6540fcb78c2SRolf Eike Beer  */
6557ad5b3a5SHarvey Harrison int queue_delayed_work(struct workqueue_struct *wq,
65652bad64dSDavid Howells 			struct delayed_work *dwork, unsigned long delay)
6571da177e4SLinus Torvalds {
65852bad64dSDavid Howells 	if (delay == 0)
65963bc0362SOleg Nesterov 		return queue_work(wq, &dwork->work);
6601da177e4SLinus Torvalds 
66163bc0362SOleg Nesterov 	return queue_delayed_work_on(-1, wq, dwork, delay);
6621da177e4SLinus Torvalds }
663ae90dd5dSDave Jones EXPORT_SYMBOL_GPL(queue_delayed_work);
6641da177e4SLinus Torvalds 
6650fcb78c2SRolf Eike Beer /**
6660fcb78c2SRolf Eike Beer  * queue_delayed_work_on - queue work on specific CPU after delay
6670fcb78c2SRolf Eike Beer  * @cpu: CPU number to execute work on
6680fcb78c2SRolf Eike Beer  * @wq: workqueue to use
669af9997e4SRandy Dunlap  * @dwork: work to queue
6700fcb78c2SRolf Eike Beer  * @delay: number of jiffies to wait before queueing
6710fcb78c2SRolf Eike Beer  *
672057647fcSAlan Stern  * Returns 0 if @dwork was already on a queue, non-zero otherwise.
6730fcb78c2SRolf Eike Beer  */
6747a6bc1cdSVenkatesh Pallipadi int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
67552bad64dSDavid Howells 			struct delayed_work *dwork, unsigned long delay)
6767a6bc1cdSVenkatesh Pallipadi {
6777a6bc1cdSVenkatesh Pallipadi 	int ret = 0;
67852bad64dSDavid Howells 	struct timer_list *timer = &dwork->timer;
67952bad64dSDavid Howells 	struct work_struct *work = &dwork->work;
6807a6bc1cdSVenkatesh Pallipadi 
68122df02bbSTejun Heo 	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
682*7a22ad75STejun Heo 		struct global_cwq *gcwq = get_work_gcwq(work);
683*7a22ad75STejun Heo 		unsigned int lcpu = gcwq ? gcwq->cpu : raw_smp_processor_id();
684*7a22ad75STejun Heo 
6857a6bc1cdSVenkatesh Pallipadi 		BUG_ON(timer_pending(timer));
6867a6bc1cdSVenkatesh Pallipadi 		BUG_ON(!list_empty(&work->entry));
6877a6bc1cdSVenkatesh Pallipadi 
6888a3e77ccSAndrew Liu 		timer_stats_timer_set_start_info(&dwork->timer);
689*7a22ad75STejun Heo 		/*
690*7a22ad75STejun Heo 		 * This stores cwq for the moment, for the timer_fn.
691*7a22ad75STejun Heo 		 * Note that the work's gcwq is preserved to allow
692*7a22ad75STejun Heo 		 * reentrance detection for delayed works.
693*7a22ad75STejun Heo 		 */
694*7a22ad75STejun Heo 		set_work_cwq(work, get_cwq(lcpu, wq), 0);
6957a6bc1cdSVenkatesh Pallipadi 		timer->expires = jiffies + delay;
69652bad64dSDavid Howells 		timer->data = (unsigned long)dwork;
6977a6bc1cdSVenkatesh Pallipadi 		timer->function = delayed_work_timer_fn;
69863bc0362SOleg Nesterov 
69963bc0362SOleg Nesterov 		if (unlikely(cpu >= 0))
7007a6bc1cdSVenkatesh Pallipadi 			add_timer_on(timer, cpu);
70163bc0362SOleg Nesterov 		else
70263bc0362SOleg Nesterov 			add_timer(timer);
7037a6bc1cdSVenkatesh Pallipadi 		ret = 1;
7047a6bc1cdSVenkatesh Pallipadi 	}
7057a6bc1cdSVenkatesh Pallipadi 	return ret;
7067a6bc1cdSVenkatesh Pallipadi }
707ae90dd5dSDave Jones EXPORT_SYMBOL_GPL(queue_delayed_work_on);
7081da177e4SLinus Torvalds 
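/*
 * Editorial sketch, not part of this file: queueing delayed work.
 * my_wq, my_dwork and my_timeout_fn are illustrative names.
 *
 *	static void my_timeout_fn(struct work_struct *work);
 *	static DECLARE_DELAYED_WORK(my_dwork, my_timeout_fn);
 *
 *	queue_delayed_work(my_wq, &my_dwork, msecs_to_jiffies(100));
 *	...
 *	cancel_delayed_work_sync(&my_dwork);
 */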
709c8e55f36STejun Heo /**
710c8e55f36STejun Heo  * worker_enter_idle - enter idle state
711c8e55f36STejun Heo  * @worker: worker which is entering idle state
712c8e55f36STejun Heo  *
713c8e55f36STejun Heo  * @worker is entering idle state.  Update stats and idle timer if
714c8e55f36STejun Heo  * necessary.
715c8e55f36STejun Heo  *
716c8e55f36STejun Heo  * LOCKING:
717c8e55f36STejun Heo  * spin_lock_irq(gcwq->lock).
718c8e55f36STejun Heo  */
719c8e55f36STejun Heo static void worker_enter_idle(struct worker *worker)
720c8e55f36STejun Heo {
721c8e55f36STejun Heo 	struct global_cwq *gcwq = worker->gcwq;
722c8e55f36STejun Heo 
723c8e55f36STejun Heo 	BUG_ON(worker->flags & WORKER_IDLE);
724c8e55f36STejun Heo 	BUG_ON(!list_empty(&worker->entry) &&
725c8e55f36STejun Heo 	       (worker->hentry.next || worker->hentry.pprev));
726c8e55f36STejun Heo 
727c8e55f36STejun Heo 	worker->flags |= WORKER_IDLE;
728c8e55f36STejun Heo 	gcwq->nr_idle++;
729c8e55f36STejun Heo 
730c8e55f36STejun Heo 	/* idle_list is LIFO */
731c8e55f36STejun Heo 	list_add(&worker->entry, &gcwq->idle_list);
732db7bccf4STejun Heo 
733db7bccf4STejun Heo 	if (unlikely(worker->flags & WORKER_ROGUE))
734db7bccf4STejun Heo 		wake_up_all(&gcwq->trustee_wait);
735c8e55f36STejun Heo }
736c8e55f36STejun Heo 
737c8e55f36STejun Heo /**
738c8e55f36STejun Heo  * worker_leave_idle - leave idle state
739c8e55f36STejun Heo  * @worker: worker which is leaving idle state
740c8e55f36STejun Heo  *
741c8e55f36STejun Heo  * @worker is leaving idle state.  Update stats.
742c8e55f36STejun Heo  *
743c8e55f36STejun Heo  * LOCKING:
744c8e55f36STejun Heo  * spin_lock_irq(gcwq->lock).
745c8e55f36STejun Heo  */
746c8e55f36STejun Heo static void worker_leave_idle(struct worker *worker)
747c8e55f36STejun Heo {
748c8e55f36STejun Heo 	struct global_cwq *gcwq = worker->gcwq;
749c8e55f36STejun Heo 
750c8e55f36STejun Heo 	BUG_ON(!(worker->flags & WORKER_IDLE));
751c8e55f36STejun Heo 	worker->flags &= ~WORKER_IDLE;
752c8e55f36STejun Heo 	gcwq->nr_idle--;
753c8e55f36STejun Heo 	list_del_init(&worker->entry);
754c8e55f36STejun Heo }
755c8e55f36STejun Heo 
756c34056a3STejun Heo static struct worker *alloc_worker(void)
757c34056a3STejun Heo {
758c34056a3STejun Heo 	struct worker *worker;
759c34056a3STejun Heo 
760c34056a3STejun Heo 	worker = kzalloc(sizeof(*worker), GFP_KERNEL);
761c8e55f36STejun Heo 	if (worker) {
762c8e55f36STejun Heo 		INIT_LIST_HEAD(&worker->entry);
763affee4b2STejun Heo 		INIT_LIST_HEAD(&worker->scheduled);
764c8e55f36STejun Heo 	}
765c34056a3STejun Heo 	return worker;
766c34056a3STejun Heo }
767c34056a3STejun Heo 
768c34056a3STejun Heo /**
769c34056a3STejun Heo  * create_worker - create a new workqueue worker
770c34056a3STejun Heo  * @cwq: cwq the new worker will belong to
771c34056a3STejun Heo  * @bind: whether to bind the new worker to its gcwq's cpu or not
772c34056a3STejun Heo  *
773c34056a3STejun Heo  * Create a new worker which is bound to @cwq.  The returned worker
774c34056a3STejun Heo  * can be started by calling start_worker() or destroyed using
775c34056a3STejun Heo  * destroy_worker().
776c34056a3STejun Heo  *
777c34056a3STejun Heo  * CONTEXT:
778c34056a3STejun Heo  * Might sleep.  Does GFP_KERNEL allocations.
779c34056a3STejun Heo  *
780c34056a3STejun Heo  * RETURNS:
781c34056a3STejun Heo  * Pointer to the newly created worker.
782c34056a3STejun Heo  */
783c34056a3STejun Heo static struct worker *create_worker(struct cpu_workqueue_struct *cwq, bool bind)
784c34056a3STejun Heo {
7858b03ae3cSTejun Heo 	struct global_cwq *gcwq = cwq->gcwq;
786c34056a3STejun Heo 	int id = -1;
787c34056a3STejun Heo 	struct worker *worker = NULL;
788c34056a3STejun Heo 
7898b03ae3cSTejun Heo 	spin_lock_irq(&gcwq->lock);
7908b03ae3cSTejun Heo 	while (ida_get_new(&gcwq->worker_ida, &id)) {
7918b03ae3cSTejun Heo 		spin_unlock_irq(&gcwq->lock);
7928b03ae3cSTejun Heo 		if (!ida_pre_get(&gcwq->worker_ida, GFP_KERNEL))
793c34056a3STejun Heo 			goto fail;
7948b03ae3cSTejun Heo 		spin_lock_irq(&gcwq->lock);
795c34056a3STejun Heo 	}
7968b03ae3cSTejun Heo 	spin_unlock_irq(&gcwq->lock);
797c34056a3STejun Heo 
798c34056a3STejun Heo 	worker = alloc_worker();
799c34056a3STejun Heo 	if (!worker)
800c34056a3STejun Heo 		goto fail;
801c34056a3STejun Heo 
8028b03ae3cSTejun Heo 	worker->gcwq = gcwq;
803c34056a3STejun Heo 	worker->cwq = cwq;
804c34056a3STejun Heo 	worker->id = id;
805c34056a3STejun Heo 
806c34056a3STejun Heo 	worker->task = kthread_create(worker_thread, worker, "kworker/%u:%d",
8078b03ae3cSTejun Heo 				      gcwq->cpu, id);
808c34056a3STejun Heo 	if (IS_ERR(worker->task))
809c34056a3STejun Heo 		goto fail;
810c34056a3STejun Heo 
811db7bccf4STejun Heo 	/*
812db7bccf4STejun Heo 	 * A rogue worker will become a regular one if the CPU comes
813db7bccf4STejun Heo 	 * online later on.  Make sure every worker has
814db7bccf4STejun Heo 	 * PF_THREAD_BOUND set.
815db7bccf4STejun Heo 	 */
816c34056a3STejun Heo 	if (bind)
8178b03ae3cSTejun Heo 		kthread_bind(worker->task, gcwq->cpu);
818db7bccf4STejun Heo 	else
819db7bccf4STejun Heo 		worker->task->flags |= PF_THREAD_BOUND;
820c34056a3STejun Heo 
821c34056a3STejun Heo 	return worker;
822c34056a3STejun Heo fail:
823c34056a3STejun Heo 	if (id >= 0) {
8248b03ae3cSTejun Heo 		spin_lock_irq(&gcwq->lock);
8258b03ae3cSTejun Heo 		ida_remove(&gcwq->worker_ida, id);
8268b03ae3cSTejun Heo 		spin_unlock_irq(&gcwq->lock);
827c34056a3STejun Heo 	}
828c34056a3STejun Heo 	kfree(worker);
829c34056a3STejun Heo 	return NULL;
830c34056a3STejun Heo }
831c34056a3STejun Heo 
832c34056a3STejun Heo /**
833c34056a3STejun Heo  * start_worker - start a newly created worker
834c34056a3STejun Heo  * @worker: worker to start
835c34056a3STejun Heo  *
836c8e55f36STejun Heo  * Make the gcwq aware of @worker and start it.
837c34056a3STejun Heo  *
838c34056a3STejun Heo  * CONTEXT:
8398b03ae3cSTejun Heo  * spin_lock_irq(gcwq->lock).
840c34056a3STejun Heo  */
841c34056a3STejun Heo static void start_worker(struct worker *worker)
842c34056a3STejun Heo {
843c8e55f36STejun Heo 	worker->flags |= WORKER_STARTED;
844c8e55f36STejun Heo 	worker->gcwq->nr_workers++;
845c8e55f36STejun Heo 	worker_enter_idle(worker);
846c34056a3STejun Heo 	wake_up_process(worker->task);
847c34056a3STejun Heo }
848c34056a3STejun Heo 
849c34056a3STejun Heo /**
850c34056a3STejun Heo  * destroy_worker - destroy a workqueue worker
851c34056a3STejun Heo  * @worker: worker to be destroyed
852c34056a3STejun Heo  *
853c8e55f36STejun Heo  * Destroy @worker and adjust @gcwq stats accordingly.
854c8e55f36STejun Heo  *
855c8e55f36STejun Heo  * CONTEXT:
856c8e55f36STejun Heo  * spin_lock_irq(gcwq->lock) which is released and regrabbed.
857c34056a3STejun Heo  */
858c34056a3STejun Heo static void destroy_worker(struct worker *worker)
859c34056a3STejun Heo {
8608b03ae3cSTejun Heo 	struct global_cwq *gcwq = worker->gcwq;
861c34056a3STejun Heo 	int id = worker->id;
862c34056a3STejun Heo 
863c34056a3STejun Heo 	/* sanity check frenzy */
864c34056a3STejun Heo 	BUG_ON(worker->current_work);
865affee4b2STejun Heo 	BUG_ON(!list_empty(&worker->scheduled));
866c34056a3STejun Heo 
867c8e55f36STejun Heo 	if (worker->flags & WORKER_STARTED)
868c8e55f36STejun Heo 		gcwq->nr_workers--;
869c8e55f36STejun Heo 	if (worker->flags & WORKER_IDLE)
870c8e55f36STejun Heo 		gcwq->nr_idle--;
871c8e55f36STejun Heo 
872c8e55f36STejun Heo 	list_del_init(&worker->entry);
873c8e55f36STejun Heo 	worker->flags |= WORKER_DIE;
874c8e55f36STejun Heo 
875c8e55f36STejun Heo 	spin_unlock_irq(&gcwq->lock);
876c8e55f36STejun Heo 
877c34056a3STejun Heo 	kthread_stop(worker->task);
878c34056a3STejun Heo 	kfree(worker);
879c34056a3STejun Heo 
8808b03ae3cSTejun Heo 	spin_lock_irq(&gcwq->lock);
8818b03ae3cSTejun Heo 	ida_remove(&gcwq->worker_ida, id);
882c34056a3STejun Heo }
883c34056a3STejun Heo 
884a62428c0STejun Heo /**
885affee4b2STejun Heo  * move_linked_works - move linked works to a list
886affee4b2STejun Heo  * @work: start of series of works to be scheduled
887affee4b2STejun Heo  * @head: target list to append @work to
888affee4b2STejun Heo  * @nextp: out parameter for nested worklist walking
889affee4b2STejun Heo  *
890affee4b2STejun Heo  * Move the series of linked works starting at @work to @head.  The
891affee4b2STejun Heo  * series includes @work and any consecutive work whose predecessor
892affee4b2STejun Heo  * has WORK_STRUCT_LINKED set.
893affee4b2STejun Heo  *
894affee4b2STejun Heo  * If @nextp is not NULL, it's updated to point to the next work of
895affee4b2STejun Heo  * the last scheduled work.  This allows move_linked_works() to be
896affee4b2STejun Heo  * nested inside outer list_for_each_entry_safe().
897affee4b2STejun Heo  *
898affee4b2STejun Heo  * CONTEXT:
8998b03ae3cSTejun Heo  * spin_lock_irq(gcwq->lock).
900affee4b2STejun Heo  */
901affee4b2STejun Heo static void move_linked_works(struct work_struct *work, struct list_head *head,
902affee4b2STejun Heo 			      struct work_struct **nextp)
903affee4b2STejun Heo {
904affee4b2STejun Heo 	struct work_struct *n;
905affee4b2STejun Heo 
906affee4b2STejun Heo 	/*
907affee4b2STejun Heo 	 * Linked worklist will always end before the end of the list,
908affee4b2STejun Heo 	 * use NULL for list head.
909affee4b2STejun Heo 	 */
910affee4b2STejun Heo 	list_for_each_entry_safe_from(work, n, NULL, entry) {
911affee4b2STejun Heo 		list_move_tail(&work->entry, head);
912affee4b2STejun Heo 		if (!(*work_data_bits(work) & WORK_STRUCT_LINKED))
913affee4b2STejun Heo 			break;
914affee4b2STejun Heo 	}
915affee4b2STejun Heo 
916affee4b2STejun Heo 	/*
917affee4b2STejun Heo 	 * If we're already inside safe list traversal and have moved
918affee4b2STejun Heo 	 * multiple works to the scheduled queue, the next position
919affee4b2STejun Heo 	 * needs to be updated.
920affee4b2STejun Heo 	 */
921affee4b2STejun Heo 	if (nextp)
922affee4b2STejun Heo 		*nextp = n;
923affee4b2STejun Heo }
924affee4b2STejun Heo 
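/*
 * Editorial note on WORK_STRUCT_LINKED, as used by
 * move_linked_works() above: a barrier work inserted directly after
 * its target (see insert_wq_barrier() below) leaves LINKED set on
 * the target, so moving the target drags the barrier along and the
 * two are never separated.
 */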
9251e19ffc6STejun Heo static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq)
9261e19ffc6STejun Heo {
9271e19ffc6STejun Heo 	struct work_struct *work = list_first_entry(&cwq->delayed_works,
9281e19ffc6STejun Heo 						    struct work_struct, entry);
9291e19ffc6STejun Heo 
9301e19ffc6STejun Heo 	move_linked_works(work, &cwq->worklist, NULL);
9311e19ffc6STejun Heo 	cwq->nr_active++;
9321e19ffc6STejun Heo }
9331e19ffc6STejun Heo 
934affee4b2STejun Heo /**
93573f53c4aSTejun Heo  * cwq_dec_nr_in_flight - decrement cwq's nr_in_flight
93673f53c4aSTejun Heo  * @cwq: cwq of interest
93773f53c4aSTejun Heo  * @color: color of work which left the queue
93873f53c4aSTejun Heo  *
93973f53c4aSTejun Heo  * A work either has completed or is removed from pending queue,
94073f53c4aSTejun Heo  * decrement nr_in_flight of its cwq and handle workqueue flushing.
94173f53c4aSTejun Heo  *
94273f53c4aSTejun Heo  * CONTEXT:
9438b03ae3cSTejun Heo  * spin_lock_irq(gcwq->lock).
94473f53c4aSTejun Heo  */
94573f53c4aSTejun Heo static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color)
94673f53c4aSTejun Heo {
94773f53c4aSTejun Heo 	/* ignore uncolored works */
94873f53c4aSTejun Heo 	if (color == WORK_NO_COLOR)
94973f53c4aSTejun Heo 		return;
95073f53c4aSTejun Heo 
95173f53c4aSTejun Heo 	cwq->nr_in_flight[color]--;
9521e19ffc6STejun Heo 	cwq->nr_active--;
9531e19ffc6STejun Heo 
954502ca9d8STejun Heo 	if (!list_empty(&cwq->delayed_works)) {
9551e19ffc6STejun Heo 		/* one down, submit a delayed one */
956502ca9d8STejun Heo 		if (cwq->nr_active < cwq->max_active)
9571e19ffc6STejun Heo 			cwq_activate_first_delayed(cwq);
958502ca9d8STejun Heo 	} else if (!cwq->nr_active && cwq->wq->flags & WQ_SINGLE_CPU) {
959502ca9d8STejun Heo 		/* this was the last work, unbind from single cpu */
960502ca9d8STejun Heo 		cwq_unbind_single_cpu(cwq);
961502ca9d8STejun Heo 	}
96273f53c4aSTejun Heo 
96373f53c4aSTejun Heo 	/* is flush in progress and are we at the flushing tip? */
96473f53c4aSTejun Heo 	if (likely(cwq->flush_color != color))
96573f53c4aSTejun Heo 		return;
96673f53c4aSTejun Heo 
96773f53c4aSTejun Heo 	/* are there still in-flight works? */
96873f53c4aSTejun Heo 	if (cwq->nr_in_flight[color])
96973f53c4aSTejun Heo 		return;
97073f53c4aSTejun Heo 
97173f53c4aSTejun Heo 	/* this cwq is done, clear flush_color */
97273f53c4aSTejun Heo 	cwq->flush_color = -1;
97373f53c4aSTejun Heo 
97473f53c4aSTejun Heo 	/*
97573f53c4aSTejun Heo 	 * If this was the last cwq, wake up the first flusher.  It
97673f53c4aSTejun Heo 	 * will handle the rest.
97773f53c4aSTejun Heo 	 */
97873f53c4aSTejun Heo 	if (atomic_dec_and_test(&cwq->wq->nr_cwqs_to_flush))
97973f53c4aSTejun Heo 		complete(&cwq->wq->first_flusher->done);
98073f53c4aSTejun Heo }
98173f53c4aSTejun Heo 
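/*
 * Editorial example of the color scheme handled above (illustrative
 * numbers): suppose flush_workqueue() tagged a flush with color 2
 * while three works of color 2 were in flight, so nr_in_flight[2]
 * == 3 and cwq->flush_color == 2.  Each completed (or cancelled)
 * work of color 2 ends up here; the third one clears flush_color
 * and, once every cwq has drained via wq->nr_cwqs_to_flush, wakes
 * the first flusher.
 */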
98273f53c4aSTejun Heo /**
983a62428c0STejun Heo  * process_one_work - process single work
984c34056a3STejun Heo  * @worker: self
985a62428c0STejun Heo  * @work: work to process
986a62428c0STejun Heo  *
987a62428c0STejun Heo  * Process @work.  This function contains all the logic necessary to
988a62428c0STejun Heo  * process a single work including synchronization against and
989a62428c0STejun Heo  * interaction with other workers on the same cpu, queueing and
990a62428c0STejun Heo  * flushing.  As long as context requirement is met, any worker can
991a62428c0STejun Heo  * call this function to process a work.
992a62428c0STejun Heo  *
993a62428c0STejun Heo  * CONTEXT:
9948b03ae3cSTejun Heo  * spin_lock_irq(gcwq->lock) which is released and regrabbed.
995a62428c0STejun Heo  */
996c34056a3STejun Heo static void process_one_work(struct worker *worker, struct work_struct *work)
9971da177e4SLinus Torvalds {
998c34056a3STejun Heo 	struct cpu_workqueue_struct *cwq = worker->cwq;
9998b03ae3cSTejun Heo 	struct global_cwq *gcwq = cwq->gcwq;
1000c8e55f36STejun Heo 	struct hlist_head *bwh = busy_worker_head(gcwq, work);
10016bb49e59SDavid Howells 	work_func_t f = work->func;
100273f53c4aSTejun Heo 	int work_color;
10034e6045f1SJohannes Berg #ifdef CONFIG_LOCKDEP
10044e6045f1SJohannes Berg 	/*
1005a62428c0STejun Heo 	 * It is permissible to free the struct work_struct from
1006a62428c0STejun Heo 	 * inside the function that is called from it; we need to take
1007a62428c0STejun Heo 	 * that into account for lockdep too.  To avoid bogus "held
1008a62428c0STejun Heo 	 * lock freed" warnings as well as problems when looking into
1009a62428c0STejun Heo 	 * work->lockdep_map, make a copy and use that here.
10104e6045f1SJohannes Berg 	 */
10114e6045f1SJohannes Berg 	struct lockdep_map lockdep_map = work->lockdep_map;
10124e6045f1SJohannes Berg #endif
1013a62428c0STejun Heo 	/* claim and process */
1014dc186ad7SThomas Gleixner 	debug_work_deactivate(work);
1015c8e55f36STejun Heo 	hlist_add_head(&worker->hentry, bwh);
1016c34056a3STejun Heo 	worker->current_work = work;
10178cca0eeaSTejun Heo 	worker->current_cwq = cwq;
101873f53c4aSTejun Heo 	work_color = get_work_color(work);
1019*7a22ad75STejun Heo 
1020*7a22ad75STejun Heo 	BUG_ON(get_work_cwq(work) != cwq);
1021*7a22ad75STejun Heo 	/* record the current cpu number in the work data and dequeue */
1022*7a22ad75STejun Heo 	set_work_cpu(work, gcwq->cpu);
1023a62428c0STejun Heo 	list_del_init(&work->entry);
1024a62428c0STejun Heo 
10258b03ae3cSTejun Heo 	spin_unlock_irq(&gcwq->lock);
10261da177e4SLinus Torvalds 
102723b2e599SOleg Nesterov 	work_clear_pending(work);
10283295f0efSIngo Molnar 	lock_map_acquire(&cwq->wq->lockdep_map);
10293295f0efSIngo Molnar 	lock_map_acquire(&lockdep_map);
103065f27f38SDavid Howells 	f(work);
10313295f0efSIngo Molnar 	lock_map_release(&lockdep_map);
10323295f0efSIngo Molnar 	lock_map_release(&cwq->wq->lockdep_map);
10331da177e4SLinus Torvalds 
1034d5abe669SPeter Zijlstra 	if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
1035d5abe669SPeter Zijlstra 		printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
1036d5abe669SPeter Zijlstra 		       "%s/0x%08x/%d\n",
1037a62428c0STejun Heo 		       current->comm, preempt_count(), task_pid_nr(current));
1038d5abe669SPeter Zijlstra 		printk(KERN_ERR "    last function: ");
1039d5abe669SPeter Zijlstra 		print_symbol("%s\n", (unsigned long)f);
1040d5abe669SPeter Zijlstra 		debug_show_held_locks(current);
1041d5abe669SPeter Zijlstra 		dump_stack();
1042d5abe669SPeter Zijlstra 	}
1043d5abe669SPeter Zijlstra 
10448b03ae3cSTejun Heo 	spin_lock_irq(&gcwq->lock);
1045a62428c0STejun Heo 
1046a62428c0STejun Heo 	/* we're done with it, release */
1047c8e55f36STejun Heo 	hlist_del_init(&worker->hentry);
1048c34056a3STejun Heo 	worker->current_work = NULL;
10498cca0eeaSTejun Heo 	worker->current_cwq = NULL;
105073f53c4aSTejun Heo 	cwq_dec_nr_in_flight(cwq, work_color);
10511da177e4SLinus Torvalds }
1052a62428c0STejun Heo 
1053affee4b2STejun Heo /**
1054affee4b2STejun Heo  * process_scheduled_works - process scheduled works
1055affee4b2STejun Heo  * @worker: self
1056affee4b2STejun Heo  *
1057affee4b2STejun Heo  * Process all scheduled works.  Please note that the scheduled list
1058affee4b2STejun Heo  * may change while processing a work, so this function repeatedly
1059affee4b2STejun Heo  * fetches a work from the top and executes it.
1060affee4b2STejun Heo  *
1061affee4b2STejun Heo  * CONTEXT:
10628b03ae3cSTejun Heo  * spin_lock_irq(gcwq->lock) which may be released and regrabbed
1063affee4b2STejun Heo  * multiple times.
1064affee4b2STejun Heo  */
1065affee4b2STejun Heo static void process_scheduled_works(struct worker *worker)
1066a62428c0STejun Heo {
1067affee4b2STejun Heo 	while (!list_empty(&worker->scheduled)) {
1068affee4b2STejun Heo 		struct work_struct *work = list_first_entry(&worker->scheduled,
1069a62428c0STejun Heo 						struct work_struct, entry);
1070c34056a3STejun Heo 		process_one_work(worker, work);
1071a62428c0STejun Heo 	}
10721da177e4SLinus Torvalds }
10731da177e4SLinus Torvalds 
10744690c4abSTejun Heo /**
10754690c4abSTejun Heo  * worker_thread - the worker thread function
1076c34056a3STejun Heo  * @__worker: self
10774690c4abSTejun Heo  *
10784690c4abSTejun Heo  * The cwq worker thread function.
10794690c4abSTejun Heo  */
1080c34056a3STejun Heo static int worker_thread(void *__worker)
10811da177e4SLinus Torvalds {
1082c34056a3STejun Heo 	struct worker *worker = __worker;
10838b03ae3cSTejun Heo 	struct global_cwq *gcwq = worker->gcwq;
1084c34056a3STejun Heo 	struct cpu_workqueue_struct *cwq = worker->cwq;
10851da177e4SLinus Torvalds 
1086c8e55f36STejun Heo woke_up:
10878b03ae3cSTejun Heo 	spin_lock_irq(&gcwq->lock);
1088affee4b2STejun Heo 
1089c8e55f36STejun Heo 	/* DIE can be set only while we're idle, so checking here is enough */
1090c8e55f36STejun Heo 	if (worker->flags & WORKER_DIE) {
1091c8e55f36STejun Heo 		spin_unlock_irq(&gcwq->lock);
1092c8e55f36STejun Heo 		return 0;
1093c8e55f36STejun Heo 	}
1094c8e55f36STejun Heo 
1095c8e55f36STejun Heo 	worker_leave_idle(worker);
1096db7bccf4STejun Heo recheck:
1097c8e55f36STejun Heo 	/*
1098c8e55f36STejun Heo 	 * ->scheduled list can only be filled while a worker is
1099c8e55f36STejun Heo 	 * preparing to process a work or actually processing it.
1100c8e55f36STejun Heo 	 * Make sure nobody diddled with it while I was sleeping.
1101c8e55f36STejun Heo 	 */
1102c8e55f36STejun Heo 	BUG_ON(!list_empty(&worker->scheduled));
1103c8e55f36STejun Heo 
1104affee4b2STejun Heo 	while (!list_empty(&cwq->worklist)) {
1105affee4b2STejun Heo 		struct work_struct *work =
1106affee4b2STejun Heo 			list_first_entry(&cwq->worklist,
1107affee4b2STejun Heo 					 struct work_struct, entry);
1108affee4b2STejun Heo 
1109db7bccf4STejun Heo 		/*
1110db7bccf4STejun Heo 		 * The following is a rather inefficient way to close
1111db7bccf4STejun Heo 		 * the race window against cpu hotplug operations.  It will
1112db7bccf4STejun Heo 		 * be replaced soon.
1113db7bccf4STejun Heo 		 */
1114db7bccf4STejun Heo 		if (unlikely(!(worker->flags & WORKER_ROGUE) &&
1115db7bccf4STejun Heo 			     !cpumask_equal(&worker->task->cpus_allowed,
1116db7bccf4STejun Heo 					    get_cpu_mask(gcwq->cpu)))) {
1117db7bccf4STejun Heo 			spin_unlock_irq(&gcwq->lock);
1118db7bccf4STejun Heo 			set_cpus_allowed_ptr(worker->task,
1119db7bccf4STejun Heo 					     get_cpu_mask(gcwq->cpu));
1120db7bccf4STejun Heo 			cpu_relax();
1121db7bccf4STejun Heo 			spin_lock_irq(&gcwq->lock);
1122db7bccf4STejun Heo 			goto recheck;
1123db7bccf4STejun Heo 		}
1124db7bccf4STejun Heo 
1125c8e55f36STejun Heo 		if (likely(!(*work_data_bits(work) & WORK_STRUCT_LINKED))) {
1126affee4b2STejun Heo 			/* optimization path, not strictly necessary */
1127affee4b2STejun Heo 			process_one_work(worker, work);
1128affee4b2STejun Heo 			if (unlikely(!list_empty(&worker->scheduled)))
1129affee4b2STejun Heo 				process_scheduled_works(worker);
1130affee4b2STejun Heo 		} else {
1131c8e55f36STejun Heo 			move_linked_works(work, &worker->scheduled, NULL);
1132affee4b2STejun Heo 			process_scheduled_works(worker);
1133affee4b2STejun Heo 		}
1134affee4b2STejun Heo 	}
1135affee4b2STejun Heo 
1136c8e55f36STejun Heo 	/*
1137c8e55f36STejun Heo 	 * gcwq->lock is held and there's no work to process, sleep.
1138c8e55f36STejun Heo 	 * Workers are woken up only while holding gcwq->lock, so
1139c8e55f36STejun Heo 	 * setting the current state before releasing gcwq->lock is
1140c8e55f36STejun Heo 	 * enough to prevent losing any event.
1141c8e55f36STejun Heo 	 */
1142c8e55f36STejun Heo 	worker_enter_idle(worker);
1143c8e55f36STejun Heo 	__set_current_state(TASK_INTERRUPTIBLE);
11448b03ae3cSTejun Heo 	spin_unlock_irq(&gcwq->lock);
1145c8e55f36STejun Heo 	schedule();
1146c8e55f36STejun Heo 	goto woke_up;
11471da177e4SLinus Torvalds }
11481da177e4SLinus Torvalds 
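/*
 * Barrier work used for flushing.  It is inserted right after the
 * work being flushed and its function simply completes @done,
 * signaling that the flushed work has finished executing.
 */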
1149fc2e4d70SOleg Nesterov struct wq_barrier {
1150fc2e4d70SOleg Nesterov 	struct work_struct	work;
1151fc2e4d70SOleg Nesterov 	struct completion	done;
1152fc2e4d70SOleg Nesterov };
1153fc2e4d70SOleg Nesterov 
1154fc2e4d70SOleg Nesterov static void wq_barrier_func(struct work_struct *work)
1155fc2e4d70SOleg Nesterov {
1156fc2e4d70SOleg Nesterov 	struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
1157fc2e4d70SOleg Nesterov 	complete(&barr->done);
1158fc2e4d70SOleg Nesterov }
1159fc2e4d70SOleg Nesterov 
11604690c4abSTejun Heo /**
11614690c4abSTejun Heo  * insert_wq_barrier - insert a barrier work
11624690c4abSTejun Heo  * @cwq: cwq to insert barrier into
11634690c4abSTejun Heo  * @barr: wq_barrier to insert
1164affee4b2STejun Heo  * @target: target work to attach @barr to
1165affee4b2STejun Heo  * @worker: worker currently executing @target, NULL if @target is not executing
11664690c4abSTejun Heo  *
1167affee4b2STejun Heo  * @barr is linked to @target such that @barr is completed only after
1168affee4b2STejun Heo  * @target finishes execution.  Please note that the ordering
1169affee4b2STejun Heo  * guarantee is observed only with respect to @target and on the local
1170affee4b2STejun Heo  * cpu.
1171affee4b2STejun Heo  *
1172affee4b2STejun Heo  * Currently, a queued barrier can't be canceled.  This is because
1173affee4b2STejun Heo  * try_to_grab_pending() can't determine whether the work to be
1174affee4b2STejun Heo  * grabbed is at the head of the queue, and thus can't clear the
1175affee4b2STejun Heo  * LINKED flag of the previous work, while there must be a valid
1176affee4b2STejun Heo  * next work after a work with the LINKED flag set.
1177affee4b2STejun Heo  *
1178affee4b2STejun Heo  * Note that when @worker is non-NULL, @target may be modified
1179affee4b2STejun Heo  * underneath us, so we can't reliably determine cwq from @target.
11804690c4abSTejun Heo  *
11814690c4abSTejun Heo  * CONTEXT:
11828b03ae3cSTejun Heo  * spin_lock_irq(gcwq->lock).
11834690c4abSTejun Heo  */
118483c22520SOleg Nesterov static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
1185affee4b2STejun Heo 			      struct wq_barrier *barr,
1186affee4b2STejun Heo 			      struct work_struct *target, struct worker *worker)
1187fc2e4d70SOleg Nesterov {
1188affee4b2STejun Heo 	struct list_head *head;
1189affee4b2STejun Heo 	unsigned int linked = 0;
1190affee4b2STejun Heo 
1191dc186ad7SThomas Gleixner 	/*
11928b03ae3cSTejun Heo 	 * debugobject calls are safe here even with gcwq->lock locked
1193dc186ad7SThomas Gleixner 	 * as we know for sure that this will not trigger any of the
1194dc186ad7SThomas Gleixner 	 * checks and call back into the fixup functions where we
1195dc186ad7SThomas Gleixner 	 * might deadlock.
1196dc186ad7SThomas Gleixner 	 */
1197dc186ad7SThomas Gleixner 	INIT_WORK_ON_STACK(&barr->work, wq_barrier_func);
119822df02bbSTejun Heo 	__set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work));
1199fc2e4d70SOleg Nesterov 	init_completion(&barr->done);
120083c22520SOleg Nesterov 
1201affee4b2STejun Heo 	/*
1202affee4b2STejun Heo 	 * If @target is currently being executed, schedule the
1203affee4b2STejun Heo 	 * barrier to the worker; otherwise, put it after @target.
1204affee4b2STejun Heo 	 */
1205affee4b2STejun Heo 	if (worker)
1206affee4b2STejun Heo 		head = worker->scheduled.next;
1207affee4b2STejun Heo 	else {
1208affee4b2STejun Heo 		unsigned long *bits = work_data_bits(target);
1209affee4b2STejun Heo 
1210affee4b2STejun Heo 		head = target->entry.next;
1211affee4b2STejun Heo 		/* there can already be other linked works, inherit and set */
1212affee4b2STejun Heo 		linked = *bits & WORK_STRUCT_LINKED;
1213affee4b2STejun Heo 		__set_bit(WORK_STRUCT_LINKED_BIT, bits);
1214affee4b2STejun Heo 	}
1215affee4b2STejun Heo 
1216dc186ad7SThomas Gleixner 	debug_work_activate(&barr->work);
1217affee4b2STejun Heo 	insert_work(cwq, &barr->work, head,
1218affee4b2STejun Heo 		    work_color_to_flags(WORK_NO_COLOR) | linked);
1219fc2e4d70SOleg Nesterov }
1220fc2e4d70SOleg Nesterov 
122173f53c4aSTejun Heo /**
122273f53c4aSTejun Heo  * flush_workqueue_prep_cwqs - prepare cwqs for workqueue flushing
122373f53c4aSTejun Heo  * @wq: workqueue being flushed
122473f53c4aSTejun Heo  * @flush_color: new flush color, < 0 for no-op
122573f53c4aSTejun Heo  * @work_color: new work color, < 0 for no-op
122673f53c4aSTejun Heo  *
122773f53c4aSTejun Heo  * Prepare cwqs for workqueue flushing.
122873f53c4aSTejun Heo  *
122973f53c4aSTejun Heo  * If @flush_color is non-negative, flush_color on all cwqs should be
123073f53c4aSTejun Heo  * -1.  If no cwq has in-flight commands at the specified color, every
123173f53c4aSTejun Heo  * cwq->flush_color stays at -1 and %false is returned.  If any cwq
123273f53c4aSTejun Heo  * has in-flight commands, its cwq->flush_color is set to
123373f53c4aSTejun Heo  * @flush_color, @wq->nr_cwqs_to_flush is updated accordingly, the
123473f53c4aSTejun Heo  * cwq wakeup logic is armed and %true is returned.
123573f53c4aSTejun Heo  *
123673f53c4aSTejun Heo  * The caller should have initialized @wq->first_flusher prior to
123773f53c4aSTejun Heo  * calling this function with non-negative @flush_color.  If
123873f53c4aSTejun Heo  * @flush_color is negative, no flush color update is done and %false
123973f53c4aSTejun Heo  * is returned.
124073f53c4aSTejun Heo  *
124173f53c4aSTejun Heo  * If @work_color is non-negative, all cwqs should have the same
124273f53c4aSTejun Heo  * work_color which is previous to @work_color and all will be
124373f53c4aSTejun Heo  * advanced to @work_color.
124473f53c4aSTejun Heo  *
124573f53c4aSTejun Heo  * CONTEXT:
124673f53c4aSTejun Heo  * mutex_lock(wq->flush_mutex).
124773f53c4aSTejun Heo  *
124873f53c4aSTejun Heo  * RETURNS:
124973f53c4aSTejun Heo  * %true if @flush_color >= 0 and there's something to flush.  %false
125073f53c4aSTejun Heo  * otherwise.
125173f53c4aSTejun Heo  */
125273f53c4aSTejun Heo static bool flush_workqueue_prep_cwqs(struct workqueue_struct *wq,
125373f53c4aSTejun Heo 				      int flush_color, int work_color)
12541da177e4SLinus Torvalds {
125573f53c4aSTejun Heo 	bool wait = false;
125673f53c4aSTejun Heo 	unsigned int cpu;
12571da177e4SLinus Torvalds 
125873f53c4aSTejun Heo 	if (flush_color >= 0) {
125973f53c4aSTejun Heo 		BUG_ON(atomic_read(&wq->nr_cwqs_to_flush));
126073f53c4aSTejun Heo 		atomic_set(&wq->nr_cwqs_to_flush, 1);
126173f53c4aSTejun Heo 	}
126273f53c4aSTejun Heo 
126373f53c4aSTejun Heo 	for_each_possible_cpu(cpu) {
126473f53c4aSTejun Heo 		struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
12658b03ae3cSTejun Heo 		struct global_cwq *gcwq = cwq->gcwq;
12662355b70fSLai Jiangshan 
12678b03ae3cSTejun Heo 		spin_lock_irq(&gcwq->lock);
126873f53c4aSTejun Heo 
126973f53c4aSTejun Heo 		if (flush_color >= 0) {
127073f53c4aSTejun Heo 			BUG_ON(cwq->flush_color != -1);
127173f53c4aSTejun Heo 
127273f53c4aSTejun Heo 			if (cwq->nr_in_flight[flush_color]) {
127373f53c4aSTejun Heo 				cwq->flush_color = flush_color;
127473f53c4aSTejun Heo 				atomic_inc(&wq->nr_cwqs_to_flush);
127573f53c4aSTejun Heo 				wait = true;
127683c22520SOleg Nesterov 			}
127773f53c4aSTejun Heo 		}
127873f53c4aSTejun Heo 
127973f53c4aSTejun Heo 		if (work_color >= 0) {
128073f53c4aSTejun Heo 			BUG_ON(work_color != work_next_color(cwq->work_color));
128173f53c4aSTejun Heo 			cwq->work_color = work_color;
128273f53c4aSTejun Heo 		}
128373f53c4aSTejun Heo 
12848b03ae3cSTejun Heo 		spin_unlock_irq(&gcwq->lock);
1285dc186ad7SThomas Gleixner 	}
128614441960SOleg Nesterov 
128773f53c4aSTejun Heo 	if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_cwqs_to_flush))
128873f53c4aSTejun Heo 		complete(&wq->first_flusher->done);
128973f53c4aSTejun Heo 
129073f53c4aSTejun Heo 	return wait;
129183c22520SOleg Nesterov }
12921da177e4SLinus Torvalds 
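/*
 * A short walk-through of the color machinery (an illustration only;
 * C stands for an arbitrary color and wrap-around is ignored): a
 * flusher arriving while wq->work_color == C makes C its flush_color,
 * advances work_color and ends up calling
 * flush_workqueue_prep_cwqs(wq, C, C+1).  Each cwq which still has
 * nr_in_flight[C] works records cwq->flush_color = C and bumps
 * wq->nr_cwqs_to_flush; as those works retire the counter drops and
 * the last decrement completes wq->first_flusher->done, waking the
 * flusher in flush_workqueue() below.
 */
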
12930fcb78c2SRolf Eike Beer /**
12941da177e4SLinus Torvalds  * flush_workqueue - ensure that any scheduled work has run to completion.
12950fcb78c2SRolf Eike Beer  * @wq: workqueue to flush
12961da177e4SLinus Torvalds  *
12971da177e4SLinus Torvalds  * Forces execution of the workqueue and blocks until its completion.
12981da177e4SLinus Torvalds  * This is typically used in driver shutdown handlers.
12991da177e4SLinus Torvalds  *
1300fc2e4d70SOleg Nesterov  * We sleep until all works which were queued on entry have been handled,
1301fc2e4d70SOleg Nesterov  * but we are not livelocked by new incoming ones.
13021da177e4SLinus Torvalds  */
13037ad5b3a5SHarvey Harrison void flush_workqueue(struct workqueue_struct *wq)
13041da177e4SLinus Torvalds {
130573f53c4aSTejun Heo 	struct wq_flusher this_flusher = {
130673f53c4aSTejun Heo 		.list = LIST_HEAD_INIT(this_flusher.list),
130773f53c4aSTejun Heo 		.flush_color = -1,
130873f53c4aSTejun Heo 		.done = COMPLETION_INITIALIZER_ONSTACK(this_flusher.done),
130973f53c4aSTejun Heo 	};
131073f53c4aSTejun Heo 	int next_color;
1311b1f4ec17SOleg Nesterov 
13123295f0efSIngo Molnar 	lock_map_acquire(&wq->lockdep_map);
13133295f0efSIngo Molnar 	lock_map_release(&wq->lockdep_map);
131473f53c4aSTejun Heo 
131573f53c4aSTejun Heo 	mutex_lock(&wq->flush_mutex);
131673f53c4aSTejun Heo 
131773f53c4aSTejun Heo 	/*
131873f53c4aSTejun Heo 	 * Start-to-wait phase
131973f53c4aSTejun Heo 	 */
132073f53c4aSTejun Heo 	next_color = work_next_color(wq->work_color);
132173f53c4aSTejun Heo 
132273f53c4aSTejun Heo 	if (next_color != wq->flush_color) {
132373f53c4aSTejun Heo 		/*
132473f53c4aSTejun Heo 		 * Color space is not full.  The current work_color
132573f53c4aSTejun Heo 		 * becomes our flush_color and work_color is advanced
132673f53c4aSTejun Heo 		 * by one.
132773f53c4aSTejun Heo 		 */
132873f53c4aSTejun Heo 		BUG_ON(!list_empty(&wq->flusher_overflow));
132973f53c4aSTejun Heo 		this_flusher.flush_color = wq->work_color;
133073f53c4aSTejun Heo 		wq->work_color = next_color;
133173f53c4aSTejun Heo 
133273f53c4aSTejun Heo 		if (!wq->first_flusher) {
133373f53c4aSTejun Heo 			/* no flush in progress, become the first flusher */
133473f53c4aSTejun Heo 			BUG_ON(wq->flush_color != this_flusher.flush_color);
133573f53c4aSTejun Heo 
133673f53c4aSTejun Heo 			wq->first_flusher = &this_flusher;
133773f53c4aSTejun Heo 
133873f53c4aSTejun Heo 			if (!flush_workqueue_prep_cwqs(wq, wq->flush_color,
133973f53c4aSTejun Heo 						       wq->work_color)) {
134073f53c4aSTejun Heo 				/* nothing to flush, done */
134173f53c4aSTejun Heo 				wq->flush_color = next_color;
134273f53c4aSTejun Heo 				wq->first_flusher = NULL;
134373f53c4aSTejun Heo 				goto out_unlock;
134473f53c4aSTejun Heo 			}
134573f53c4aSTejun Heo 		} else {
134673f53c4aSTejun Heo 			/* wait in queue */
134773f53c4aSTejun Heo 			BUG_ON(wq->flush_color == this_flusher.flush_color);
134873f53c4aSTejun Heo 			list_add_tail(&this_flusher.list, &wq->flusher_queue);
134973f53c4aSTejun Heo 			flush_workqueue_prep_cwqs(wq, -1, wq->work_color);
135073f53c4aSTejun Heo 		}
135173f53c4aSTejun Heo 	} else {
135273f53c4aSTejun Heo 		/*
135373f53c4aSTejun Heo 		 * Oops, color space is full, wait on overflow queue.
135473f53c4aSTejun Heo 		 * The next flush completion will assign us
135573f53c4aSTejun Heo 		 * flush_color and transfer to flusher_queue.
135673f53c4aSTejun Heo 		 */
135773f53c4aSTejun Heo 		list_add_tail(&this_flusher.list, &wq->flusher_overflow);
135873f53c4aSTejun Heo 	}
135973f53c4aSTejun Heo 
136073f53c4aSTejun Heo 	mutex_unlock(&wq->flush_mutex);
136173f53c4aSTejun Heo 
136273f53c4aSTejun Heo 	wait_for_completion(&this_flusher.done);
136373f53c4aSTejun Heo 
136473f53c4aSTejun Heo 	/*
136573f53c4aSTejun Heo 	 * Wake-up-and-cascade phase
136673f53c4aSTejun Heo 	 *
136773f53c4aSTejun Heo 	 * First flushers are responsible for cascading flushes and
136873f53c4aSTejun Heo 	 * handling overflow.  Non-first flushers can simply return.
136973f53c4aSTejun Heo 	 */
137073f53c4aSTejun Heo 	if (wq->first_flusher != &this_flusher)
137173f53c4aSTejun Heo 		return;
137273f53c4aSTejun Heo 
137373f53c4aSTejun Heo 	mutex_lock(&wq->flush_mutex);
137473f53c4aSTejun Heo 
137573f53c4aSTejun Heo 	wq->first_flusher = NULL;
137673f53c4aSTejun Heo 
137773f53c4aSTejun Heo 	BUG_ON(!list_empty(&this_flusher.list));
137873f53c4aSTejun Heo 	BUG_ON(wq->flush_color != this_flusher.flush_color);
137973f53c4aSTejun Heo 
138073f53c4aSTejun Heo 	while (true) {
138173f53c4aSTejun Heo 		struct wq_flusher *next, *tmp;
138273f53c4aSTejun Heo 
138373f53c4aSTejun Heo 		/* complete all the flushers sharing the current flush color */
138473f53c4aSTejun Heo 		list_for_each_entry_safe(next, tmp, &wq->flusher_queue, list) {
138573f53c4aSTejun Heo 			if (next->flush_color != wq->flush_color)
138673f53c4aSTejun Heo 				break;
138773f53c4aSTejun Heo 			list_del_init(&next->list);
138873f53c4aSTejun Heo 			complete(&next->done);
138973f53c4aSTejun Heo 		}
139073f53c4aSTejun Heo 
139173f53c4aSTejun Heo 		BUG_ON(!list_empty(&wq->flusher_overflow) &&
139273f53c4aSTejun Heo 		       wq->flush_color != work_next_color(wq->work_color));
139373f53c4aSTejun Heo 
139473f53c4aSTejun Heo 		/* this flush_color is finished, advance by one */
139573f53c4aSTejun Heo 		wq->flush_color = work_next_color(wq->flush_color);
139673f53c4aSTejun Heo 
139773f53c4aSTejun Heo 		/* one color has been freed, handle overflow queue */
139873f53c4aSTejun Heo 		if (!list_empty(&wq->flusher_overflow)) {
139973f53c4aSTejun Heo 			/*
140073f53c4aSTejun Heo 			 * Assign the same color to all overflowed
140173f53c4aSTejun Heo 			 * flushers, advance work_color and append to
140273f53c4aSTejun Heo 			 * flusher_queue.  This is the start-to-wait
140373f53c4aSTejun Heo 			 * phase for these overflowed flushers.
140473f53c4aSTejun Heo 			 */
140573f53c4aSTejun Heo 			list_for_each_entry(tmp, &wq->flusher_overflow, list)
140673f53c4aSTejun Heo 				tmp->flush_color = wq->work_color;
140773f53c4aSTejun Heo 
140873f53c4aSTejun Heo 			wq->work_color = work_next_color(wq->work_color);
140973f53c4aSTejun Heo 
141073f53c4aSTejun Heo 			list_splice_tail_init(&wq->flusher_overflow,
141173f53c4aSTejun Heo 					      &wq->flusher_queue);
141273f53c4aSTejun Heo 			flush_workqueue_prep_cwqs(wq, -1, wq->work_color);
141373f53c4aSTejun Heo 		}
141473f53c4aSTejun Heo 
141573f53c4aSTejun Heo 		if (list_empty(&wq->flusher_queue)) {
141673f53c4aSTejun Heo 			BUG_ON(wq->flush_color != wq->work_color);
141773f53c4aSTejun Heo 			break;
141873f53c4aSTejun Heo 		}
141973f53c4aSTejun Heo 
142073f53c4aSTejun Heo 		/*
142173f53c4aSTejun Heo 		 * Need to flush more colors.  Make the next flusher
142273f53c4aSTejun Heo 		 * the new first flusher and arm cwqs.
142373f53c4aSTejun Heo 		 */
142473f53c4aSTejun Heo 		BUG_ON(wq->flush_color == wq->work_color);
142573f53c4aSTejun Heo 		BUG_ON(wq->flush_color != next->flush_color);
142673f53c4aSTejun Heo 
142773f53c4aSTejun Heo 		list_del_init(&next->list);
142873f53c4aSTejun Heo 		wq->first_flusher = next;
142973f53c4aSTejun Heo 
143073f53c4aSTejun Heo 		if (flush_workqueue_prep_cwqs(wq, wq->flush_color, -1))
143173f53c4aSTejun Heo 			break;
143273f53c4aSTejun Heo 
143373f53c4aSTejun Heo 		/*
143473f53c4aSTejun Heo 		 * Meh... this color is already done, clear first
143573f53c4aSTejun Heo 		 * flusher and repeat cascading.
143673f53c4aSTejun Heo 		 */
143773f53c4aSTejun Heo 		wq->first_flusher = NULL;
143873f53c4aSTejun Heo 	}
143973f53c4aSTejun Heo 
144073f53c4aSTejun Heo out_unlock:
144173f53c4aSTejun Heo 	mutex_unlock(&wq->flush_mutex);
14421da177e4SLinus Torvalds }
1443ae90dd5dSDave Jones EXPORT_SYMBOL_GPL(flush_workqueue);
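
/*
 * Typical use in a driver teardown path (a sketch; my_stop_queuing()
 * and dev->wq are hypothetical names, not part of this file):
 *
 *	my_stop_queuing(dev);		stop all sources of new works
 *	flush_workqueue(dev->wq);	wait for everything queued
 *	destroy_workqueue(dev->wq);	now safe to tear down
 */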
14441da177e4SLinus Torvalds 
1445db700897SOleg Nesterov /**
1446db700897SOleg Nesterov  * flush_work - block until a work_struct's callback has terminated
1447db700897SOleg Nesterov  * @work: the work which is to be flushed
1448db700897SOleg Nesterov  *
1449a67da70dSOleg Nesterov  * Returns false if @work has already terminated.
1450a67da70dSOleg Nesterov  *
1451db700897SOleg Nesterov  * It is expected that, prior to calling flush_work(), the caller has
1452db700897SOleg Nesterov  * arranged for the work to not be requeued, otherwise it doesn't make
1453db700897SOleg Nesterov  * sense to use this function.
1454db700897SOleg Nesterov  */
1455db700897SOleg Nesterov int flush_work(struct work_struct *work)
1456db700897SOleg Nesterov {
1457affee4b2STejun Heo 	struct worker *worker = NULL;
14588b03ae3cSTejun Heo 	struct global_cwq *gcwq;
1459*7a22ad75STejun Heo 	struct cpu_workqueue_struct *cwq;
1460db700897SOleg Nesterov 	struct wq_barrier barr;
1461db700897SOleg Nesterov 
1462db700897SOleg Nesterov 	might_sleep();
1463*7a22ad75STejun Heo 	gcwq = get_work_gcwq(work);
1464*7a22ad75STejun Heo 	if (!gcwq)
1465db700897SOleg Nesterov 		return 0;
1466a67da70dSOleg Nesterov 
14678b03ae3cSTejun Heo 	spin_lock_irq(&gcwq->lock);
1468db700897SOleg Nesterov 	if (!list_empty(&work->entry)) {
1469db700897SOleg Nesterov 		/*
1470db700897SOleg Nesterov 		 * See the comment near try_to_grab_pending()->smp_rmb().
1471*7a22ad75STejun Heo 		 * If it was re-queued to a different gcwq under us, we
1472*7a22ad75STejun Heo 		 * are not going to wait.
1473db700897SOleg Nesterov 		 */
1474db700897SOleg Nesterov 		smp_rmb();
1475*7a22ad75STejun Heo 		cwq = get_work_cwq(work);
1476*7a22ad75STejun Heo 		if (unlikely(!cwq || gcwq != cwq->gcwq))
14774690c4abSTejun Heo 			goto already_gone;
1478db700897SOleg Nesterov 	} else {
1479*7a22ad75STejun Heo 		worker = find_worker_executing_work(gcwq, work);
1480affee4b2STejun Heo 		if (!worker)
14814690c4abSTejun Heo 			goto already_gone;
1482*7a22ad75STejun Heo 		cwq = worker->current_cwq;
1483db700897SOleg Nesterov 	}
1484db700897SOleg Nesterov 
1485affee4b2STejun Heo 	insert_wq_barrier(cwq, &barr, work, worker);
14868b03ae3cSTejun Heo 	spin_unlock_irq(&gcwq->lock);
1487*7a22ad75STejun Heo 
1488*7a22ad75STejun Heo 	lock_map_acquire(&cwq->wq->lockdep_map);
1489*7a22ad75STejun Heo 	lock_map_release(&cwq->wq->lockdep_map);
1490*7a22ad75STejun Heo 
1491db700897SOleg Nesterov 	wait_for_completion(&barr.done);
1492dc186ad7SThomas Gleixner 	destroy_work_on_stack(&barr.work);
1493db700897SOleg Nesterov 	return 1;
14944690c4abSTejun Heo already_gone:
14958b03ae3cSTejun Heo 	spin_unlock_irq(&gcwq->lock);
14964690c4abSTejun Heo 	return 0;
1497db700897SOleg Nesterov }
1498db700897SOleg Nesterov EXPORT_SYMBOL_GPL(flush_work);
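
/*
 * Example (a sketch; dev->irq and dev->my_work are hypothetical).
 * As documented above, the caller first stops the source of
 * requeueing, then waits for a possibly-running instance:
 *
 *	disable_irq(dev->irq);		nothing requeues after this
 *	flush_work(&dev->my_work);	handler has fully finished
 */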
1499db700897SOleg Nesterov 
15006e84d644SOleg Nesterov /*
15011f1f642eSOleg Nesterov  * Upon a successful return (>= 0), the caller "owns" WORK_STRUCT_PENDING bit,
15026e84d644SOleg Nesterov  * so this work can't be re-armed in any way.
15036e84d644SOleg Nesterov  */
15046e84d644SOleg Nesterov static int try_to_grab_pending(struct work_struct *work)
15056e84d644SOleg Nesterov {
15068b03ae3cSTejun Heo 	struct global_cwq *gcwq;
15071f1f642eSOleg Nesterov 	int ret = -1;
15086e84d644SOleg Nesterov 
150922df02bbSTejun Heo 	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)))
15101f1f642eSOleg Nesterov 		return 0;
15116e84d644SOleg Nesterov 
15126e84d644SOleg Nesterov 	/*
15136e84d644SOleg Nesterov 	 * The queueing is in progress, or it is already queued. Try to
15146e84d644SOleg Nesterov 	 * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
15156e84d644SOleg Nesterov 	 */
1516*7a22ad75STejun Heo 	gcwq = get_work_gcwq(work);
1517*7a22ad75STejun Heo 	if (!gcwq)
15186e84d644SOleg Nesterov 		return ret;
15196e84d644SOleg Nesterov 
15208b03ae3cSTejun Heo 	spin_lock_irq(&gcwq->lock);
15216e84d644SOleg Nesterov 	if (!list_empty(&work->entry)) {
15226e84d644SOleg Nesterov 		/*
1523*7a22ad75STejun Heo 		 * This work is queued, but perhaps we locked the wrong gcwq.
15246e84d644SOleg Nesterov 		 * In that case we must see the new value after rmb(), see
15256e84d644SOleg Nesterov 		 * insert_work()->wmb().
15266e84d644SOleg Nesterov 		 */
15276e84d644SOleg Nesterov 		smp_rmb();
1528*7a22ad75STejun Heo 		if (gcwq == get_work_gcwq(work)) {
1529dc186ad7SThomas Gleixner 			debug_work_deactivate(work);
15306e84d644SOleg Nesterov 			list_del_init(&work->entry);
1531*7a22ad75STejun Heo 			cwq_dec_nr_in_flight(get_work_cwq(work),
1532*7a22ad75STejun Heo 					     get_work_color(work));
15336e84d644SOleg Nesterov 			ret = 1;
15346e84d644SOleg Nesterov 		}
15356e84d644SOleg Nesterov 	}
15368b03ae3cSTejun Heo 	spin_unlock_irq(&gcwq->lock);
15376e84d644SOleg Nesterov 
15386e84d644SOleg Nesterov 	return ret;
15396e84d644SOleg Nesterov }
15406e84d644SOleg Nesterov 
1541*7a22ad75STejun Heo static void wait_on_cpu_work(struct global_cwq *gcwq, struct work_struct *work)
1542b89deed3SOleg Nesterov {
1543b89deed3SOleg Nesterov 	struct wq_barrier barr;
1544affee4b2STejun Heo 	struct worker *worker;
1545b89deed3SOleg Nesterov 
15468b03ae3cSTejun Heo 	spin_lock_irq(&gcwq->lock);
1547affee4b2STejun Heo 
1548*7a22ad75STejun Heo 	worker = find_worker_executing_work(gcwq, work);
1549*7a22ad75STejun Heo 	if (unlikely(worker))
1550*7a22ad75STejun Heo 		insert_wq_barrier(worker->current_cwq, &barr, work, worker);
1551affee4b2STejun Heo 
15528b03ae3cSTejun Heo 	spin_unlock_irq(&gcwq->lock);
1553b89deed3SOleg Nesterov 
1554affee4b2STejun Heo 	if (unlikely(worker)) {
1555b89deed3SOleg Nesterov 		wait_for_completion(&barr.done);
1556dc186ad7SThomas Gleixner 		destroy_work_on_stack(&barr.work);
1557dc186ad7SThomas Gleixner 	}
1558b89deed3SOleg Nesterov }
1559b89deed3SOleg Nesterov 
15606e84d644SOleg Nesterov static void wait_on_work(struct work_struct *work)
1561b89deed3SOleg Nesterov {
1562b1f4ec17SOleg Nesterov 	int cpu;
1563b89deed3SOleg Nesterov 
1564f293ea92SOleg Nesterov 	might_sleep();
1565f293ea92SOleg Nesterov 
15663295f0efSIngo Molnar 	lock_map_acquire(&work->lockdep_map);
15673295f0efSIngo Molnar 	lock_map_release(&work->lockdep_map);
15684e6045f1SJohannes Berg 
15691537663fSTejun Heo 	for_each_possible_cpu(cpu)
1570*7a22ad75STejun Heo 		wait_on_cpu_work(get_gcwq(cpu), work);
15716e84d644SOleg Nesterov }
15726e84d644SOleg Nesterov 
15731f1f642eSOleg Nesterov static int __cancel_work_timer(struct work_struct *work,
15741f1f642eSOleg Nesterov 				struct timer_list* timer)
15751f1f642eSOleg Nesterov {
15761f1f642eSOleg Nesterov 	int ret;
15771f1f642eSOleg Nesterov 
15781f1f642eSOleg Nesterov 	do {
15791f1f642eSOleg Nesterov 		ret = (timer && likely(del_timer(timer)));
15801f1f642eSOleg Nesterov 		if (!ret)
15811f1f642eSOleg Nesterov 			ret = try_to_grab_pending(work);
15821f1f642eSOleg Nesterov 		wait_on_work(work);
15831f1f642eSOleg Nesterov 	} while (unlikely(ret < 0));
15841f1f642eSOleg Nesterov 
1585*7a22ad75STejun Heo 	clear_work_data(work);
15861f1f642eSOleg Nesterov 	return ret;
15871f1f642eSOleg Nesterov }
15881f1f642eSOleg Nesterov 
15896e84d644SOleg Nesterov /**
15906e84d644SOleg Nesterov  * cancel_work_sync - block until a work_struct's callback has terminated
15916e84d644SOleg Nesterov  * @work: the work which is to be flushed
15926e84d644SOleg Nesterov  *
15931f1f642eSOleg Nesterov  * Returns true if @work was pending.
15941f1f642eSOleg Nesterov  *
15956e84d644SOleg Nesterov  * cancel_work_sync() will cancel the work if it is queued. If the work's
15966e84d644SOleg Nesterov  * callback appears to be running, cancel_work_sync() will block until it
15976e84d644SOleg Nesterov  * has completed.
15986e84d644SOleg Nesterov  *
15996e84d644SOleg Nesterov  * It is possible to use this function if the work re-queues itself. It can
16006e84d644SOleg Nesterov  * cancel the work even if it migrates to another workqueue, however in that
16016e84d644SOleg Nesterov  * case it only guarantees that work->func() has completed on the last queued
16026e84d644SOleg Nesterov  * workqueue.
16036e84d644SOleg Nesterov  *
16046e84d644SOleg Nesterov  * cancel_work_sync(&delayed_work->work) should be used only if ->timer is not
16056e84d644SOleg Nesterov  * pending, otherwise it goes into a busy-wait loop until the timer expires.
16066e84d644SOleg Nesterov  *
16076e84d644SOleg Nesterov  * The caller must ensure that workqueue_struct on which this work was last
16086e84d644SOleg Nesterov  * queued can't be destroyed before this function returns.
16096e84d644SOleg Nesterov  */
16101f1f642eSOleg Nesterov int cancel_work_sync(struct work_struct *work)
16116e84d644SOleg Nesterov {
16121f1f642eSOleg Nesterov 	return __cancel_work_timer(work, NULL);
1613b89deed3SOleg Nesterov }
161428e53bddSOleg Nesterov EXPORT_SYMBOL_GPL(cancel_work_sync);
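
/*
 * Example (a sketch; dev->my_work is hypothetical).  Unlike
 * flush_work(), no preparation is required and the work may even be
 * requeueing itself:
 *
 *	cancel_work_sync(&dev->my_work);
 *
 * On return my_work is neither queued nor running on any cpu.
 */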
1615b89deed3SOleg Nesterov 
16166e84d644SOleg Nesterov /**
1617f5a421a4SOleg Nesterov  * cancel_delayed_work_sync - reliably kill off a delayed work.
16186e84d644SOleg Nesterov  * @dwork: the delayed work struct
16196e84d644SOleg Nesterov  *
16201f1f642eSOleg Nesterov  * Returns true if @dwork was pending.
16211f1f642eSOleg Nesterov  *
16226e84d644SOleg Nesterov  * It is possible to use this function if @dwork rearms itself via queue_work()
16236e84d644SOleg Nesterov  * or queue_delayed_work(). See also the comment for cancel_work_sync().
16246e84d644SOleg Nesterov  */
16251f1f642eSOleg Nesterov int cancel_delayed_work_sync(struct delayed_work *dwork)
16266e84d644SOleg Nesterov {
16271f1f642eSOleg Nesterov 	return __cancel_work_timer(&dwork->work, &dwork->timer);
16286e84d644SOleg Nesterov }
1629f5a421a4SOleg Nesterov EXPORT_SYMBOL(cancel_delayed_work_sync);
16301da177e4SLinus Torvalds 
16316e84d644SOleg Nesterov static struct workqueue_struct *keventd_wq __read_mostly;
16321da177e4SLinus Torvalds 
16330fcb78c2SRolf Eike Beer /**
16340fcb78c2SRolf Eike Beer  * schedule_work - put work task in global workqueue
16350fcb78c2SRolf Eike Beer  * @work: job to be done
16360fcb78c2SRolf Eike Beer  *
16375b0f437dSBart Van Assche  * Returns zero if @work was already on the kernel-global workqueue and
16385b0f437dSBart Van Assche  * non-zero otherwise.
16395b0f437dSBart Van Assche  *
16405b0f437dSBart Van Assche  * This puts a job in the kernel-global workqueue if it was not already
16415b0f437dSBart Van Assche  * queued and leaves it in the same position on the kernel-global
16425b0f437dSBart Van Assche  * workqueue otherwise.
16430fcb78c2SRolf Eike Beer  */
16447ad5b3a5SHarvey Harrison int schedule_work(struct work_struct *work)
16451da177e4SLinus Torvalds {
16461da177e4SLinus Torvalds 	return queue_work(keventd_wq, work);
16471da177e4SLinus Torvalds }
1648ae90dd5dSDave Jones EXPORT_SYMBOL(schedule_work);
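
/*
 * Example (a sketch; my_func and my_work are hypothetical): defer a
 * job from atomic context to keventd's process context:
 *
 *	static void my_func(struct work_struct *work)
 *	{
 *		... runs in process context ...
 *	}
 *	static DECLARE_WORK(my_work, my_func);
 *
 *	schedule_work(&my_work);	e.g. from an interrupt handler
 */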
16491da177e4SLinus Torvalds 
1650c1a220e7SZhang Rui /**
1651c1a220e7SZhang Rui  * schedule_work_on - put work task on a specific cpu
1652c1a220e7SZhang Rui  * @cpu: cpu to put the work task on
1653c1a220e7SZhang Rui  * @work: job to be done
1654c1a220e7SZhang Rui  *
1655c1a220e7SZhang Rui  * This puts a job on a specific cpu.
1656c1a220e7SZhang Rui  */
1657c1a220e7SZhang Rui int schedule_work_on(int cpu, struct work_struct *work)
1658c1a220e7SZhang Rui {
1659c1a220e7SZhang Rui 	return queue_work_on(cpu, keventd_wq, work);
1660c1a220e7SZhang Rui }
1661c1a220e7SZhang Rui EXPORT_SYMBOL(schedule_work_on);
1662c1a220e7SZhang Rui 
16630fcb78c2SRolf Eike Beer /**
16640fcb78c2SRolf Eike Beer  * schedule_delayed_work - put work task in global workqueue after delay
166552bad64dSDavid Howells  * @dwork: job to be done
166652bad64dSDavid Howells  * @delay: number of jiffies to wait or 0 for immediate execution
16670fcb78c2SRolf Eike Beer  *
16680fcb78c2SRolf Eike Beer  * After waiting for a given time this puts a job in the kernel-global
16690fcb78c2SRolf Eike Beer  * workqueue.
16700fcb78c2SRolf Eike Beer  */
16717ad5b3a5SHarvey Harrison int schedule_delayed_work(struct delayed_work *dwork,
167282f67cd9SIngo Molnar 					unsigned long delay)
16731da177e4SLinus Torvalds {
167452bad64dSDavid Howells 	return queue_delayed_work(keventd_wq, dwork, delay);
16751da177e4SLinus Torvalds }
1676ae90dd5dSDave Jones EXPORT_SYMBOL(schedule_delayed_work);
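
/*
 * Example (a sketch; my_poll and my_dwork are hypothetical): poll
 * once a second by rearming from the handler itself:
 *
 *	static void my_poll(struct work_struct *work)
 *	{
 *		...
 *		schedule_delayed_work(&my_dwork, HZ);
 *	}
 *	static DECLARE_DELAYED_WORK(my_dwork, my_poll);
 *
 *	schedule_delayed_work(&my_dwork, HZ);	kick off the cycle
 */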
16771da177e4SLinus Torvalds 
16780fcb78c2SRolf Eike Beer /**
16798c53e463SLinus Torvalds  * flush_delayed_work - block until a delayed_work's callback has terminated
16808c53e463SLinus Torvalds  * @dwork: the delayed work which is to be flushed
16818c53e463SLinus Torvalds  *
16828c53e463SLinus Torvalds  * Any timeout is cancelled, and any pending work is run immediately.
16838c53e463SLinus Torvalds  */
16848c53e463SLinus Torvalds void flush_delayed_work(struct delayed_work *dwork)
16858c53e463SLinus Torvalds {
16868c53e463SLinus Torvalds 	if (del_timer_sync(&dwork->timer)) {
1687*7a22ad75STejun Heo 		__queue_work(get_cpu(), get_work_cwq(&dwork->work)->wq,
16884690c4abSTejun Heo 			     &dwork->work);
16898c53e463SLinus Torvalds 		put_cpu();
16908c53e463SLinus Torvalds 	}
16918c53e463SLinus Torvalds 	flush_work(&dwork->work);
16928c53e463SLinus Torvalds }
16938c53e463SLinus Torvalds EXPORT_SYMBOL(flush_delayed_work);
16948c53e463SLinus Torvalds 
16958c53e463SLinus Torvalds /**
16960fcb78c2SRolf Eike Beer  * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
16970fcb78c2SRolf Eike Beer  * @cpu: cpu to use
169852bad64dSDavid Howells  * @dwork: job to be done
16990fcb78c2SRolf Eike Beer  * @delay: number of jiffies to wait
17000fcb78c2SRolf Eike Beer  *
17010fcb78c2SRolf Eike Beer  * After waiting for a given time this puts a job in the kernel-global
17020fcb78c2SRolf Eike Beer  * workqueue on the specified CPU.
17030fcb78c2SRolf Eike Beer  */
17041da177e4SLinus Torvalds int schedule_delayed_work_on(int cpu,
170552bad64dSDavid Howells 			struct delayed_work *dwork, unsigned long delay)
17061da177e4SLinus Torvalds {
170752bad64dSDavid Howells 	return queue_delayed_work_on(cpu, keventd_wq, dwork, delay);
17081da177e4SLinus Torvalds }
1709ae90dd5dSDave Jones EXPORT_SYMBOL(schedule_delayed_work_on);
17101da177e4SLinus Torvalds 
1711b6136773SAndrew Morton /**
1712b6136773SAndrew Morton  * schedule_on_each_cpu - call a function on each online CPU from keventd
1713b6136773SAndrew Morton  * @func: the function to call
1714b6136773SAndrew Morton  *
1715b6136773SAndrew Morton  * Returns zero on success.
1716b6136773SAndrew Morton  * Returns a negative errno on failure.
1717b6136773SAndrew Morton  *
1718b6136773SAndrew Morton  * schedule_on_each_cpu() is very slow.
1719b6136773SAndrew Morton  */
172065f27f38SDavid Howells int schedule_on_each_cpu(work_func_t func)
172115316ba8SChristoph Lameter {
172215316ba8SChristoph Lameter 	int cpu;
172365a64464SAndi Kleen 	int orig = -1;
1724b6136773SAndrew Morton 	struct work_struct *works;
172515316ba8SChristoph Lameter 
1726b6136773SAndrew Morton 	works = alloc_percpu(struct work_struct);
1727b6136773SAndrew Morton 	if (!works)
172815316ba8SChristoph Lameter 		return -ENOMEM;
1729b6136773SAndrew Morton 
173095402b38SGautham R Shenoy 	get_online_cpus();
173193981800STejun Heo 
173293981800STejun Heo 	/*
173393981800STejun Heo 	 * When running in keventd, don't schedule a work item on
173493981800STejun Heo 	 * itself.  We can just call the function directly because
173593981800STejun Heo 	 * the work queue is already bound.  This is also faster.
173693981800STejun Heo 	 */
173793981800STejun Heo 	if (current_is_keventd())
173893981800STejun Heo 		orig = raw_smp_processor_id();
173993981800STejun Heo 
174015316ba8SChristoph Lameter 	for_each_online_cpu(cpu) {
17419bfb1839SIngo Molnar 		struct work_struct *work = per_cpu_ptr(works, cpu);
17429bfb1839SIngo Molnar 
17439bfb1839SIngo Molnar 		INIT_WORK(work, func);
174493981800STejun Heo 		if (cpu != orig)
17458de6d308SOleg Nesterov 			schedule_work_on(cpu, work);
174615316ba8SChristoph Lameter 	}
174793981800STejun Heo 	if (orig >= 0)
174893981800STejun Heo 		func(per_cpu_ptr(works, orig));
174993981800STejun Heo 
175093981800STejun Heo 	for_each_online_cpu(cpu)
17518616a89aSOleg Nesterov 		flush_work(per_cpu_ptr(works, cpu));
175293981800STejun Heo 
175395402b38SGautham R Shenoy 	put_online_cpus();
1754b6136773SAndrew Morton 	free_percpu(works);
175515316ba8SChristoph Lameter 	return 0;
175615316ba8SChristoph Lameter }
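
/*
 * Example (a sketch; drain_local_counters() is hypothetical): run a
 * function on every online cpu and return only after all of them
 * have finished:
 *
 *	static void drain_local_counters(struct work_struct *unused)
 *	{
 *		... operate on this cpu's per-cpu data ...
 *	}
 *
 *	int err = schedule_on_each_cpu(drain_local_counters);
 */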
175715316ba8SChristoph Lameter 
1758eef6a7d5SAlan Stern /**
1759eef6a7d5SAlan Stern  * flush_scheduled_work - ensure that any scheduled work has run to completion.
1760eef6a7d5SAlan Stern  *
1761eef6a7d5SAlan Stern  * Forces execution of the kernel-global workqueue and blocks until its
1762eef6a7d5SAlan Stern  * completion.
1763eef6a7d5SAlan Stern  *
1764eef6a7d5SAlan Stern  * Think twice before calling this function!  It's very easy to get into
1765eef6a7d5SAlan Stern  * trouble if you don't take great care.  Either of the following situations
1766eef6a7d5SAlan Stern  * will lead to deadlock:
1767eef6a7d5SAlan Stern  *
1768eef6a7d5SAlan Stern  *	One of the work items currently on the workqueue needs to acquire
1769eef6a7d5SAlan Stern  *	a lock held by your code or its caller.
1770eef6a7d5SAlan Stern  *
1771eef6a7d5SAlan Stern  *	Your code is running in the context of a work routine.
1772eef6a7d5SAlan Stern  *
1773eef6a7d5SAlan Stern  * They will be detected by lockdep when they occur, but the first might not
1774eef6a7d5SAlan Stern  * occur very often.  It depends on what work items are on the workqueue and
1775eef6a7d5SAlan Stern  * what locks they need, which you have no control over.
1776eef6a7d5SAlan Stern  *
1777eef6a7d5SAlan Stern  * In most situations flushing the entire workqueue is overkill; you merely
1778eef6a7d5SAlan Stern  * need to know that a particular work item isn't queued and isn't running.
1779eef6a7d5SAlan Stern  * In such cases you should use cancel_delayed_work_sync() or
1780eef6a7d5SAlan Stern  * cancel_work_sync() instead.
1781eef6a7d5SAlan Stern  */
17821da177e4SLinus Torvalds void flush_scheduled_work(void)
17831da177e4SLinus Torvalds {
17841da177e4SLinus Torvalds 	flush_workqueue(keventd_wq);
17851da177e4SLinus Torvalds }
1786ae90dd5dSDave Jones EXPORT_SYMBOL(flush_scheduled_work);
17871da177e4SLinus Torvalds 
17881da177e4SLinus Torvalds /**
17891fa44ecaSJames Bottomley  * execute_in_process_context - reliably execute the routine with user context
17901fa44ecaSJames Bottomley  * @fn:		the function to execute
17911fa44ecaSJames Bottomley  * @ew:		guaranteed storage for the execute work structure (must
17921fa44ecaSJames Bottomley  *		be available when the work executes)
17931fa44ecaSJames Bottomley  *
17941fa44ecaSJames Bottomley  * Executes the function immediately if process context is available,
17951fa44ecaSJames Bottomley  * otherwise schedules the function for delayed execution.
17961fa44ecaSJames Bottomley  *
17971fa44ecaSJames Bottomley  * Returns:	0 - function was executed
17981fa44ecaSJames Bottomley  *		1 - function was scheduled for execution
17991fa44ecaSJames Bottomley  */
180065f27f38SDavid Howells int execute_in_process_context(work_func_t fn, struct execute_work *ew)
18011fa44ecaSJames Bottomley {
18021fa44ecaSJames Bottomley 	if (!in_interrupt()) {
180365f27f38SDavid Howells 		fn(&ew->work);
18041fa44ecaSJames Bottomley 		return 0;
18051fa44ecaSJames Bottomley 	}
18061fa44ecaSJames Bottomley 
180765f27f38SDavid Howells 	INIT_WORK(&ew->work, fn);
18081fa44ecaSJames Bottomley 	schedule_work(&ew->work);
18091fa44ecaSJames Bottomley 
18101fa44ecaSJames Bottomley 	return 1;
18111fa44ecaSJames Bottomley }
18121fa44ecaSJames Bottomley EXPORT_SYMBOL_GPL(execute_in_process_context);
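
/*
 * Example (a sketch; my_obj_release() and obj->ew are hypothetical):
 * a release function that may be invoked from irq context.  Note
 * that @ew lives inside the object itself so its storage stays valid
 * until the work runs:
 *
 *	execute_in_process_context(my_obj_release, &obj->ew);
 */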
18131fa44ecaSJames Bottomley 
18141da177e4SLinus Torvalds int keventd_up(void)
18151da177e4SLinus Torvalds {
18161da177e4SLinus Torvalds 	return keventd_wq != NULL;
18171da177e4SLinus Torvalds }
18181da177e4SLinus Torvalds 
18191da177e4SLinus Torvalds int current_is_keventd(void)
18201da177e4SLinus Torvalds {
18211da177e4SLinus Torvalds 	struct cpu_workqueue_struct *cwq;
1822d243769dSHugh Dickins 	int cpu = raw_smp_processor_id(); /* preempt-safe: keventd is per-cpu */
18231da177e4SLinus Torvalds 	int ret = 0;
18241da177e4SLinus Torvalds 
18251da177e4SLinus Torvalds 	BUG_ON(!keventd_wq);
18261da177e4SLinus Torvalds 
18271537663fSTejun Heo 	cwq = get_cwq(cpu, keventd_wq);
1828c34056a3STejun Heo 	if (current == cwq->worker->task)
18291da177e4SLinus Torvalds 		ret = 1;
18301da177e4SLinus Torvalds 
18311da177e4SLinus Torvalds 	return ret;
18321da177e4SLinus Torvalds 
18331da177e4SLinus Torvalds }
18341da177e4SLinus Torvalds 
18350f900049STejun Heo static struct cpu_workqueue_struct *alloc_cwqs(void)
18360f900049STejun Heo {
18370f900049STejun Heo 	/*
18380f900049STejun Heo 	 * cwqs are force-aligned according to WORK_STRUCT_FLAG_BITS.
18390f900049STejun Heo 	 * Make sure that the alignment isn't lower than that of
18400f900049STejun Heo 	 * unsigned long long.
18410f900049STejun Heo 	 */
18420f900049STejun Heo 	const size_t size = sizeof(struct cpu_workqueue_struct);
18430f900049STejun Heo 	const size_t align = max_t(size_t, 1 << WORK_STRUCT_FLAG_BITS,
18440f900049STejun Heo 				   __alignof__(unsigned long long));
18450f900049STejun Heo 	struct cpu_workqueue_struct *cwqs;
18460f900049STejun Heo #ifndef CONFIG_SMP
18470f900049STejun Heo 	void *ptr;
18480f900049STejun Heo 
18490f900049STejun Heo 	/*
18500f900049STejun Heo 	 * On UP, the percpu allocator doesn't honor the alignment
18510f900049STejun Heo 	 * parameter and simply uses an arch-dependent default.
18520f900049STejun Heo 	 * Allocate enough room to align the cwq and put an extra
18530f900049STejun Heo 	 * pointer at the end pointing back to the originally
18540f900049STejun Heo 	 * allocated pointer, which will be used for freeing.
18550f900049STejun Heo 	 *
18560f900049STejun Heo 	 * FIXME: This really belongs to UP percpu code.  Update UP
18570f900049STejun Heo 	 * percpu code to honor alignment and remove this ugliness.
18580f900049STejun Heo 	 */
18590f900049STejun Heo 	ptr = __alloc_percpu(size + align + sizeof(void *), 1);
18600f900049STejun Heo 	cwqs = PTR_ALIGN(ptr, align);
18610f900049STejun Heo 	*(void **)per_cpu_ptr(cwqs + 1, 0) = ptr;
18620f900049STejun Heo #else
18630f900049STejun Heo 	/* On SMP, percpu allocator can do it itself */
18640f900049STejun Heo 	cwqs = __alloc_percpu(size, align);
18650f900049STejun Heo #endif
18660f900049STejun Heo 	/* just in case, make sure it's actually aligned */
18670f900049STejun Heo 	BUG_ON(!IS_ALIGNED((unsigned long)cwqs, align));
18680f900049STejun Heo 	return cwqs;
18690f900049STejun Heo }
18700f900049STejun Heo 
18710f900049STejun Heo static void free_cwqs(struct cpu_workqueue_struct *cwqs)
18720f900049STejun Heo {
18730f900049STejun Heo #ifndef CONFIG_SMP
18740f900049STejun Heo 	/* on UP, the pointer to free is stored right after the cwq */
18750f900049STejun Heo 	if (cwqs)
18760f900049STejun Heo 		free_percpu(*(void **)per_cpu_ptr(cwqs + 1, 0));
18770f900049STejun Heo #else
18780f900049STejun Heo 	free_percpu(cwqs);
18790f900049STejun Heo #endif
18800f900049STejun Heo }
18810f900049STejun Heo 
18824e6045f1SJohannes Berg struct workqueue_struct *__create_workqueue_key(const char *name,
188397e37d7bSTejun Heo 						unsigned int flags,
18841e19ffc6STejun Heo 						int max_active,
1885eb13ba87SJohannes Berg 						struct lock_class_key *key,
1886eb13ba87SJohannes Berg 						const char *lock_name)
18873af24433SOleg Nesterov {
18883af24433SOleg Nesterov 	struct workqueue_struct *wq;
1889c34056a3STejun Heo 	bool failed = false;
1890c34056a3STejun Heo 	unsigned int cpu;
18913af24433SOleg Nesterov 
18921e19ffc6STejun Heo 	max_active = clamp_val(max_active, 1, INT_MAX);
18931e19ffc6STejun Heo 
18943af24433SOleg Nesterov 	wq = kzalloc(sizeof(*wq), GFP_KERNEL);
18953af24433SOleg Nesterov 	if (!wq)
18964690c4abSTejun Heo 		goto err;
18973af24433SOleg Nesterov 
18980f900049STejun Heo 	wq->cpu_wq = alloc_cwqs();
18994690c4abSTejun Heo 	if (!wq->cpu_wq)
19004690c4abSTejun Heo 		goto err;
19013af24433SOleg Nesterov 
190297e37d7bSTejun Heo 	wq->flags = flags;
1903a0a1a5fdSTejun Heo 	wq->saved_max_active = max_active;
190473f53c4aSTejun Heo 	mutex_init(&wq->flush_mutex);
190573f53c4aSTejun Heo 	atomic_set(&wq->nr_cwqs_to_flush, 0);
190673f53c4aSTejun Heo 	INIT_LIST_HEAD(&wq->flusher_queue);
190773f53c4aSTejun Heo 	INIT_LIST_HEAD(&wq->flusher_overflow);
1908502ca9d8STejun Heo 	wq->single_cpu = NR_CPUS;
1909502ca9d8STejun Heo 
19103af24433SOleg Nesterov 	wq->name = name;
1911eb13ba87SJohannes Berg 	lockdep_init_map(&wq->lockdep_map, lock_name, key, 0);
1912cce1a165SOleg Nesterov 	INIT_LIST_HEAD(&wq->list);
19133af24433SOleg Nesterov 
19143da1c84cSOleg Nesterov 	cpu_maps_update_begin();
19156af8bf3dSOleg Nesterov 	/*
19166af8bf3dSOleg Nesterov 	 * We must initialize cwqs for each possible cpu even if we
19176af8bf3dSOleg Nesterov 	 * are going to call destroy_workqueue() in the end.  Otherwise
19186af8bf3dSOleg Nesterov 	 * cpu_up() can hit the uninitialized cwq once we drop the
19196af8bf3dSOleg Nesterov 	 * lock.
19206af8bf3dSOleg Nesterov 	 */
19213af24433SOleg Nesterov 	for_each_possible_cpu(cpu) {
19221537663fSTejun Heo 		struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
19238b03ae3cSTejun Heo 		struct global_cwq *gcwq = get_gcwq(cpu);
19241537663fSTejun Heo 
19250f900049STejun Heo 		BUG_ON((unsigned long)cwq & WORK_STRUCT_FLAG_MASK);
19268b03ae3cSTejun Heo 		cwq->gcwq = gcwq;
1927c34056a3STejun Heo 		cwq->wq = wq;
192873f53c4aSTejun Heo 		cwq->flush_color = -1;
19291e19ffc6STejun Heo 		cwq->max_active = max_active;
19301537663fSTejun Heo 		INIT_LIST_HEAD(&cwq->worklist);
19311e19ffc6STejun Heo 		INIT_LIST_HEAD(&cwq->delayed_works);
19321537663fSTejun Heo 
1933c34056a3STejun Heo 		if (failed)
19343af24433SOleg Nesterov 			continue;
1935502ca9d8STejun Heo 		cwq->worker = create_worker(cwq, cpu_online(cpu));
1936c34056a3STejun Heo 		if (cwq->worker)
1937c34056a3STejun Heo 			start_worker(cwq->worker);
19381537663fSTejun Heo 		else
1939c34056a3STejun Heo 			failed = true;
19403af24433SOleg Nesterov 	}
19411537663fSTejun Heo 
1942a0a1a5fdSTejun Heo 	/*
1943a0a1a5fdSTejun Heo 	 * workqueue_lock protects global freeze state and workqueues
1944a0a1a5fdSTejun Heo 	 * list.  Grab it, set max_active accordingly and add the new
1945a0a1a5fdSTejun Heo 	 * workqueue to workqueues list.
1946a0a1a5fdSTejun Heo 	 */
19471537663fSTejun Heo 	spin_lock(&workqueue_lock);
1948a0a1a5fdSTejun Heo 
1949a0a1a5fdSTejun Heo 	if (workqueue_freezing && wq->flags & WQ_FREEZEABLE)
1950a0a1a5fdSTejun Heo 		for_each_possible_cpu(cpu)
1951a0a1a5fdSTejun Heo 			get_cwq(cpu, wq)->max_active = 0;
1952a0a1a5fdSTejun Heo 
19531537663fSTejun Heo 	list_add(&wq->list, &workqueues);
1954a0a1a5fdSTejun Heo 
19551537663fSTejun Heo 	spin_unlock(&workqueue_lock);
19561537663fSTejun Heo 
19573da1c84cSOleg Nesterov 	cpu_maps_update_done();
19583af24433SOleg Nesterov 
1959c34056a3STejun Heo 	if (failed) {
19603af24433SOleg Nesterov 		destroy_workqueue(wq);
19613af24433SOleg Nesterov 		wq = NULL;
19623af24433SOleg Nesterov 	}
19633af24433SOleg Nesterov 	return wq;
19644690c4abSTejun Heo err:
19654690c4abSTejun Heo 	if (wq) {
19660f900049STejun Heo 		free_cwqs(wq->cpu_wq);
19674690c4abSTejun Heo 		kfree(wq);
19684690c4abSTejun Heo 	}
19694690c4abSTejun Heo 	return NULL;
19703af24433SOleg Nesterov }
19714e6045f1SJohannes Berg EXPORT_SYMBOL_GPL(__create_workqueue_key);
19723af24433SOleg Nesterov 
19733af24433SOleg Nesterov /**
19743af24433SOleg Nesterov  * destroy_workqueue - safely terminate a workqueue
19753af24433SOleg Nesterov  * @wq: target workqueue
19763af24433SOleg Nesterov  *
19773af24433SOleg Nesterov  * Safely destroy a workqueue. All work currently pending will be done first.
19783af24433SOleg Nesterov  */
19793af24433SOleg Nesterov void destroy_workqueue(struct workqueue_struct *wq)
19803af24433SOleg Nesterov {
1981c8e55f36STejun Heo 	unsigned int cpu;
19823af24433SOleg Nesterov 
1983a0a1a5fdSTejun Heo 	flush_workqueue(wq);
1984a0a1a5fdSTejun Heo 
1985a0a1a5fdSTejun Heo 	/*
1986a0a1a5fdSTejun Heo 	 * wq list is used to freeze wq, remove from list after
1987a0a1a5fdSTejun Heo 	 * flushing is complete in case freeze races us.
1988a0a1a5fdSTejun Heo 	 */
19893da1c84cSOleg Nesterov 	cpu_maps_update_begin();
199095402b38SGautham R Shenoy 	spin_lock(&workqueue_lock);
19913af24433SOleg Nesterov 	list_del(&wq->list);
199295402b38SGautham R Shenoy 	spin_unlock(&workqueue_lock);
19933da1c84cSOleg Nesterov 	cpu_maps_update_done();
19943af24433SOleg Nesterov 
199573f53c4aSTejun Heo 	for_each_possible_cpu(cpu) {
199673f53c4aSTejun Heo 		struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
199773f53c4aSTejun Heo 		int i;
199873f53c4aSTejun Heo 
1999c34056a3STejun Heo 		if (cwq->worker) {
2000c8e55f36STejun Heo 			spin_lock_irq(&cwq->gcwq->lock);
2001c34056a3STejun Heo 			destroy_worker(cwq->worker);
2002c34056a3STejun Heo 			cwq->worker = NULL;
2003c8e55f36STejun Heo 			spin_unlock_irq(&cwq->gcwq->lock);
200473f53c4aSTejun Heo 		}
200573f53c4aSTejun Heo 
200673f53c4aSTejun Heo 		for (i = 0; i < WORK_NR_COLORS; i++)
200773f53c4aSTejun Heo 			BUG_ON(cwq->nr_in_flight[i]);
20081e19ffc6STejun Heo 		BUG_ON(cwq->nr_active);
20091e19ffc6STejun Heo 		BUG_ON(!list_empty(&cwq->delayed_works));
201073f53c4aSTejun Heo 	}
20111537663fSTejun Heo 
20120f900049STejun Heo 	free_cwqs(wq->cpu_wq);
20133af24433SOleg Nesterov 	kfree(wq);
20143af24433SOleg Nesterov }
20153af24433SOleg Nesterov EXPORT_SYMBOL_GPL(destroy_workqueue);
20163af24433SOleg Nesterov 
2017db7bccf4STejun Heo /*
2018db7bccf4STejun Heo  * CPU hotplug.
2019db7bccf4STejun Heo  *
2020db7bccf4STejun Heo  * CPU hotplug is implemented by allowing cwqs to be detached from
2021db7bccf4STejun Heo  * the CPU, running with unbound workers and allowing them to be
2022db7bccf4STejun Heo  * reattached later if the cpu comes back online.  A separate thread
2023db7bccf4STejun Heo  * is created to govern cwqs in such a state and is called the trustee.
2024db7bccf4STejun Heo  *
2025db7bccf4STejun Heo  * Trustee states and their descriptions.
2026db7bccf4STejun Heo  *
2027db7bccf4STejun Heo  * START	Command state used on startup.  On CPU_DOWN_PREPARE, a
2028db7bccf4STejun Heo  *		new trustee is started with this state.
2029db7bccf4STejun Heo  *
2030db7bccf4STejun Heo  * IN_CHARGE	Once started, trustee will enter this state after
2031db7bccf4STejun Heo  *		making all existing workers rogue.  DOWN_PREPARE waits
2032db7bccf4STejun Heo  *		for trustee to enter this state.  After reaching
2033db7bccf4STejun Heo  *		IN_CHARGE, trustee tries to execute the pending
2034db7bccf4STejun Heo  *		worklist until it's empty and the state is set to
2035db7bccf4STejun Heo  *		BUTCHER, or the state is set to RELEASE.
2036db7bccf4STejun Heo  *
2037db7bccf4STejun Heo  * BUTCHER	Command state which is set by the cpu callback after
2038db7bccf4STejun Heo  *		the cpu has gone down.  Once this state is set, the
2039db7bccf4STejun Heo  *		trustee knows that there will be no new works on the
2040db7bccf4STejun Heo  *		worklist and once the worklist is empty it can proceed
2041db7bccf4STejun Heo  *		to killing idle workers.
2042db7bccf4STejun Heo  *
2043db7bccf4STejun Heo  * RELEASE	Command state which is set by the cpu callback if the
2044db7bccf4STejun Heo  *		cpu down has been canceled or it has come online
2045db7bccf4STejun Heo  *		again.  After recognizing this state, the trustee stops
2046db7bccf4STejun Heo  *		trying to drain or butcher and transitions to DONE.
2047db7bccf4STejun Heo  *
2048db7bccf4STejun Heo  * DONE		Trustee will enter this state after BUTCHER or RELEASE
2049db7bccf4STejun Heo  *		is complete.
2050db7bccf4STejun Heo  *
2051db7bccf4STejun Heo  *          trustee                 CPU                draining
2052db7bccf4STejun Heo  *         took over                down               complete
2053db7bccf4STejun Heo  * START -----------> IN_CHARGE -----------> BUTCHER -----------> DONE
2054db7bccf4STejun Heo  *                        |                     |                  ^
2055db7bccf4STejun Heo  *                        | CPU is back online  v   return workers |
2056db7bccf4STejun Heo  *                         ----------------> RELEASE --------------
2057db7bccf4STejun Heo  */
2058db7bccf4STejun Heo 
2059db7bccf4STejun Heo /**
2060db7bccf4STejun Heo  * trustee_wait_event_timeout - timed event wait for trustee
2061db7bccf4STejun Heo  * @cond: condition to wait for
2062db7bccf4STejun Heo  * @timeout: timeout in jiffies
2063db7bccf4STejun Heo  *
2064db7bccf4STejun Heo  * wait_event_timeout() for trustee to use.  Handles locking and
2065db7bccf4STejun Heo  * checks for RELEASE request.
2066db7bccf4STejun Heo  *
2067db7bccf4STejun Heo  * CONTEXT:
2068db7bccf4STejun Heo  * spin_lock_irq(gcwq->lock) which may be released and regrabbed
2069db7bccf4STejun Heo  * multiple times.  To be used by trustee.
2070db7bccf4STejun Heo  *
2071db7bccf4STejun Heo  * RETURNS:
2072db7bccf4STejun Heo  * Positive indicating left time if @cond is satisfied, 0 if timed
2073db7bccf4STejun Heo  * out, -1 if canceled.
2074db7bccf4STejun Heo  */
2075db7bccf4STejun Heo #define trustee_wait_event_timeout(cond, timeout) ({			\
2076db7bccf4STejun Heo 	long __ret = (timeout);						\
2077db7bccf4STejun Heo 	while (!((cond) || (gcwq->trustee_state == TRUSTEE_RELEASE)) &&	\
2078db7bccf4STejun Heo 	       __ret) {							\
2079db7bccf4STejun Heo 		spin_unlock_irq(&gcwq->lock);				\
2080db7bccf4STejun Heo 		__wait_event_timeout(gcwq->trustee_wait, (cond) ||	\
2081db7bccf4STejun Heo 			(gcwq->trustee_state == TRUSTEE_RELEASE),	\
2082db7bccf4STejun Heo 			__ret);						\
2083db7bccf4STejun Heo 		spin_lock_irq(&gcwq->lock);				\
2084db7bccf4STejun Heo 	}								\
2085db7bccf4STejun Heo 	gcwq->trustee_state == TRUSTEE_RELEASE ? -1 : (__ret);		\
2086db7bccf4STejun Heo })
2087db7bccf4STejun Heo 
2088db7bccf4STejun Heo /**
2089db7bccf4STejun Heo  * trustee_wait_event - event wait for trustee
2090db7bccf4STejun Heo  * @cond: condition to wait for
2091db7bccf4STejun Heo  *
2092db7bccf4STejun Heo  * wait_event() for trustee to use.  Automatically handles locking and
2093db7bccf4STejun Heo  * checks for RELEASE request.
2094db7bccf4STejun Heo  *
2095db7bccf4STejun Heo  * CONTEXT:
2096db7bccf4STejun Heo  * spin_lock_irq(gcwq->lock) which may be released and regrabbed
2097db7bccf4STejun Heo  * multiple times.  To be used by trustee.
2098db7bccf4STejun Heo  *
2099db7bccf4STejun Heo  * RETURNS:
2100db7bccf4STejun Heo  * 0 if @cond is satisfied, -1 if canceled.
2101db7bccf4STejun Heo  */
2102db7bccf4STejun Heo #define trustee_wait_event(cond) ({					\
2103db7bccf4STejun Heo 	long __ret1;							\
2104db7bccf4STejun Heo 	__ret1 = trustee_wait_event_timeout(cond, MAX_SCHEDULE_TIMEOUT);\
2105db7bccf4STejun Heo 	__ret1 < 0 ? -1 : 0;						\
2106db7bccf4STejun Heo })
2107db7bccf4STejun Heo 
2108db7bccf4STejun Heo static int __cpuinit trustee_thread(void *__gcwq)
2109db7bccf4STejun Heo {
2110db7bccf4STejun Heo 	struct global_cwq *gcwq = __gcwq;
2111db7bccf4STejun Heo 	struct worker *worker;
2112db7bccf4STejun Heo 	struct hlist_node *pos;
2113db7bccf4STejun Heo 	int i;
2114db7bccf4STejun Heo 
2115db7bccf4STejun Heo 	BUG_ON(gcwq->cpu != smp_processor_id());
2116db7bccf4STejun Heo 
2117db7bccf4STejun Heo 	spin_lock_irq(&gcwq->lock);
2118db7bccf4STejun Heo 	/*
2119502ca9d8STejun Heo 	 * Make all workers rogue.  Trustee must be bound to the
2120502ca9d8STejun Heo 	 * target cpu and can't be cancelled.
2121db7bccf4STejun Heo 	 */
2122db7bccf4STejun Heo 	BUG_ON(gcwq->cpu != smp_processor_id());
2123db7bccf4STejun Heo 
2124db7bccf4STejun Heo 	list_for_each_entry(worker, &gcwq->idle_list, entry)
2125db7bccf4STejun Heo 		worker->flags |= WORKER_ROGUE;
2126db7bccf4STejun Heo 
2127db7bccf4STejun Heo 	for_each_busy_worker(worker, i, pos, gcwq)
2128db7bccf4STejun Heo 		worker->flags |= WORKER_ROGUE;
2129db7bccf4STejun Heo 
2130db7bccf4STejun Heo 	/*
2131db7bccf4STejun Heo 	 * We're now in charge.  Notify and proceed to drain.  We need
2132db7bccf4STejun Heo 	 * to keep the gcwq running during the whole CPU down
2133db7bccf4STejun Heo 	 * procedure as other cpu hotunplug callbacks may need to
2134db7bccf4STejun Heo 	 * flush currently running tasks.
2135db7bccf4STejun Heo 	 */
2136db7bccf4STejun Heo 	gcwq->trustee_state = TRUSTEE_IN_CHARGE;
2137db7bccf4STejun Heo 	wake_up_all(&gcwq->trustee_wait);
2138db7bccf4STejun Heo 
2139db7bccf4STejun Heo 	/*
2140db7bccf4STejun Heo 	 * The original cpu is in the process of dying and may go away
2141db7bccf4STejun Heo 	 * anytime now.  When that happens, we and all workers would
2142db7bccf4STejun Heo 	 * be migrated to other cpus.  Try draining any leftover work.
2143db7bccf4STejun Heo 	 * Note that if the gcwq is frozen, there may be frozen works
2144db7bccf4STejun Heo 	 * in freezeable cwqs.  Don't declare completion while frozen.
2145db7bccf4STejun Heo 	 */
2146db7bccf4STejun Heo 	while (gcwq->nr_workers != gcwq->nr_idle ||
2147db7bccf4STejun Heo 	       gcwq->flags & GCWQ_FREEZING ||
2148db7bccf4STejun Heo 	       gcwq->trustee_state == TRUSTEE_IN_CHARGE) {
2149db7bccf4STejun Heo 		/* give a breather */
2150db7bccf4STejun Heo 		if (trustee_wait_event_timeout(false, TRUSTEE_COOLDOWN) < 0)
2151db7bccf4STejun Heo 			break;
2152db7bccf4STejun Heo 	}
2153db7bccf4STejun Heo 
2154db7bccf4STejun Heo 	/* notify completion */
2155db7bccf4STejun Heo 	gcwq->trustee = NULL;
2156db7bccf4STejun Heo 	gcwq->trustee_state = TRUSTEE_DONE;
2157db7bccf4STejun Heo 	wake_up_all(&gcwq->trustee_wait);
2158db7bccf4STejun Heo 	spin_unlock_irq(&gcwq->lock);
2159db7bccf4STejun Heo 	return 0;
2160db7bccf4STejun Heo }
2161db7bccf4STejun Heo 
2162db7bccf4STejun Heo /**
2163db7bccf4STejun Heo  * wait_trustee_state - wait for trustee to enter the specified state
2164db7bccf4STejun Heo  * @gcwq: gcwq the trustee of interest belongs to
2165db7bccf4STejun Heo  * @state: target state to wait for
2166db7bccf4STejun Heo  *
2167db7bccf4STejun Heo  * Wait for the trustee to reach @state.  DONE is already matched.
2168db7bccf4STejun Heo  *
2169db7bccf4STejun Heo  * CONTEXT:
2170db7bccf4STejun Heo  * spin_lock_irq(gcwq->lock) which may be released and regrabbed
2171db7bccf4STejun Heo  * multiple times.  To be used by cpu_callback.
2172db7bccf4STejun Heo  */
2173db7bccf4STejun Heo static void __cpuinit wait_trustee_state(struct global_cwq *gcwq, int state)
2174db7bccf4STejun Heo {
2175db7bccf4STejun Heo 	if (!(gcwq->trustee_state == state ||
2176db7bccf4STejun Heo 	      gcwq->trustee_state == TRUSTEE_DONE)) {
2177db7bccf4STejun Heo 		spin_unlock_irq(&gcwq->lock);
2178db7bccf4STejun Heo 		__wait_event(gcwq->trustee_wait,
2179db7bccf4STejun Heo 			     gcwq->trustee_state == state ||
2180db7bccf4STejun Heo 			     gcwq->trustee_state == TRUSTEE_DONE);
2181db7bccf4STejun Heo 		spin_lock_irq(&gcwq->lock);
2182db7bccf4STejun Heo 	}
2183db7bccf4STejun Heo }
2184db7bccf4STejun Heo 
21859c7b216dSChandra Seetharaman static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
21861da177e4SLinus Torvalds 						unsigned long action,
21871da177e4SLinus Torvalds 						void *hcpu)
21881da177e4SLinus Torvalds {
21893af24433SOleg Nesterov 	unsigned int cpu = (unsigned long)hcpu;
2190db7bccf4STejun Heo 	struct global_cwq *gcwq = get_gcwq(cpu);
2191db7bccf4STejun Heo 	struct task_struct *new_trustee = NULL;
2192db7bccf4STejun Heo 	struct worker *worker;
2193db7bccf4STejun Heo 	struct hlist_node *pos;
2194db7bccf4STejun Heo 	unsigned long flags;
2195db7bccf4STejun Heo 	int i;
21961da177e4SLinus Torvalds 
21978bb78442SRafael J. Wysocki 	action &= ~CPU_TASKS_FROZEN;
21988bb78442SRafael J. Wysocki 
2199db7bccf4STejun Heo 	switch (action) {
2200db7bccf4STejun Heo 	case CPU_DOWN_PREPARE:
2201db7bccf4STejun Heo 		new_trustee = kthread_create(trustee_thread, gcwq,
2202db7bccf4STejun Heo 					     "workqueue_trustee/%d", cpu);
2203db7bccf4STejun Heo 		if (IS_ERR(new_trustee))
2204db7bccf4STejun Heo 			return notifier_from_errno(PTR_ERR(new_trustee));
2205db7bccf4STejun Heo 		kthread_bind(new_trustee, cpu);
2206db7bccf4STejun Heo 	}
22071537663fSTejun Heo 
2208db7bccf4STejun Heo 	/* some are called w/ irq disabled, don't disturb irq status */
2209db7bccf4STejun Heo 	spin_lock_irqsave(&gcwq->lock, flags);
22103af24433SOleg Nesterov 
22113af24433SOleg Nesterov 	switch (action) {
2212db7bccf4STejun Heo 	case CPU_DOWN_PREPARE:
2213db7bccf4STejun Heo 		/* initialize trustee and tell it to acquire the gcwq */
2214db7bccf4STejun Heo 		BUG_ON(gcwq->trustee || gcwq->trustee_state != TRUSTEE_DONE);
2215db7bccf4STejun Heo 		gcwq->trustee = new_trustee;
2216db7bccf4STejun Heo 		gcwq->trustee_state = TRUSTEE_START;
2217db7bccf4STejun Heo 		wake_up_process(gcwq->trustee);
2218db7bccf4STejun Heo 		wait_trustee_state(gcwq, TRUSTEE_IN_CHARGE);
2219db7bccf4STejun Heo 		break;
2220db7bccf4STejun Heo 
22213da1c84cSOleg Nesterov 	case CPU_POST_DEAD:
2222db7bccf4STejun Heo 		gcwq->trustee_state = TRUSTEE_BUTCHER;
2223db7bccf4STejun Heo 		break;
2224db7bccf4STejun Heo 
2225db7bccf4STejun Heo 	case CPU_DOWN_FAILED:
2226db7bccf4STejun Heo 	case CPU_ONLINE:
2227db7bccf4STejun Heo 		if (gcwq->trustee_state != TRUSTEE_DONE) {
2228db7bccf4STejun Heo 			gcwq->trustee_state = TRUSTEE_RELEASE;
2229db7bccf4STejun Heo 			wake_up_process(gcwq->trustee);
2230db7bccf4STejun Heo 			wait_trustee_state(gcwq, TRUSTEE_DONE);
2231db7bccf4STejun Heo 		}
2232db7bccf4STejun Heo 
2233502ca9d8STejun Heo 		/* clear ROGUE from all workers */
2234db7bccf4STejun Heo 		list_for_each_entry(worker, &gcwq->idle_list, entry)
2235db7bccf4STejun Heo 			worker->flags &= ~WORKER_ROGUE;
2236db7bccf4STejun Heo 
2237db7bccf4STejun Heo 		for_each_busy_worker(worker, i, pos, gcwq)
2238db7bccf4STejun Heo 			worker->flags &= ~WORKER_ROGUE;
22391da177e4SLinus Torvalds 		break;
22401da177e4SLinus Torvalds 	}
2241db7bccf4STejun Heo 
2242db7bccf4STejun Heo 	spin_unlock_irqrestore(&gcwq->lock, flags);
22431da177e4SLinus Torvalds 
22441537663fSTejun Heo 	return notifier_from_errno(0);
22451da177e4SLinus Torvalds }
22461da177e4SLinus Torvalds 
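/*
 * For reference, a minimal sketch of how a CPU hotplug notifier like the
 * one above gets wired up ("my_cpu_callback" is a hypothetical example,
 * not part of this file).  CPU_TASKS_FROZEN is masked off so frozen and
 * normal hotplug events are handled alike, as done above:
 *
 *	static int __cpuinit my_cpu_callback(struct notifier_block *nfb,
 *					     unsigned long action, void *hcpu)
 *	{
 *		unsigned int cpu = (unsigned long)hcpu;
 *
 *		switch (action & ~CPU_TASKS_FROZEN) {
 *		case CPU_DOWN_PREPARE:
 *			pr_info("cpu %u going down\n", cpu);
 *			break;
 *		case CPU_ONLINE:
 *			pr_info("cpu %u online\n", cpu);
 *			break;
 *		}
 *		return notifier_from_errno(0);
 *	}
 *
 *	hotcpu_notifier(my_cpu_callback, 0);
 */
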
#ifdef CONFIG_SMP

struct work_for_cpu {
	struct completion completion;
	long (*fn)(void *);
	void *arg;
	long ret;
};

static int do_work_for_cpu(void *_wfc)
{
	struct work_for_cpu *wfc = _wfc;

	wfc->ret = wfc->fn(wfc->arg);
	complete(&wfc->completion);
	return 0;
}

/**
 * work_on_cpu - run a function in process context on a particular cpu
 * @cpu: the cpu to run on
 * @fn: the function to run
 * @arg: the function arg
 *
 * Returns the value @fn returns.
 * It is up to the caller to ensure that the cpu doesn't go offline.
 * The caller must not hold any locks which would prevent @fn from
 * completing.
 */
long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
{
	struct task_struct *sub_thread;
	struct work_for_cpu wfc = {
		.completion = COMPLETION_INITIALIZER_ONSTACK(wfc.completion),
		.fn = fn,
		.arg = arg,
	};

	sub_thread = kthread_create(do_work_for_cpu, &wfc, "work_for_cpu");
	if (IS_ERR(sub_thread))
		return PTR_ERR(sub_thread);
	kthread_bind(sub_thread, cpu);
	wake_up_process(sub_thread);
	wait_for_completion(&wfc.completion);
	return wfc.ret;
}
EXPORT_SYMBOL_GPL(work_on_cpu);
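
/*
 * Example usage (an illustrative sketch, not part of this file): run a
 * callback on CPU 3 and collect its return value.  The callback and its
 * argument are hypothetical; a real caller would typically hold
 * get_online_cpus() to keep the cpu from going away.
 *
 *	static long cpu_add_args(void *arg)
 *	{
 *		long *in = arg;
 *
 *		return in[0] + in[1];
 *	}
 *
 *	long nums[2] = { 1, 2 };
 *	long sum = work_on_cpu(3, cpu_add_args, nums);
 */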
#endif /* CONFIG_SMP */

#ifdef CONFIG_FREEZER

/**
 * freeze_workqueues_begin - begin freezing workqueues
 *
 * Start freezing workqueues.  After this function returns, all
 * freezeable workqueues will queue new works on their
 * cwq->delayed_works list instead of starting them immediately.
 * See the usage sketch after thaw_workqueues() below.
 *
 * CONTEXT:
 * Grabs and releases workqueue_lock and gcwq->lock's.
 */
void freeze_workqueues_begin(void)
{
	struct workqueue_struct *wq;
	unsigned int cpu;

	spin_lock(&workqueue_lock);

	BUG_ON(workqueue_freezing);
	workqueue_freezing = true;

	for_each_possible_cpu(cpu) {
		struct global_cwq *gcwq = get_gcwq(cpu);

		spin_lock_irq(&gcwq->lock);

		BUG_ON(gcwq->flags & GCWQ_FREEZING);
		gcwq->flags |= GCWQ_FREEZING;

		list_for_each_entry(wq, &workqueues, list) {
			struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);

			/* max_active of zero diverts new works to delayed_works */
			if (wq->flags & WQ_FREEZEABLE)
				cwq->max_active = 0;
		}

		spin_unlock_irq(&gcwq->lock);
	}

	spin_unlock(&workqueue_lock);
}

/**
 * freeze_workqueues_busy - are freezeable workqueues still busy?
 *
 * Check whether freezing is complete.  This function must be called
 * between freeze_workqueues_begin() and thaw_workqueues().
 *
 * CONTEXT:
 * Grabs and releases workqueue_lock.
 *
 * RETURNS:
 * %true if some freezeable workqueues are still busy.  %false if
 * freezing is complete.
 */
bool freeze_workqueues_busy(void)
{
	struct workqueue_struct *wq;
	unsigned int cpu;
	bool busy = false;

	spin_lock(&workqueue_lock);

	BUG_ON(!workqueue_freezing);

	for_each_possible_cpu(cpu) {
		/*
		 * nr_active only decreases while freezing is in
		 * progress, so it's safe to peek at it without
		 * grabbing gcwq->lock.
		 */
		list_for_each_entry(wq, &workqueues, list) {
			struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);

			if (!(wq->flags & WQ_FREEZEABLE))
				continue;

			BUG_ON(cwq->nr_active < 0);
			if (cwq->nr_active) {
				busy = true;
				goto out_unlock;
			}
		}
	}
out_unlock:
	spin_unlock(&workqueue_lock);
	return busy;
}

/**
 * thaw_workqueues - thaw workqueues
 *
 * Thaw workqueues.  Normal queueing is restored and all works
 * collected while frozen are transferred onto their respective cwq
 * worklists.
 *
 * CONTEXT:
 * Grabs and releases workqueue_lock and gcwq->lock's.
 */
void thaw_workqueues(void)
{
	struct workqueue_struct *wq;
	unsigned int cpu;

	spin_lock(&workqueue_lock);

	if (!workqueue_freezing)
		goto out_unlock;

	for_each_possible_cpu(cpu) {
		struct global_cwq *gcwq = get_gcwq(cpu);

		spin_lock_irq(&gcwq->lock);

		BUG_ON(!(gcwq->flags & GCWQ_FREEZING));
		gcwq->flags &= ~GCWQ_FREEZING;

		list_for_each_entry(wq, &workqueues, list) {
			struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);

			if (!(wq->flags & WQ_FREEZEABLE))
				continue;

			/* restore max_active and repopulate worklist */
			cwq->max_active = wq->saved_max_active;

			while (!list_empty(&cwq->delayed_works) &&
			       cwq->nr_active < cwq->max_active)
				cwq_activate_first_delayed(cwq);

			/* perform delayed unbind from single cpu if empty */
			if (wq->single_cpu == gcwq->cpu &&
			    !cwq->nr_active && list_empty(&cwq->delayed_works))
				cwq_unbind_single_cpu(cwq);

			wake_up_process(cwq->worker->task);
		}

		spin_unlock_irq(&gcwq->lock);
	}

	workqueue_freezing = false;
out_unlock:
	spin_unlock(&workqueue_lock);
}
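
/*
 * Roughly how the PM freezer is expected to drive the three functions
 * above (a hedged sketch; the real caller lives in kernel/power/ and
 * adds timeouts and error handling):
 *
 *	freeze_workqueues_begin();
 *	while (freeze_workqueues_busy())
 *		msleep(10);
 *	...suspend or hibernate the system...
 *	thaw_workqueues();
 */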
#endif /* CONFIG_FREEZER */

void __init init_workqueues(void)
{
	unsigned int cpu;
	int i;

	/*
	 * The pointer part of work->data either points to the cwq or
	 * contains the cpu number the work last ran on.  Make sure the
	 * cpu number can't overflow into the kernel pointer area so
	 * that the two cases can be distinguished.
	 */
	BUILD_BUG_ON(NR_CPUS << WORK_STRUCT_FLAG_BITS >= PAGE_OFFSET);
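
	/*
	 * For example (illustrative numbers only): on 32-bit x86 with
	 * the default PAGE_OFFSET of 0xC0000000, even NR_CPUS = 4096
	 * shifted left by a handful of flag bits stays far below
	 * PAGE_OFFSET (4096 << 4 == 0x10000), so an encoded cpu number
	 * can never be mistaken for a kernel pointer.
	 */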

	hotcpu_notifier(workqueue_cpu_callback, CPU_PRI_WORKQUEUE);

	/* initialize gcwqs */
	for_each_possible_cpu(cpu) {
		struct global_cwq *gcwq = get_gcwq(cpu);

		spin_lock_init(&gcwq->lock);
		gcwq->cpu = cpu;

		INIT_LIST_HEAD(&gcwq->idle_list);
		for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++)
			INIT_HLIST_HEAD(&gcwq->busy_hash[i]);

		ida_init(&gcwq->worker_ida);

		gcwq->trustee_state = TRUSTEE_DONE;
		init_waitqueue_head(&gcwq->trustee_wait);
	}

	keventd_wq = create_workqueue("events");
	BUG_ON(!keventd_wq);
}