xref: /linux-6.15/kernel/workqueue.c (revision 502ca9d8)
/*
 * linux/kernel/workqueue.c
 *
 * Generic mechanism for defining kernel helper threads for running
 * arbitrary tasks in process context.
 *
 * Started by Ingo Molnar, Copyright (C) 2002
 *
 * Derived from the taskqueue/keventd code by:
 *
 *   David Woodhouse <[email protected]>
 *   Andrew Morton
 *   Kai Petzke <[email protected]>
 *   Theodore Ts'o <[email protected]>
 *
 * Made to use alloc_percpu by Christoph Lameter.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/mempolicy.h>
#include <linux/freezer.h>
#include <linux/kallsyms.h>
#include <linux/debug_locks.h>
#include <linux/lockdep.h>
#include <linux/idr.h>

enum {
	/* global_cwq flags */
	GCWQ_FREEZING		= 1 << 3,	/* freeze in progress */

	/* worker flags */
	WORKER_STARTED		= 1 << 0,	/* started */
	WORKER_DIE		= 1 << 1,	/* die die die */
	WORKER_IDLE		= 1 << 2,	/* is idle */
	WORKER_ROGUE		= 1 << 4,	/* not bound to any cpu */

	/* gcwq->trustee_state */
	TRUSTEE_START		= 0,		/* start */
	TRUSTEE_IN_CHARGE	= 1,		/* trustee in charge of gcwq */
	TRUSTEE_BUTCHER		= 2,		/* butcher workers */
	TRUSTEE_RELEASE		= 3,		/* release workers */
	TRUSTEE_DONE		= 4,		/* trustee is done */

	BUSY_WORKER_HASH_ORDER	= 6,		/* 64 pointers */
	BUSY_WORKER_HASH_SIZE	= 1 << BUSY_WORKER_HASH_ORDER,
	BUSY_WORKER_HASH_MASK	= BUSY_WORKER_HASH_SIZE - 1,

	TRUSTEE_COOLDOWN	= HZ / 10,	/* for trustee draining */
};

/*
 * Structure fields follow one of the following exclusion rules.
 *
 * I: Set during initialization and read-only afterwards.
 *
 * L: gcwq->lock protected.  Access with gcwq->lock held.
 *
 * F: wq->flush_mutex protected.
 *
 * W: workqueue_lock protected.
 */
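/*
 * For example, worker->flags below is marked "L:" and thus may only
 * be accessed with gcwq->lock held, while worker->task is "I:" and
 * may be read without locking once the worker has been created.
 */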

struct global_cwq;
struct cpu_workqueue_struct;

struct worker {
	/* on idle list while idle, on busy hash table while busy */
	union {
		struct list_head	entry;	/* L: while idle */
		struct hlist_node	hentry;	/* L: while busy */
	};

	struct work_struct	*current_work;	/* L: work being processed */
	struct list_head	scheduled;	/* L: scheduled works */
	struct task_struct	*task;		/* I: worker task */
	struct global_cwq	*gcwq;		/* I: the associated gcwq */
	struct cpu_workqueue_struct *cwq;	/* I: the associated cwq */
	unsigned int		flags;		/* L: flags */
	int			id;		/* I: worker id */
};

/*
 * Global per-cpu workqueue.
 */
struct global_cwq {
	spinlock_t		lock;		/* the gcwq lock */
	unsigned int		cpu;		/* I: the associated cpu */
	unsigned int		flags;		/* L: GCWQ_* flags */

	int			nr_workers;	/* L: total number of workers */
	int			nr_idle;	/* L: currently idle ones */

	/* workers are chained either in the idle_list or busy_hash */
	struct list_head	idle_list;	/* L: list of idle workers */
	struct hlist_head	busy_hash[BUSY_WORKER_HASH_SIZE];
						/* L: hash of busy workers */

	struct ida		worker_ida;	/* L: for worker IDs */

	struct task_struct	*trustee;	/* L: for gcwq shutdown */
	unsigned int		trustee_state;	/* L: trustee state */
	wait_queue_head_t	trustee_wait;	/* trustee wait */
} ____cacheline_aligned_in_smp;

/*
 * The per-CPU workqueue.  The lower WORK_STRUCT_FLAG_BITS of
 * work_struct->data are used for flags, so cwqs need to be aligned
 * to 1 << WORK_STRUCT_FLAG_BITS to keep the pointer bits clear of
 * the flag bits.
 */
struct cpu_workqueue_struct {
	struct global_cwq	*gcwq;		/* I: the associated gcwq */
	struct list_head	worklist;
	struct worker		*worker;
	struct workqueue_struct *wq;		/* I: the owning workqueue */
	int			work_color;	/* L: current color */
	int			flush_color;	/* L: flushing color */
	int			nr_in_flight[WORK_NR_COLORS];
						/* L: nr of in_flight works */
	int			nr_active;	/* L: nr of active works */
	int			max_active;	/* L: max active works */
	struct list_head	delayed_works;	/* L: delayed works */
};
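
/*
 * Illustrative note: the alignment requirement above is what makes the
 * encoding in set_wq_data()/get_wq_data() below work.  work->data holds
 *
 *	[ cwq pointer (high bits) | WORK_STRUCT_* flags (low bits) ]
 *
 * so the cwq is recovered by masking with WORK_STRUCT_WQ_DATA_MASK and
 * the flag bits can never collide with valid pointer bits.
 */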

/*
 * Structure used to wait for workqueue flush.
 */
struct wq_flusher {
	struct list_head	list;		/* F: list of flushers */
	int			flush_color;	/* F: flush color waiting for */
	struct completion	done;		/* flush completion */
};

/*
 * The externally visible workqueue abstraction is an array of
 * per-CPU workqueues:
 */
struct workqueue_struct {
	unsigned int		flags;		/* I: WQ_* flags */
	struct cpu_workqueue_struct *cpu_wq;	/* I: cwq's */
	struct list_head	list;		/* W: list of all workqueues */

	struct mutex		flush_mutex;	/* protects wq flushing */
	int			work_color;	/* F: current work color */
	int			flush_color;	/* F: current flush color */
	atomic_t		nr_cwqs_to_flush; /* flush in progress */
	struct wq_flusher	*first_flusher;	/* F: first flusher */
	struct list_head	flusher_queue;	/* F: flush waiters */
	struct list_head	flusher_overflow; /* F: flush overflow list */

	unsigned long		single_cpu;	/* cpu for single cpu wq */

	int			saved_max_active; /* I: saved cwq max_active */
	const char		*name;		/* I: workqueue name */
#ifdef CONFIG_LOCKDEP
	struct lockdep_map	lockdep_map;
#endif
};

#define for_each_busy_worker(worker, i, pos, gcwq)			\
	for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++)			\
		hlist_for_each_entry(worker, pos, &gcwq->busy_hash[i], hentry)
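
/*
 * Example usage of the macro above (illustrative only), walking every
 * busy worker of a gcwq with gcwq->lock held:
 *
 *	struct worker *worker;
 *	struct hlist_node *pos;
 *	int i;
 *
 *	for_each_busy_worker(worker, i, pos, gcwq)
 *		printk(KERN_DEBUG "worker %d is busy\n", worker->id);
 */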

#ifdef CONFIG_DEBUG_OBJECTS_WORK

static struct debug_obj_descr work_debug_descr;

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static int work_fixup_init(void *addr, enum debug_obj_state state)
{
	struct work_struct *work = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		cancel_work_sync(work);
		debug_object_init(work, &work_debug_descr);
		return 1;
	default:
		return 0;
	}
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown object is activated (might be a statically initialized object)
 */
static int work_fixup_activate(void *addr, enum debug_obj_state state)
{
	struct work_struct *work = addr;

	switch (state) {

	case ODEBUG_STATE_NOTAVAILABLE:
		/*
		 * This is not really a fixup. The work struct was
		 * statically initialized. We just make sure that it
		 * is tracked in the object tracker.
		 */
		if (test_bit(WORK_STRUCT_STATIC_BIT, work_data_bits(work))) {
			debug_object_init(work, &work_debug_descr);
			debug_object_activate(work, &work_debug_descr);
			return 0;
		}
		WARN_ON_ONCE(1);
		return 0;

	case ODEBUG_STATE_ACTIVE:
		WARN_ON(1);

	default:
		return 0;
	}
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static int work_fixup_free(void *addr, enum debug_obj_state state)
{
	struct work_struct *work = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		cancel_work_sync(work);
		debug_object_free(work, &work_debug_descr);
		return 1;
	default:
		return 0;
	}
}

static struct debug_obj_descr work_debug_descr = {
	.name		= "work_struct",
	.fixup_init	= work_fixup_init,
	.fixup_activate	= work_fixup_activate,
	.fixup_free	= work_fixup_free,
};

static inline void debug_work_activate(struct work_struct *work)
{
	debug_object_activate(work, &work_debug_descr);
}

static inline void debug_work_deactivate(struct work_struct *work)
{
	debug_object_deactivate(work, &work_debug_descr);
}

void __init_work(struct work_struct *work, int onstack)
{
	if (onstack)
		debug_object_init_on_stack(work, &work_debug_descr);
	else
		debug_object_init(work, &work_debug_descr);
}
EXPORT_SYMBOL_GPL(__init_work);

void destroy_work_on_stack(struct work_struct *work)
{
	debug_object_free(work, &work_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_work_on_stack);

#else
static inline void debug_work_activate(struct work_struct *work) { }
static inline void debug_work_deactivate(struct work_struct *work) { }
#endif
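
/*
 * Illustrative usage of the on-stack debug hooks above: an on-stack
 * work item should be set up with INIT_WORK_ON_STACK() and torn down
 * with destroy_work_on_stack() before its frame goes away, e.g.
 *
 *	struct work_struct work;
 *
 *	INIT_WORK_ON_STACK(&work, my_fn);
 *	schedule_work(&work);
 *	flush_work(&work);
 *	destroy_work_on_stack(&work);
 *
 * (my_fn is a placeholder.  insert_wq_barrier() below uses the same
 * pattern for its on-stack barrier work.)
 */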

/* Serializes the accesses to the list of workqueues. */
static DEFINE_SPINLOCK(workqueue_lock);
static LIST_HEAD(workqueues);
static bool workqueue_freezing;		/* W: have wqs started freezing? */

static DEFINE_PER_CPU(struct global_cwq, global_cwq);

static int worker_thread(void *__worker);

static struct global_cwq *get_gcwq(unsigned int cpu)
{
	return &per_cpu(global_cwq, cpu);
}

static struct cpu_workqueue_struct *get_cwq(unsigned int cpu,
					    struct workqueue_struct *wq)
{
	return per_cpu_ptr(wq->cpu_wq, cpu);
}

static unsigned int work_color_to_flags(int color)
{
	return color << WORK_STRUCT_COLOR_SHIFT;
}

static int get_work_color(struct work_struct *work)
{
	return (*work_data_bits(work) >> WORK_STRUCT_COLOR_SHIFT) &
		((1 << WORK_STRUCT_COLOR_BITS) - 1);
}

static int work_next_color(int color)
{
	return (color + 1) % WORK_NR_COLORS;
}
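
/*
 * Illustrative round trip through the helpers above: a work queued at
 * color C carries work_color_to_flags(C) in work->data, get_work_color()
 * extracts C again when the work retires, and flushers advance through
 * colors with work_next_color(), wrapping modulo WORK_NR_COLORS.
 */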

/*
 * Set the workqueue on which a work item is to be run
 * - Must *only* be called if the pending flag is set
 */
static inline void set_wq_data(struct work_struct *work,
			       struct cpu_workqueue_struct *cwq,
			       unsigned long extra_flags)
{
	BUG_ON(!work_pending(work));

	atomic_long_set(&work->data, (unsigned long)cwq | work_static(work) |
			WORK_STRUCT_PENDING | extra_flags);
}

/*
 * Clear WORK_STRUCT_PENDING and the workqueue on which it was queued.
 */
static inline void clear_wq_data(struct work_struct *work)
{
	atomic_long_set(&work->data, work_static(work));
}

static inline struct cpu_workqueue_struct *get_wq_data(struct work_struct *work)
{
	return (void *)(atomic_long_read(&work->data) &
			WORK_STRUCT_WQ_DATA_MASK);
}

/**
 * busy_worker_head - return the busy hash head for a work
 * @gcwq: gcwq of interest
 * @work: work to be hashed
 *
 * Return hash head of @gcwq for @work.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 *
 * RETURNS:
 * Pointer to the hash head.
 */
static struct hlist_head *busy_worker_head(struct global_cwq *gcwq,
					   struct work_struct *work)
{
	const int base_shift = ilog2(sizeof(struct work_struct));
	unsigned long v = (unsigned long)work;

	/* simple shift and fold hash, do we need something better? */
	v >>= base_shift;
	v += v >> BUSY_WORKER_HASH_ORDER;
	v &= BUSY_WORKER_HASH_MASK;

	return &gcwq->busy_hash[v];
}
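
/*
 * Worked example (illustrative; assuming sizeof(struct work_struct) is
 * 32 so base_shift is 5): for a work at address 0x1f40,
 * v = 0x1f40 >> 5 = 0xfa; folding gives 0xfa + (0xfa >> 6) = 0xfd; and
 * masking with BUSY_WORKER_HASH_MASK (0x3f) selects bucket 0x3d of the
 * 64 hash buckets.
 */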

/**
 * insert_work - insert a work into cwq
 * @cwq: cwq @work belongs to
 * @work: work to insert
 * @head: insertion point
 * @extra_flags: extra WORK_STRUCT_* flags to set
 *
 * Insert @work into @cwq after @head.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 */
static void insert_work(struct cpu_workqueue_struct *cwq,
			struct work_struct *work, struct list_head *head,
			unsigned int extra_flags)
{
	/* we own @work, set data and link */
	set_wq_data(work, cwq, extra_flags);

	/*
	 * Ensure that we get the right work->data if we see the
	 * result of list_add() below, see try_to_grab_pending().
	 */
	smp_wmb();

	list_add_tail(&work->entry, head);
	wake_up_process(cwq->worker->task);
}

/**
 * cwq_unbind_single_cpu - unbind cwq from single cpu workqueue processing
 * @cwq: cwq to unbind
 *
 * Try to unbind @cwq from single cpu workqueue processing.  If
 * @cwq->wq is frozen, unbind is delayed till the workqueue is thawed.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 */
static void cwq_unbind_single_cpu(struct cpu_workqueue_struct *cwq)
{
	struct workqueue_struct *wq = cwq->wq;
	struct global_cwq *gcwq = cwq->gcwq;

	BUG_ON(wq->single_cpu != gcwq->cpu);
	/*
	 * Unbind from workqueue if @cwq is not frozen.  If frozen,
	 * thaw_workqueues() will either restart processing on this
	 * cpu or unbind if empty.  This keeps works queued while
	 * frozen fully ordered and flushable.
	 */
	if (likely(!(gcwq->flags & GCWQ_FREEZING))) {
		smp_wmb();	/* paired with cmpxchg() in __queue_work() */
		wq->single_cpu = NR_CPUS;
	}
}
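
/*
 * Illustrative summary of the single cpu arbitration protocol shared
 * by cwq_unbind_single_cpu() above and __queue_work() below:
 * wq->single_cpu == NR_CPUS means "unbound".  The first queuer claims
 * a cpu with cmpxchg(&wq->single_cpu, NR_CPUS, cpu), later queuers
 * follow the published cpu, and the last retiring work releases the
 * claim by writing NR_CPUS back behind an smp_wmb() (unless the gcwq
 * is frozen, in which case the release is deferred to thawing).
 */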

static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
			 struct work_struct *work)
{
	struct global_cwq *gcwq;
	struct cpu_workqueue_struct *cwq;
	struct list_head *worklist;
	unsigned long flags;
	bool arbitrate;

	debug_work_activate(work);

	/* determine gcwq to use */
	if (!(wq->flags & WQ_SINGLE_CPU)) {
		/* just use the requested cpu for multicpu workqueues */
		gcwq = get_gcwq(cpu);
		spin_lock_irqsave(&gcwq->lock, flags);
	} else {
		unsigned int req_cpu = cpu;

		/*
		 * It's a bit more complex for single cpu workqueues.
		 * We first need to determine which cpu is going to be
		 * used.  If no cpu is currently serving this
		 * workqueue, arbitrate using atomic accesses to
		 * wq->single_cpu; otherwise, use the current one.
		 */
	retry:
		cpu = wq->single_cpu;
		arbitrate = cpu == NR_CPUS;
		if (arbitrate)
			cpu = req_cpu;

		gcwq = get_gcwq(cpu);
		spin_lock_irqsave(&gcwq->lock, flags);

		/*
		 * The following cmpxchg() is a full barrier paired
		 * with smp_wmb() in cwq_unbind_single_cpu() and
		 * guarantees that all changes to wq->single_cpu are
		 * visible on the new cpu after this point.
		 */
		if (arbitrate)
			cmpxchg(&wq->single_cpu, NR_CPUS, cpu);

		if (unlikely(wq->single_cpu != cpu)) {
			spin_unlock_irqrestore(&gcwq->lock, flags);
			goto retry;
		}
	}

	/* gcwq determined, get cwq and queue */
	cwq = get_cwq(gcwq->cpu, wq);

	BUG_ON(!list_empty(&work->entry));

	cwq->nr_in_flight[cwq->work_color]++;

	if (likely(cwq->nr_active < cwq->max_active)) {
		cwq->nr_active++;
		worklist = &cwq->worklist;
	} else
		worklist = &cwq->delayed_works;

	insert_work(cwq, work, worklist, work_color_to_flags(cwq->work_color));

	spin_unlock_irqrestore(&gcwq->lock, flags);
}
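
/*
 * Illustrative note on the nr_active gating above: with max_active set
 * to 1, the first queued work goes straight onto cwq->worklist while a
 * second work queued before the first retires lands on
 * cwq->delayed_works; cwq_dec_nr_in_flight() later promotes it via
 * cwq_activate_first_delayed().
 */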

/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to the CPU on which it was submitted, but if the CPU dies
 * it can be processed by another CPU.
 */
int queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
	int ret;

	ret = queue_work_on(get_cpu(), wq, work);
	put_cpu();

	return ret;
}
EXPORT_SYMBOL_GPL(queue_work);
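
/*
 * Example usage (illustrative; my_wq and my_work_fn are placeholder
 * names, not part of this file):
 *
 *	static void my_work_fn(struct work_struct *work) { ... }
 *	static DECLARE_WORK(my_work, my_work_fn);
 *
 *	queue_work(my_wq, &my_work);
 *
 * The call returns non-zero on success and 0 if my_work was already
 * pending.
 */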

/**
 * queue_work_on - queue work on specific cpu
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to a specific CPU; the caller must ensure that
 * the CPU can't go away.
 */
int
queue_work_on(int cpu, struct workqueue_struct *wq, struct work_struct *work)
{
	int ret = 0;

	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
		__queue_work(cpu, wq, work);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_work_on);

static void delayed_work_timer_fn(unsigned long __data)
{
	struct delayed_work *dwork = (struct delayed_work *)__data;
	struct cpu_workqueue_struct *cwq = get_wq_data(&dwork->work);

	__queue_work(smp_processor_id(), cwq->wq, &dwork->work);
}

/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @dwork was already on a queue, non-zero otherwise.
 */
int queue_delayed_work(struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	if (delay == 0)
		return queue_work(wq, &dwork->work);

	return queue_delayed_work_on(-1, wq, dwork, delay);
}
EXPORT_SYMBOL_GPL(queue_delayed_work);

/**
 * queue_delayed_work_on - queue work on specific CPU after delay
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @dwork was already on a queue, non-zero otherwise.
 */
int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	int ret = 0;
	struct timer_list *timer = &dwork->timer;
	struct work_struct *work = &dwork->work;

	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
		BUG_ON(timer_pending(timer));
		BUG_ON(!list_empty(&work->entry));

		timer_stats_timer_set_start_info(&dwork->timer);

		/* This stores cwq for the moment, for the timer_fn */
		set_wq_data(work, get_cwq(raw_smp_processor_id(), wq), 0);
		timer->expires = jiffies + delay;
		timer->data = (unsigned long)dwork;
		timer->function = delayed_work_timer_fn;

		if (unlikely(cpu >= 0))
			add_timer_on(timer, cpu);
		else
			add_timer(timer);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_delayed_work_on);
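
/*
 * Example usage (illustrative; my_wq and my_dwork_fn are placeholder
 * names):
 *
 *	static void my_dwork_fn(struct work_struct *work) { ... }
 *	static DECLARE_DELAYED_WORK(my_dwork, my_dwork_fn);
 *
 *	queue_delayed_work(my_wq, &my_dwork, HZ);
 *
 * queues my_dwork to run roughly one second (HZ jiffies) later.
 */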

/**
 * worker_enter_idle - enter idle state
 * @worker: worker which is entering idle state
 *
 * @worker is entering idle state.  Update stats and idle timer if
 * necessary.
 *
 * LOCKING:
 * spin_lock_irq(gcwq->lock).
 */
static void worker_enter_idle(struct worker *worker)
{
	struct global_cwq *gcwq = worker->gcwq;

	BUG_ON(worker->flags & WORKER_IDLE);
	BUG_ON(!list_empty(&worker->entry) &&
	       (worker->hentry.next || worker->hentry.pprev));

	worker->flags |= WORKER_IDLE;
	gcwq->nr_idle++;

	/* idle_list is LIFO */
	list_add(&worker->entry, &gcwq->idle_list);

	if (unlikely(worker->flags & WORKER_ROGUE))
		wake_up_all(&gcwq->trustee_wait);
}

/**
 * worker_leave_idle - leave idle state
 * @worker: worker which is leaving idle state
 *
 * @worker is leaving idle state.  Update stats.
 *
 * LOCKING:
 * spin_lock_irq(gcwq->lock).
 */
static void worker_leave_idle(struct worker *worker)
{
	struct global_cwq *gcwq = worker->gcwq;

	BUG_ON(!(worker->flags & WORKER_IDLE));
	worker->flags &= ~WORKER_IDLE;
	gcwq->nr_idle--;
	list_del_init(&worker->entry);
}

static struct worker *alloc_worker(void)
{
	struct worker *worker;

	worker = kzalloc(sizeof(*worker), GFP_KERNEL);
	if (worker) {
		INIT_LIST_HEAD(&worker->entry);
		INIT_LIST_HEAD(&worker->scheduled);
	}
	return worker;
}

/**
 * create_worker - create a new workqueue worker
 * @cwq: cwq the new worker will belong to
 * @bind: whether to bind the worker to the gcwq's cpu or not
 *
 * Create a new worker which is bound to @cwq.  The returned worker
 * can be started by calling start_worker() or destroyed using
 * destroy_worker().
 *
 * CONTEXT:
 * Might sleep.  Does GFP_KERNEL allocations.
 *
 * RETURNS:
 * Pointer to the newly created worker.
 */
static struct worker *create_worker(struct cpu_workqueue_struct *cwq, bool bind)
{
	struct global_cwq *gcwq = cwq->gcwq;
	int id = -1;
	struct worker *worker = NULL;

	spin_lock_irq(&gcwq->lock);
	while (ida_get_new(&gcwq->worker_ida, &id)) {
		spin_unlock_irq(&gcwq->lock);
		if (!ida_pre_get(&gcwq->worker_ida, GFP_KERNEL))
			goto fail;
		spin_lock_irq(&gcwq->lock);
	}
	spin_unlock_irq(&gcwq->lock);

	worker = alloc_worker();
	if (!worker)
		goto fail;

	worker->gcwq = gcwq;
	worker->cwq = cwq;
	worker->id = id;

	worker->task = kthread_create(worker_thread, worker, "kworker/%u:%d",
				      gcwq->cpu, id);
	if (IS_ERR(worker->task))
		goto fail;

	/*
	 * A rogue worker will become a regular one if the CPU comes
	 * online later on.  Make sure every worker has
	 * PF_THREAD_BOUND set.
	 */
	if (bind)
		kthread_bind(worker->task, gcwq->cpu);
	else
		worker->task->flags |= PF_THREAD_BOUND;

	return worker;
fail:
	if (id >= 0) {
		spin_lock_irq(&gcwq->lock);
		ida_remove(&gcwq->worker_ida, id);
		spin_unlock_irq(&gcwq->lock);
	}
	kfree(worker);
	return NULL;
}
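
/*
 * Note on the ID allocation above: this is the classic ida idiom of
 * this era.  ida_get_new() can fail for lack of preallocated memory,
 * so the loop drops gcwq->lock, preallocates with ida_pre_get() (which
 * may sleep), and retries the allocation under the lock.
 */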

/**
 * start_worker - start a newly created worker
 * @worker: worker to start
 *
 * Make the gcwq aware of @worker and start it.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 */
static void start_worker(struct worker *worker)
{
	worker->flags |= WORKER_STARTED;
	worker->gcwq->nr_workers++;
	worker_enter_idle(worker);
	wake_up_process(worker->task);
}

/**
 * destroy_worker - destroy a workqueue worker
 * @worker: worker to be destroyed
 *
 * Destroy @worker and adjust @gcwq stats accordingly.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock) which is released and regrabbed.
 */
static void destroy_worker(struct worker *worker)
{
	struct global_cwq *gcwq = worker->gcwq;
	int id = worker->id;

	/* sanity check frenzy */
	BUG_ON(worker->current_work);
	BUG_ON(!list_empty(&worker->scheduled));

	if (worker->flags & WORKER_STARTED)
		gcwq->nr_workers--;
	if (worker->flags & WORKER_IDLE)
		gcwq->nr_idle--;

	list_del_init(&worker->entry);
	worker->flags |= WORKER_DIE;

	spin_unlock_irq(&gcwq->lock);

	kthread_stop(worker->task);
	kfree(worker);

	spin_lock_irq(&gcwq->lock);
	ida_remove(&gcwq->worker_ida, id);
}
/**
 * move_linked_works - move linked works to a list
 * @work: start of series of works to be scheduled
 * @head: target list to append @work to
 * @nextp: out parameter for nested worklist walking
 *
 * Schedule linked works starting from @work to @head.  Work series to
 * be scheduled starts at @work and includes any consecutive work with
 * WORK_STRUCT_LINKED set in its predecessor.
 *
 * If @nextp is not NULL, it's updated to point to the next work of
 * the last scheduled work.  This allows move_linked_works() to be
 * nested inside outer list_for_each_entry_safe().
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 */
static void move_linked_works(struct work_struct *work, struct list_head *head,
			      struct work_struct **nextp)
{
	struct work_struct *n;

	/*
	 * Linked worklist will always end before the end of the list,
	 * use NULL for list head.
	 */
	list_for_each_entry_safe_from(work, n, NULL, entry) {
		list_move_tail(&work->entry, head);
		if (!(*work_data_bits(work) & WORK_STRUCT_LINKED))
			break;
	}

	/*
	 * If we're already inside safe list traversal and have moved
	 * multiple works to the scheduled queue, the next position
	 * needs to be updated.
	 */
	if (nextp)
		*nextp = n;
}

static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq)
{
	struct work_struct *work = list_first_entry(&cwq->delayed_works,
						    struct work_struct, entry);

	move_linked_works(work, &cwq->worklist, NULL);
	cwq->nr_active++;
}

/**
 * cwq_dec_nr_in_flight - decrement cwq's nr_in_flight
 * @cwq: cwq of interest
 * @color: color of work which left the queue
 *
 * A work either has completed or is removed from pending queue,
 * decrement nr_in_flight of its cwq and handle workqueue flushing.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 */
static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color)
{
	/* ignore uncolored works */
	if (color == WORK_NO_COLOR)
		return;

	cwq->nr_in_flight[color]--;
	cwq->nr_active--;

	if (!list_empty(&cwq->delayed_works)) {
		/* one down, submit a delayed one */
		if (cwq->nr_active < cwq->max_active)
			cwq_activate_first_delayed(cwq);
	} else if (!cwq->nr_active && cwq->wq->flags & WQ_SINGLE_CPU) {
		/* this was the last work, unbind from single cpu */
		cwq_unbind_single_cpu(cwq);
	}

	/* is flush in progress and are we at the flushing tip? */
	if (likely(cwq->flush_color != color))
		return;

	/* are there still in-flight works? */
	if (cwq->nr_in_flight[color])
		return;

	/* this cwq is done, clear flush_color */
	cwq->flush_color = -1;

	/*
	 * If this was the last cwq, wake up the first flusher.  It
	 * will handle the rest.
	 */
	if (atomic_dec_and_test(&cwq->wq->nr_cwqs_to_flush))
		complete(&cwq->wq->first_flusher->done);
}

/**
 * process_one_work - process single work
 * @worker: self
 * @work: work to process
 *
 * Process @work.  This function contains all the logic necessary to
 * process a single work including synchronization against and
 * interaction with other workers on the same cpu, queueing and
 * flushing.  As long as the context requirement is met, any worker can
 * call this function to process a work.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock) which is released and regrabbed.
 */
static void process_one_work(struct worker *worker, struct work_struct *work)
{
	struct cpu_workqueue_struct *cwq = worker->cwq;
	struct global_cwq *gcwq = cwq->gcwq;
	struct hlist_head *bwh = busy_worker_head(gcwq, work);
	work_func_t f = work->func;
	int work_color;
#ifdef CONFIG_LOCKDEP
	/*
	 * It is permissible to free the struct work_struct from
	 * inside the function that is called from it; this we need to
	 * take into account for lockdep too.  To avoid bogus "held
	 * lock freed" warnings as well as problems when looking into
	 * work->lockdep_map, make a copy and use that here.
	 */
	struct lockdep_map lockdep_map = work->lockdep_map;
#endif
	/* claim and process */
	debug_work_deactivate(work);
	hlist_add_head(&worker->hentry, bwh);
	worker->current_work = work;
	work_color = get_work_color(work);
	list_del_init(&work->entry);

	spin_unlock_irq(&gcwq->lock);

	BUG_ON(get_wq_data(work) != cwq);
	work_clear_pending(work);
	lock_map_acquire(&cwq->wq->lockdep_map);
	lock_map_acquire(&lockdep_map);
	f(work);
	lock_map_release(&lockdep_map);
	lock_map_release(&cwq->wq->lockdep_map);

	if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
		printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
		       "%s/0x%08x/%d\n",
		       current->comm, preempt_count(), task_pid_nr(current));
		printk(KERN_ERR "    last function: ");
		print_symbol("%s\n", (unsigned long)f);
		debug_show_held_locks(current);
		dump_stack();
	}

	spin_lock_irq(&gcwq->lock);

	/* we're done with it, release */
	hlist_del_init(&worker->hentry);
	worker->current_work = NULL;
	cwq_dec_nr_in_flight(cwq, work_color);
}

/**
 * process_scheduled_works - process scheduled works
 * @worker: self
 *
 * Process all scheduled works.  Please note that the scheduled list
 * may change while processing a work, so this function repeatedly
 * fetches a work from the top and executes it.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock) which may be released and regrabbed
 * multiple times.
 */
static void process_scheduled_works(struct worker *worker)
{
	while (!list_empty(&worker->scheduled)) {
		struct work_struct *work = list_first_entry(&worker->scheduled,
						struct work_struct, entry);
		process_one_work(worker, work);
	}
}

/**
 * worker_thread - the worker thread function
 * @__worker: self
 *
 * The cwq worker thread function.
 */
static int worker_thread(void *__worker)
{
	struct worker *worker = __worker;
	struct global_cwq *gcwq = worker->gcwq;
	struct cpu_workqueue_struct *cwq = worker->cwq;

woke_up:
	spin_lock_irq(&gcwq->lock);

	/* DIE can be set only while we're idle, checking here is enough */
	if (worker->flags & WORKER_DIE) {
		spin_unlock_irq(&gcwq->lock);
		return 0;
	}

	worker_leave_idle(worker);
recheck:
	/*
	 * ->scheduled list can only be filled while a worker is
	 * preparing to process a work or actually processing it.
	 * Make sure nobody diddled with it while I was sleeping.
	 */
	BUG_ON(!list_empty(&worker->scheduled));

	while (!list_empty(&cwq->worklist)) {
		struct work_struct *work =
			list_first_entry(&cwq->worklist,
					 struct work_struct, entry);

		/*
		 * The following is a rather inefficient way to close
		 * the race window against cpu hotplug operations.
		 * Will be replaced soon.
		 */
		if (unlikely(!(worker->flags & WORKER_ROGUE) &&
			     !cpumask_equal(&worker->task->cpus_allowed,
					    get_cpu_mask(gcwq->cpu)))) {
			spin_unlock_irq(&gcwq->lock);
			set_cpus_allowed_ptr(worker->task,
					     get_cpu_mask(gcwq->cpu));
			cpu_relax();
			spin_lock_irq(&gcwq->lock);
			goto recheck;
		}

		if (likely(!(*work_data_bits(work) & WORK_STRUCT_LINKED))) {
			/* optimization path, not strictly necessary */
			process_one_work(worker, work);
			if (unlikely(!list_empty(&worker->scheduled)))
				process_scheduled_works(worker);
		} else {
			move_linked_works(work, &worker->scheduled, NULL);
			process_scheduled_works(worker);
		}
	}

	/*
	 * gcwq->lock is held and there's no work to process, sleep.
	 * Workers are woken up only while holding gcwq->lock, so
	 * setting the current state before releasing gcwq->lock is
	 * enough to prevent losing any event.
	 */
	worker_enter_idle(worker);
	__set_current_state(TASK_INTERRUPTIBLE);
	spin_unlock_irq(&gcwq->lock);
	schedule();
	goto woke_up;
}

struct wq_barrier {
	struct work_struct	work;
	struct completion	done;
};

static void wq_barrier_func(struct work_struct *work)
{
	struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
	complete(&barr->done);
}

/**
 * insert_wq_barrier - insert a barrier work
 * @cwq: cwq to insert barrier into
 * @barr: wq_barrier to insert
 * @target: target work to attach @barr to
 * @worker: worker currently executing @target, NULL if @target is not executing
 *
 * @barr is linked to @target such that @barr is completed only after
 * @target finishes execution.  Please note that the ordering
 * guarantee is observed only with respect to @target and on the local
 * cpu.
 *
 * Currently, a queued barrier can't be canceled.  This is because
 * try_to_grab_pending() can't determine whether the work to be
 * grabbed is at the head of the queue and thus can't clear LINKED
 * flag of the previous work while there must be a valid next work
 * after a work with LINKED flag set.
 *
 * Note that when @worker is non-NULL, @target may be modified
 * underneath us, so we can't reliably determine cwq from @target.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 */
static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
			      struct wq_barrier *barr,
			      struct work_struct *target, struct worker *worker)
{
	struct list_head *head;
	unsigned int linked = 0;

	/*
	 * debugobject calls are safe here even with gcwq->lock locked
	 * as we know for sure that this will not trigger any of the
	 * checks and call back into the fixup functions where we
	 * might deadlock.
	 */
	INIT_WORK_ON_STACK(&barr->work, wq_barrier_func);
	__set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work));
	init_completion(&barr->done);

	/*
	 * If @target is currently being executed, schedule the
	 * barrier to the worker; otherwise, put it after @target.
	 */
	if (worker)
		head = worker->scheduled.next;
	else {
		unsigned long *bits = work_data_bits(target);

		head = target->entry.next;
		/* there can already be other linked works, inherit and set */
		linked = *bits & WORK_STRUCT_LINKED;
		__set_bit(WORK_STRUCT_LINKED_BIT, bits);
	}

	debug_work_activate(&barr->work);
	insert_work(cwq, &barr->work, head,
		    work_color_to_flags(WORK_NO_COLOR) | linked);
}
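
/*
 * Typical caller pattern (illustrative): a flusher declares an
 * on-stack struct wq_barrier, calls insert_wq_barrier() with
 * gcwq->lock held, drops the lock, and then sleeps in
 * wait_for_completion(&barr.done) until wq_barrier_func() runs after
 * the target work has finished.
 */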

/**
 * flush_workqueue_prep_cwqs - prepare cwqs for workqueue flushing
 * @wq: workqueue being flushed
 * @flush_color: new flush color, < 0 for no-op
 * @work_color: new work color, < 0 for no-op
 *
 * Prepare cwqs for workqueue flushing.
 *
 * If @flush_color is non-negative, flush_color on all cwqs should be
 * -1.  If no cwq has in-flight works at the specified color, all
 * cwq->flush_color's stay at -1 and %false is returned.  If any cwq
 * has in-flight works, its cwq->flush_color is set to
 * @flush_color, @wq->nr_cwqs_to_flush is updated accordingly, cwq
 * wakeup logic is armed and %true is returned.
 *
 * The caller should have initialized @wq->first_flusher prior to
 * calling this function with non-negative @flush_color.  If
 * @flush_color is negative, no flush color update is done and %false
 * is returned.
 *
 * If @work_color is non-negative, all cwqs should have the same
 * work_color which is previous to @work_color and all will be
 * advanced to @work_color.
 *
 * CONTEXT:
 * mutex_lock(wq->flush_mutex).
 *
 * RETURNS:
 * %true if @flush_color >= 0 and there's something to flush.  %false
 * otherwise.
 */
114773f53c4aSTejun Heo static bool flush_workqueue_prep_cwqs(struct workqueue_struct *wq,
114873f53c4aSTejun Heo 				      int flush_color, int work_color)
11491da177e4SLinus Torvalds {
115073f53c4aSTejun Heo 	bool wait = false;
115173f53c4aSTejun Heo 	unsigned int cpu;
11521da177e4SLinus Torvalds 
115373f53c4aSTejun Heo 	if (flush_color >= 0) {
115473f53c4aSTejun Heo 		BUG_ON(atomic_read(&wq->nr_cwqs_to_flush));
115573f53c4aSTejun Heo 		atomic_set(&wq->nr_cwqs_to_flush, 1);
115673f53c4aSTejun Heo 	}
115773f53c4aSTejun Heo 
115873f53c4aSTejun Heo 	for_each_possible_cpu(cpu) {
115973f53c4aSTejun Heo 		struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
11608b03ae3cSTejun Heo 		struct global_cwq *gcwq = cwq->gcwq;
11612355b70fSLai Jiangshan 
11628b03ae3cSTejun Heo 		spin_lock_irq(&gcwq->lock);
116373f53c4aSTejun Heo 
116473f53c4aSTejun Heo 		if (flush_color >= 0) {
116573f53c4aSTejun Heo 			BUG_ON(cwq->flush_color != -1);
116673f53c4aSTejun Heo 
116773f53c4aSTejun Heo 			if (cwq->nr_in_flight[flush_color]) {
116873f53c4aSTejun Heo 				cwq->flush_color = flush_color;
116973f53c4aSTejun Heo 				atomic_inc(&wq->nr_cwqs_to_flush);
117073f53c4aSTejun Heo 				wait = true;
117183c22520SOleg Nesterov 			}
117273f53c4aSTejun Heo 		}
117373f53c4aSTejun Heo 
117473f53c4aSTejun Heo 		if (work_color >= 0) {
117573f53c4aSTejun Heo 			BUG_ON(work_color != work_next_color(cwq->work_color));
117673f53c4aSTejun Heo 			cwq->work_color = work_color;
117773f53c4aSTejun Heo 		}
117873f53c4aSTejun Heo 
11798b03ae3cSTejun Heo 		spin_unlock_irq(&gcwq->lock);
1180dc186ad7SThomas Gleixner 	}
118114441960SOleg Nesterov 
118273f53c4aSTejun Heo 	if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_cwqs_to_flush))
118373f53c4aSTejun Heo 		complete(&wq->first_flusher->done);
118473f53c4aSTejun Heo 
118573f53c4aSTejun Heo 	return wait;
118683c22520SOleg Nesterov }
11871da177e4SLinus Torvalds 
11880fcb78c2SRolf Eike Beer /**
11891da177e4SLinus Torvalds  * flush_workqueue - ensure that any scheduled work has run to completion.
11900fcb78c2SRolf Eike Beer  * @wq: workqueue to flush
11911da177e4SLinus Torvalds  *
11921da177e4SLinus Torvalds  * Forces execution of the workqueue and blocks until its completion.
11931da177e4SLinus Torvalds  * This is typically used in driver shutdown handlers.
11941da177e4SLinus Torvalds  *
1195fc2e4d70SOleg Nesterov  * We sleep until all works which were queued on entry have been handled,
1196fc2e4d70SOleg Nesterov  * but we are not livelocked by new incoming ones.
11971da177e4SLinus Torvalds  */
11987ad5b3a5SHarvey Harrison void flush_workqueue(struct workqueue_struct *wq)
11991da177e4SLinus Torvalds {
120073f53c4aSTejun Heo 	struct wq_flusher this_flusher = {
120173f53c4aSTejun Heo 		.list = LIST_HEAD_INIT(this_flusher.list),
120273f53c4aSTejun Heo 		.flush_color = -1,
120373f53c4aSTejun Heo 		.done = COMPLETION_INITIALIZER_ONSTACK(this_flusher.done),
120473f53c4aSTejun Heo 	};
120573f53c4aSTejun Heo 	int next_color;
1206b1f4ec17SOleg Nesterov 
12073295f0efSIngo Molnar 	lock_map_acquire(&wq->lockdep_map);
12083295f0efSIngo Molnar 	lock_map_release(&wq->lockdep_map);
120973f53c4aSTejun Heo 
121073f53c4aSTejun Heo 	mutex_lock(&wq->flush_mutex);
121173f53c4aSTejun Heo 
121273f53c4aSTejun Heo 	/*
121373f53c4aSTejun Heo 	 * Start-to-wait phase
121473f53c4aSTejun Heo 	 */
121573f53c4aSTejun Heo 	next_color = work_next_color(wq->work_color);
121673f53c4aSTejun Heo 
121773f53c4aSTejun Heo 	if (next_color != wq->flush_color) {
121873f53c4aSTejun Heo 		/*
121973f53c4aSTejun Heo 		 * Color space is not full.  The current work_color
122073f53c4aSTejun Heo 		 * becomes our flush_color and work_color is advanced
122173f53c4aSTejun Heo 		 * by one.
122273f53c4aSTejun Heo 		 */
122373f53c4aSTejun Heo 		BUG_ON(!list_empty(&wq->flusher_overflow));
122473f53c4aSTejun Heo 		this_flusher.flush_color = wq->work_color;
122573f53c4aSTejun Heo 		wq->work_color = next_color;
122673f53c4aSTejun Heo 
122773f53c4aSTejun Heo 		if (!wq->first_flusher) {
122873f53c4aSTejun Heo 			/* no flush in progress, become the first flusher */
122973f53c4aSTejun Heo 			BUG_ON(wq->flush_color != this_flusher.flush_color);
123073f53c4aSTejun Heo 
123173f53c4aSTejun Heo 			wq->first_flusher = &this_flusher;
123273f53c4aSTejun Heo 
123373f53c4aSTejun Heo 			if (!flush_workqueue_prep_cwqs(wq, wq->flush_color,
123473f53c4aSTejun Heo 						       wq->work_color)) {
123573f53c4aSTejun Heo 				/* nothing to flush, done */
123673f53c4aSTejun Heo 				wq->flush_color = next_color;
123773f53c4aSTejun Heo 				wq->first_flusher = NULL;
123873f53c4aSTejun Heo 				goto out_unlock;
123973f53c4aSTejun Heo 			}
124073f53c4aSTejun Heo 		} else {
124173f53c4aSTejun Heo 			/* wait in queue */
124273f53c4aSTejun Heo 			BUG_ON(wq->flush_color == this_flusher.flush_color);
124373f53c4aSTejun Heo 			list_add_tail(&this_flusher.list, &wq->flusher_queue);
124473f53c4aSTejun Heo 			flush_workqueue_prep_cwqs(wq, -1, wq->work_color);
124573f53c4aSTejun Heo 		}
124673f53c4aSTejun Heo 	} else {
124773f53c4aSTejun Heo 		/*
124873f53c4aSTejun Heo 		 * Oops, color space is full, wait on overflow queue.
124973f53c4aSTejun Heo 		 * The next flush completion will assign us
125073f53c4aSTejun Heo 		 * flush_color and transfer to flusher_queue.
125173f53c4aSTejun Heo 		 */
125273f53c4aSTejun Heo 		list_add_tail(&this_flusher.list, &wq->flusher_overflow);
125373f53c4aSTejun Heo 	}
125473f53c4aSTejun Heo 
125573f53c4aSTejun Heo 	mutex_unlock(&wq->flush_mutex);
125673f53c4aSTejun Heo 
125773f53c4aSTejun Heo 	wait_for_completion(&this_flusher.done);
125873f53c4aSTejun Heo 
125973f53c4aSTejun Heo 	/*
126073f53c4aSTejun Heo 	 * Wake-up-and-cascade phase
126173f53c4aSTejun Heo 	 *
126273f53c4aSTejun Heo 	 * First flushers are responsible for cascading flushes and
126373f53c4aSTejun Heo 	 * handling overflow.  Non-first flushers can simply return.
126473f53c4aSTejun Heo 	 */
126573f53c4aSTejun Heo 	if (wq->first_flusher != &this_flusher)
126673f53c4aSTejun Heo 		return;
126773f53c4aSTejun Heo 
126873f53c4aSTejun Heo 	mutex_lock(&wq->flush_mutex);
126973f53c4aSTejun Heo 
127073f53c4aSTejun Heo 	wq->first_flusher = NULL;
127173f53c4aSTejun Heo 
127273f53c4aSTejun Heo 	BUG_ON(!list_empty(&this_flusher.list));
127373f53c4aSTejun Heo 	BUG_ON(wq->flush_color != this_flusher.flush_color);
127473f53c4aSTejun Heo 
127573f53c4aSTejun Heo 	while (true) {
127673f53c4aSTejun Heo 		struct wq_flusher *next, *tmp;
127773f53c4aSTejun Heo 
127873f53c4aSTejun Heo 		/* complete all the flushers sharing the current flush color */
127973f53c4aSTejun Heo 		list_for_each_entry_safe(next, tmp, &wq->flusher_queue, list) {
128073f53c4aSTejun Heo 			if (next->flush_color != wq->flush_color)
128173f53c4aSTejun Heo 				break;
128273f53c4aSTejun Heo 			list_del_init(&next->list);
128373f53c4aSTejun Heo 			complete(&next->done);
128473f53c4aSTejun Heo 		}
128573f53c4aSTejun Heo 
128673f53c4aSTejun Heo 		BUG_ON(!list_empty(&wq->flusher_overflow) &&
128773f53c4aSTejun Heo 		       wq->flush_color != work_next_color(wq->work_color));
128873f53c4aSTejun Heo 
128973f53c4aSTejun Heo 		/* this flush_color is finished, advance by one */
129073f53c4aSTejun Heo 		wq->flush_color = work_next_color(wq->flush_color);
129173f53c4aSTejun Heo 
129273f53c4aSTejun Heo 		/* one color has been freed, handle overflow queue */
129373f53c4aSTejun Heo 		if (!list_empty(&wq->flusher_overflow)) {
129473f53c4aSTejun Heo 			/*
129573f53c4aSTejun Heo 			 * Assign the same color to all overflowed
129673f53c4aSTejun Heo 			 * flushers, advance work_color and append to
129773f53c4aSTejun Heo 			 * flusher_queue.  This is the start-to-wait
129873f53c4aSTejun Heo 			 * phase for these overflowed flushers.
129973f53c4aSTejun Heo 			 */
130073f53c4aSTejun Heo 			list_for_each_entry(tmp, &wq->flusher_overflow, list)
130173f53c4aSTejun Heo 				tmp->flush_color = wq->work_color;
130273f53c4aSTejun Heo 
130373f53c4aSTejun Heo 			wq->work_color = work_next_color(wq->work_color);
130473f53c4aSTejun Heo 
130573f53c4aSTejun Heo 			list_splice_tail_init(&wq->flusher_overflow,
130673f53c4aSTejun Heo 					      &wq->flusher_queue);
130773f53c4aSTejun Heo 			flush_workqueue_prep_cwqs(wq, -1, wq->work_color);
130873f53c4aSTejun Heo 		}
130973f53c4aSTejun Heo 
131073f53c4aSTejun Heo 		if (list_empty(&wq->flusher_queue)) {
131173f53c4aSTejun Heo 			BUG_ON(wq->flush_color != wq->work_color);
131273f53c4aSTejun Heo 			break;
131373f53c4aSTejun Heo 		}
131473f53c4aSTejun Heo 
131573f53c4aSTejun Heo 		/*
131673f53c4aSTejun Heo 		 * Need to flush more colors.  Make the next flusher
131773f53c4aSTejun Heo 		 * the new first flusher and arm cwqs.
131873f53c4aSTejun Heo 		 */
131973f53c4aSTejun Heo 		BUG_ON(wq->flush_color == wq->work_color);
132073f53c4aSTejun Heo 		BUG_ON(wq->flush_color != next->flush_color);
132173f53c4aSTejun Heo 
132273f53c4aSTejun Heo 		list_del_init(&next->list);
132373f53c4aSTejun Heo 		wq->first_flusher = next;
132473f53c4aSTejun Heo 
132573f53c4aSTejun Heo 		if (flush_workqueue_prep_cwqs(wq, wq->flush_color, -1))
132673f53c4aSTejun Heo 			break;
132773f53c4aSTejun Heo 
132873f53c4aSTejun Heo 		/*
132973f53c4aSTejun Heo 		 * Meh... this color is already done, clear first
133073f53c4aSTejun Heo 		 * flusher and repeat cascading.
133173f53c4aSTejun Heo 		 */
133273f53c4aSTejun Heo 		wq->first_flusher = NULL;
133373f53c4aSTejun Heo 	}
133473f53c4aSTejun Heo 
133573f53c4aSTejun Heo out_unlock:
133673f53c4aSTejun Heo 	mutex_unlock(&wq->flush_mutex);
13371da177e4SLinus Torvalds }
1338ae90dd5dSDave Jones EXPORT_SYMBOL_GPL(flush_workqueue);
13391da177e4SLinus Torvalds 
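/*
 * Usage sketch (illustrative, not part of the original source): a
 * driver shutdown path typically stops producing new work and then
 * flushes its workqueue.  The my_dev structure and its fields are
 * hypothetical.
 *
 *	static void my_dev_remove(struct my_dev *dev)
 *	{
 *		dev->stopping = true;		(no new work from here on)
 *		flush_workqueue(dev->wq);	(all queued works have run)
 *		destroy_workqueue(dev->wq);
 *	}
 */
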
1340db700897SOleg Nesterov /**
1341db700897SOleg Nesterov  * flush_work - block until a work_struct's callback has terminated
1342db700897SOleg Nesterov  * @work: the work which is to be flushed
1343db700897SOleg Nesterov  *
1344a67da70dSOleg Nesterov  * Returns 0 if @work has already terminated, 1 otherwise.
1345a67da70dSOleg Nesterov  *
1346db700897SOleg Nesterov  * It is expected that, prior to calling flush_work(), the caller has
1347db700897SOleg Nesterov  * arranged for the work to not be requeued, otherwise it doesn't make
1348db700897SOleg Nesterov  * sense to use this function.
1349db700897SOleg Nesterov  */
1350db700897SOleg Nesterov int flush_work(struct work_struct *work)
1351db700897SOleg Nesterov {
1352affee4b2STejun Heo 	struct worker *worker = NULL;
1353db700897SOleg Nesterov 	struct cpu_workqueue_struct *cwq;
13548b03ae3cSTejun Heo 	struct global_cwq *gcwq;
1355db700897SOleg Nesterov 	struct wq_barrier barr;
1356db700897SOleg Nesterov 
1357db700897SOleg Nesterov 	might_sleep();
1358db700897SOleg Nesterov 	cwq = get_wq_data(work);
1359db700897SOleg Nesterov 	if (!cwq)
1360db700897SOleg Nesterov 		return 0;
13618b03ae3cSTejun Heo 	gcwq = cwq->gcwq;
1362db700897SOleg Nesterov 
13633295f0efSIngo Molnar 	lock_map_acquire(&cwq->wq->lockdep_map);
13643295f0efSIngo Molnar 	lock_map_release(&cwq->wq->lockdep_map);
1365a67da70dSOleg Nesterov 
13668b03ae3cSTejun Heo 	spin_lock_irq(&gcwq->lock);
1367db700897SOleg Nesterov 	if (!list_empty(&work->entry)) {
1368db700897SOleg Nesterov 		/*
1369db700897SOleg Nesterov 		 * See the comment near try_to_grab_pending()->smp_rmb().
1370db700897SOleg Nesterov 		 * If it was re-queued under us we are not going to wait.
1371db700897SOleg Nesterov 		 */
1372db700897SOleg Nesterov 		smp_rmb();
1373db700897SOleg Nesterov 		if (unlikely(cwq != get_wq_data(work)))
13744690c4abSTejun Heo 			goto already_gone;
1375db700897SOleg Nesterov 	} else {
1376affee4b2STejun Heo 		if (cwq->worker && cwq->worker->current_work == work)
1377affee4b2STejun Heo 			worker = cwq->worker;
1378affee4b2STejun Heo 		if (!worker)
13794690c4abSTejun Heo 			goto already_gone;
1380db700897SOleg Nesterov 	}
1381db700897SOleg Nesterov 
1382affee4b2STejun Heo 	insert_wq_barrier(cwq, &barr, work, worker);
13838b03ae3cSTejun Heo 	spin_unlock_irq(&gcwq->lock);
1384db700897SOleg Nesterov 	wait_for_completion(&barr.done);
1385dc186ad7SThomas Gleixner 	destroy_work_on_stack(&barr.work);
1386db700897SOleg Nesterov 	return 1;
13874690c4abSTejun Heo already_gone:
13888b03ae3cSTejun Heo 	spin_unlock_irq(&gcwq->lock);
13894690c4abSTejun Heo 	return 0;
1390db700897SOleg Nesterov }
1391db700897SOleg Nesterov EXPORT_SYMBOL_GPL(flush_work);
1392db700897SOleg Nesterov 
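/*
 * Usage sketch (illustrative, not part of the original source): wait
 * for one specific work item instead of a whole workqueue.  As the
 * comment above requires, the hypothetical dev->stopping flag stops
 * the item from being requeued first.
 *
 *	dev->stopping = true;
 *	if (!flush_work(&dev->work))
 *		pr_debug("work had already terminated\n");
 */
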
13936e84d644SOleg Nesterov /*
13941f1f642eSOleg Nesterov  * Upon a successful return (>= 0), the caller "owns" WORK_STRUCT_PENDING bit,
13956e84d644SOleg Nesterov  * so this work can't be re-armed in any way.
13966e84d644SOleg Nesterov  */
13976e84d644SOleg Nesterov static int try_to_grab_pending(struct work_struct *work)
13986e84d644SOleg Nesterov {
13998b03ae3cSTejun Heo 	struct global_cwq *gcwq;
14006e84d644SOleg Nesterov 	struct cpu_workqueue_struct *cwq;
14011f1f642eSOleg Nesterov 	int ret = -1;
14026e84d644SOleg Nesterov 
140322df02bbSTejun Heo 	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)))
14041f1f642eSOleg Nesterov 		return 0;
14056e84d644SOleg Nesterov 
14066e84d644SOleg Nesterov 	/*
14076e84d644SOleg Nesterov 	 * The queueing is in progress, or it is already queued. Try to
14086e84d644SOleg Nesterov 	 * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
14096e84d644SOleg Nesterov 	 */
14106e84d644SOleg Nesterov 
14116e84d644SOleg Nesterov 	cwq = get_wq_data(work);
14126e84d644SOleg Nesterov 	if (!cwq)
14136e84d644SOleg Nesterov 		return ret;
14148b03ae3cSTejun Heo 	gcwq = cwq->gcwq;
14156e84d644SOleg Nesterov 
14168b03ae3cSTejun Heo 	spin_lock_irq(&gcwq->lock);
14176e84d644SOleg Nesterov 	if (!list_empty(&work->entry)) {
14186e84d644SOleg Nesterov 		/*
14196e84d644SOleg Nesterov 		 * This work is queued, but perhaps we locked the wrong cwq.
14206e84d644SOleg Nesterov 		 * In that case we must see the new value after rmb(), see
14216e84d644SOleg Nesterov 		 * insert_work()->wmb().
14226e84d644SOleg Nesterov 		 */
14236e84d644SOleg Nesterov 		smp_rmb();
14246e84d644SOleg Nesterov 		if (cwq == get_wq_data(work)) {
1425dc186ad7SThomas Gleixner 			debug_work_deactivate(work);
14266e84d644SOleg Nesterov 			list_del_init(&work->entry);
142773f53c4aSTejun Heo 			cwq_dec_nr_in_flight(cwq, get_work_color(work));
14286e84d644SOleg Nesterov 			ret = 1;
14296e84d644SOleg Nesterov 		}
14306e84d644SOleg Nesterov 	}
14318b03ae3cSTejun Heo 	spin_unlock_irq(&gcwq->lock);
14326e84d644SOleg Nesterov 
14336e84d644SOleg Nesterov 	return ret;
14346e84d644SOleg Nesterov }
14356e84d644SOleg Nesterov 
14366e84d644SOleg Nesterov static void wait_on_cpu_work(struct cpu_workqueue_struct *cwq,
1437b89deed3SOleg Nesterov 				struct work_struct *work)
1438b89deed3SOleg Nesterov {
14398b03ae3cSTejun Heo 	struct global_cwq *gcwq = cwq->gcwq;
1440b89deed3SOleg Nesterov 	struct wq_barrier barr;
1441affee4b2STejun Heo 	struct worker *worker;
1442b89deed3SOleg Nesterov 
14438b03ae3cSTejun Heo 	spin_lock_irq(&gcwq->lock);
1444affee4b2STejun Heo 
1445affee4b2STejun Heo 	worker = NULL;
1446c34056a3STejun Heo 	if (unlikely(cwq->worker && cwq->worker->current_work == work)) {
1447affee4b2STejun Heo 		worker = cwq->worker;
1448affee4b2STejun Heo 		insert_wq_barrier(cwq, &barr, work, worker);
1449b89deed3SOleg Nesterov 	}
1450affee4b2STejun Heo 
14518b03ae3cSTejun Heo 	spin_unlock_irq(&gcwq->lock);
1452b89deed3SOleg Nesterov 
1453affee4b2STejun Heo 	if (unlikely(worker)) {
1454b89deed3SOleg Nesterov 		wait_for_completion(&barr.done);
1455dc186ad7SThomas Gleixner 		destroy_work_on_stack(&barr.work);
1456dc186ad7SThomas Gleixner 	}
1457b89deed3SOleg Nesterov }
1458b89deed3SOleg Nesterov 
14596e84d644SOleg Nesterov static void wait_on_work(struct work_struct *work)
1460b89deed3SOleg Nesterov {
1461b89deed3SOleg Nesterov 	struct cpu_workqueue_struct *cwq;
146228e53bddSOleg Nesterov 	struct workqueue_struct *wq;
1463b1f4ec17SOleg Nesterov 	int cpu;
1464b89deed3SOleg Nesterov 
1465f293ea92SOleg Nesterov 	might_sleep();
1466f293ea92SOleg Nesterov 
14673295f0efSIngo Molnar 	lock_map_acquire(&work->lockdep_map);
14683295f0efSIngo Molnar 	lock_map_release(&work->lockdep_map);
14694e6045f1SJohannes Berg 
1470b89deed3SOleg Nesterov 	cwq = get_wq_data(work);
1471b89deed3SOleg Nesterov 	if (!cwq)
14723af24433SOleg Nesterov 		return;
1473b89deed3SOleg Nesterov 
147428e53bddSOleg Nesterov 	wq = cwq->wq;
147528e53bddSOleg Nesterov 
14761537663fSTejun Heo 	for_each_possible_cpu(cpu)
14774690c4abSTejun Heo 		wait_on_cpu_work(get_cwq(cpu, wq), work);
14786e84d644SOleg Nesterov }
14796e84d644SOleg Nesterov 
14801f1f642eSOleg Nesterov static int __cancel_work_timer(struct work_struct *work,
14811f1f642eSOleg Nesterov 				struct timer_list* timer)
14821f1f642eSOleg Nesterov {
14831f1f642eSOleg Nesterov 	int ret;
14841f1f642eSOleg Nesterov 
14851f1f642eSOleg Nesterov 	do {
14861f1f642eSOleg Nesterov 		ret = (timer && likely(del_timer(timer)));
14871f1f642eSOleg Nesterov 		if (!ret)
14881f1f642eSOleg Nesterov 			ret = try_to_grab_pending(work);
14891f1f642eSOleg Nesterov 		wait_on_work(work);
14901f1f642eSOleg Nesterov 	} while (unlikely(ret < 0));
14911f1f642eSOleg Nesterov 
14924d707b9fSOleg Nesterov 	clear_wq_data(work);
14931f1f642eSOleg Nesterov 	return ret;
14941f1f642eSOleg Nesterov }
14951f1f642eSOleg Nesterov 
14966e84d644SOleg Nesterov /**
14976e84d644SOleg Nesterov  * cancel_work_sync - block until a work_struct's callback has terminated
14986e84d644SOleg Nesterov  * @work: the work which is to be flushed
14996e84d644SOleg Nesterov  *
15001f1f642eSOleg Nesterov  * Returns true if @work was pending.
15011f1f642eSOleg Nesterov  *
15026e84d644SOleg Nesterov  * cancel_work_sync() will cancel the work if it is queued. If the work's
15036e84d644SOleg Nesterov  * callback appears to be running, cancel_work_sync() will block until it
15046e84d644SOleg Nesterov  * has completed.
15056e84d644SOleg Nesterov  *
15066e84d644SOleg Nesterov  * It is possible to use this function if the work re-queues itself. It can
15076e84d644SOleg Nesterov  * cancel the work even if it migrates to another workqueue, however in that
15086e84d644SOleg Nesterov  * case it only guarantees that work->func() has completed on the last queued
15096e84d644SOleg Nesterov  * workqueue.
15106e84d644SOleg Nesterov  *
15116e84d644SOleg Nesterov  * cancel_work_sync(&delayed_work->work) should be used only if ->timer is not
15126e84d644SOleg Nesterov  * pending, otherwise it goes into a busy-wait loop until the timer expires.
15136e84d644SOleg Nesterov  *
15146e84d644SOleg Nesterov  * The caller must ensure that workqueue_struct on which this work was last
15156e84d644SOleg Nesterov  * queued can't be destroyed before this function returns.
15166e84d644SOleg Nesterov  */
15171f1f642eSOleg Nesterov int cancel_work_sync(struct work_struct *work)
15186e84d644SOleg Nesterov {
15191f1f642eSOleg Nesterov 	return __cancel_work_timer(work, NULL);
1520b89deed3SOleg Nesterov }
152128e53bddSOleg Nesterov EXPORT_SYMBOL_GPL(cancel_work_sync);
1522b89deed3SOleg Nesterov 
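/*
 * Usage sketch (illustrative, not part of the original source):
 * unlike flush_work(), cancel_work_sync() also dequeues a pending
 * item and copes with self-requeueing works, so no "stop requeueing"
 * handshake is needed.  dev is hypothetical.
 *
 *	if (cancel_work_sync(&dev->work))
 *		pr_debug("work was still pending\n");
 *	(dev->work is now neither queued nor running)
 */
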
15236e84d644SOleg Nesterov /**
1524f5a421a4SOleg Nesterov  * cancel_delayed_work_sync - reliably kill off a delayed work.
15256e84d644SOleg Nesterov  * @dwork: the delayed work struct
15266e84d644SOleg Nesterov  *
15271f1f642eSOleg Nesterov  * Returns true if @dwork was pending.
15281f1f642eSOleg Nesterov  *
15296e84d644SOleg Nesterov  * It is possible to use this function if @dwork rearms itself via queue_work()
15306e84d644SOleg Nesterov  * or queue_delayed_work(). See also the comment for cancel_work_sync().
15316e84d644SOleg Nesterov  */
15321f1f642eSOleg Nesterov int cancel_delayed_work_sync(struct delayed_work *dwork)
15336e84d644SOleg Nesterov {
15341f1f642eSOleg Nesterov 	return __cancel_work_timer(&dwork->work, &dwork->timer);
15356e84d644SOleg Nesterov }
1536f5a421a4SOleg Nesterov EXPORT_SYMBOL(cancel_delayed_work_sync);
15371da177e4SLinus Torvalds 
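/*
 * Usage sketch (illustrative, not part of the original source):
 *
 *	cancel_delayed_work_sync(&dev->poll_work);
 *
 * kills both the pending timer and any queued or running instance of
 * the hypothetical struct delayed_work dev->poll_work, even if the
 * work rearms itself via schedule_delayed_work().
 */
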
15386e84d644SOleg Nesterov static struct workqueue_struct *keventd_wq __read_mostly;
15391da177e4SLinus Torvalds 
15400fcb78c2SRolf Eike Beer /**
15410fcb78c2SRolf Eike Beer  * schedule_work - put work task in global workqueue
15420fcb78c2SRolf Eike Beer  * @work: job to be done
15430fcb78c2SRolf Eike Beer  *
15445b0f437dSBart Van Assche  * Returns zero if @work was already on the kernel-global workqueue and
15455b0f437dSBart Van Assche  * non-zero otherwise.
15465b0f437dSBart Van Assche  *
15475b0f437dSBart Van Assche  * This puts a job in the kernel-global workqueue if it was not already
15485b0f437dSBart Van Assche  * queued and leaves it in the same position on the kernel-global
15495b0f437dSBart Van Assche  * workqueue otherwise.
15500fcb78c2SRolf Eike Beer  */
15517ad5b3a5SHarvey Harrison int schedule_work(struct work_struct *work)
15521da177e4SLinus Torvalds {
15531da177e4SLinus Torvalds 	return queue_work(keventd_wq, work);
15541da177e4SLinus Torvalds }
1555ae90dd5dSDave Jones EXPORT_SYMBOL(schedule_work);
15561da177e4SLinus Torvalds 
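/*
 * Usage sketch (illustrative, not part of the original source): the
 * canonical DECLARE_WORK()/schedule_work() pairing; all names are
 * hypothetical.
 *
 *	static void example_func(struct work_struct *work)
 *	{
 *		(runs later in process context on keventd_wq)
 *	}
 *
 *	static DECLARE_WORK(example_work, example_func);
 *
 *	schedule_work(&example_work);	(returns 0 if already queued)
 */
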
1557c1a220e7SZhang Rui /**
1558c1a220e7SZhang Rui  * schedule_work_on - put work task on a specific cpu
1559c1a220e7SZhang Rui  * @cpu: cpu to put the work task on
1560c1a220e7SZhang Rui  * @work: job to be done
1561c1a220e7SZhang Rui  *
1562c1a220e7SZhang Rui  * This puts a job on a specific cpu
1563c1a220e7SZhang Rui  */
1564c1a220e7SZhang Rui int schedule_work_on(int cpu, struct work_struct *work)
1565c1a220e7SZhang Rui {
1566c1a220e7SZhang Rui 	return queue_work_on(cpu, keventd_wq, work);
1567c1a220e7SZhang Rui }
1568c1a220e7SZhang Rui EXPORT_SYMBOL(schedule_work_on);
1569c1a220e7SZhang Rui 
15700fcb78c2SRolf Eike Beer /**
15710fcb78c2SRolf Eike Beer  * schedule_delayed_work - put work task in global workqueue after delay
157252bad64dSDavid Howells  * @dwork: job to be done
157352bad64dSDavid Howells  * @delay: number of jiffies to wait or 0 for immediate execution
15740fcb78c2SRolf Eike Beer  *
15750fcb78c2SRolf Eike Beer  * After waiting for a given time this puts a job in the kernel-global
15760fcb78c2SRolf Eike Beer  * workqueue.
15770fcb78c2SRolf Eike Beer  */
15787ad5b3a5SHarvey Harrison int schedule_delayed_work(struct delayed_work *dwork,
157982f67cd9SIngo Molnar 					unsigned long delay)
15801da177e4SLinus Torvalds {
158152bad64dSDavid Howells 	return queue_delayed_work(keventd_wq, dwork, delay);
15821da177e4SLinus Torvalds }
1583ae90dd5dSDave Jones EXPORT_SYMBOL(schedule_delayed_work);
15841da177e4SLinus Torvalds 
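/*
 * Usage sketch (illustrative, not part of the original source):
 *
 *	static DECLARE_DELAYED_WORK(example_dwork, example_func);
 *
 *	schedule_delayed_work(&example_dwork, msecs_to_jiffies(100));
 *
 * example_func() then runs on the kernel-global workqueue roughly
 * 100ms later; the names are hypothetical.
 */
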
15850fcb78c2SRolf Eike Beer /**
15868c53e463SLinus Torvalds  * flush_delayed_work - block until a delayed_work's callback has terminated
15878c53e463SLinus Torvalds  * @dwork: the delayed work which is to be flushed
15888c53e463SLinus Torvalds  *
15898c53e463SLinus Torvalds  * Any timeout is cancelled, and any pending work is run immediately.
15908c53e463SLinus Torvalds  */
15918c53e463SLinus Torvalds void flush_delayed_work(struct delayed_work *dwork)
15928c53e463SLinus Torvalds {
15938c53e463SLinus Torvalds 	if (del_timer_sync(&dwork->timer)) {
15944690c4abSTejun Heo 		__queue_work(get_cpu(), get_wq_data(&dwork->work)->wq,
15954690c4abSTejun Heo 			     &dwork->work);
15968c53e463SLinus Torvalds 		put_cpu();
15978c53e463SLinus Torvalds 	}
15988c53e463SLinus Torvalds 	flush_work(&dwork->work);
15998c53e463SLinus Torvalds }
16008c53e463SLinus Torvalds EXPORT_SYMBOL(flush_delayed_work);
16018c53e463SLinus Torvalds 
16028c53e463SLinus Torvalds /**
16030fcb78c2SRolf Eike Beer  * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
16040fcb78c2SRolf Eike Beer  * @cpu: cpu to use
160552bad64dSDavid Howells  * @dwork: job to be done
16060fcb78c2SRolf Eike Beer  * @delay: number of jiffies to wait
16070fcb78c2SRolf Eike Beer  *
16080fcb78c2SRolf Eike Beer  * After waiting for a given time this puts a job in the kernel-global
16090fcb78c2SRolf Eike Beer  * workqueue on the specified CPU.
16100fcb78c2SRolf Eike Beer  */
16111da177e4SLinus Torvalds int schedule_delayed_work_on(int cpu,
161252bad64dSDavid Howells 			struct delayed_work *dwork, unsigned long delay)
16131da177e4SLinus Torvalds {
161452bad64dSDavid Howells 	return queue_delayed_work_on(cpu, keventd_wq, dwork, delay);
16151da177e4SLinus Torvalds }
1616ae90dd5dSDave Jones EXPORT_SYMBOL(schedule_delayed_work_on);
16171da177e4SLinus Torvalds 
1618b6136773SAndrew Morton /**
1619b6136773SAndrew Morton  * schedule_on_each_cpu - call a function on each online CPU from keventd
1620b6136773SAndrew Morton  * @func: the function to call
1621b6136773SAndrew Morton  *
1622b6136773SAndrew Morton  * Returns zero on success.
1623b6136773SAndrew Morton  * Returns -ve errno on failure.
1624b6136773SAndrew Morton  *
1625b6136773SAndrew Morton  * schedule_on_each_cpu() is very slow.
1626b6136773SAndrew Morton  */
162765f27f38SDavid Howells int schedule_on_each_cpu(work_func_t func)
162815316ba8SChristoph Lameter {
162915316ba8SChristoph Lameter 	int cpu;
163065a64464SAndi Kleen 	int orig = -1;
1631b6136773SAndrew Morton 	struct work_struct *works;
163215316ba8SChristoph Lameter 
1633b6136773SAndrew Morton 	works = alloc_percpu(struct work_struct);
1634b6136773SAndrew Morton 	if (!works)
163515316ba8SChristoph Lameter 		return -ENOMEM;
1636b6136773SAndrew Morton 
163795402b38SGautham R Shenoy 	get_online_cpus();
163893981800STejun Heo 
163993981800STejun Heo 	/*
164093981800STejun Heo 	 * When running in keventd don't schedule a work item on
164193981800STejun Heo 	 * itself.  Can just call directly because the work queue is
164293981800STejun Heo 	 * already bound.  This also is faster.
164393981800STejun Heo 	 * already bound.  This is also faster.
164493981800STejun Heo 	if (current_is_keventd())
164593981800STejun Heo 		orig = raw_smp_processor_id();
164693981800STejun Heo 
164715316ba8SChristoph Lameter 	for_each_online_cpu(cpu) {
16489bfb1839SIngo Molnar 		struct work_struct *work = per_cpu_ptr(works, cpu);
16499bfb1839SIngo Molnar 
16509bfb1839SIngo Molnar 		INIT_WORK(work, func);
165193981800STejun Heo 		if (cpu != orig)
16528de6d308SOleg Nesterov 			schedule_work_on(cpu, work);
165315316ba8SChristoph Lameter 	}
165493981800STejun Heo 	if (orig >= 0)
165593981800STejun Heo 		func(per_cpu_ptr(works, orig));
165693981800STejun Heo 
165793981800STejun Heo 	for_each_online_cpu(cpu)
16588616a89aSOleg Nesterov 		flush_work(per_cpu_ptr(works, cpu));
165993981800STejun Heo 
166095402b38SGautham R Shenoy 	put_online_cpus();
1661b6136773SAndrew Morton 	free_percpu(works);
166215316ba8SChristoph Lameter 	return 0;
166315316ba8SChristoph Lameter }
166415316ba8SChristoph Lameter 
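/*
 * Usage sketch (illustrative, not part of the original source):
 *
 *	static void touch_cpu(struct work_struct *unused)
 *	{
 *		pr_debug("ran on cpu %d\n", raw_smp_processor_id());
 *	}
 *
 *	if (schedule_on_each_cpu(touch_cpu))
 *		(allocation failed, nothing was scheduled)
 *
 * The call blocks until touch_cpu() has run on every online CPU.
 */
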
1665eef6a7d5SAlan Stern /**
1666eef6a7d5SAlan Stern  * flush_scheduled_work - ensure that any scheduled work has run to completion.
1667eef6a7d5SAlan Stern  *
1668eef6a7d5SAlan Stern  * Forces execution of the kernel-global workqueue and blocks until its
1669eef6a7d5SAlan Stern  * completion.
1670eef6a7d5SAlan Stern  *
1671eef6a7d5SAlan Stern  * Think twice before calling this function!  It's very easy to get into
1672eef6a7d5SAlan Stern  * trouble if you don't take great care.  Either of the following situations
1673eef6a7d5SAlan Stern  * will lead to deadlock:
1674eef6a7d5SAlan Stern  *
1675eef6a7d5SAlan Stern  *	One of the work items currently on the workqueue needs to acquire
1676eef6a7d5SAlan Stern  *	a lock held by your code or its caller.
1677eef6a7d5SAlan Stern  *
1678eef6a7d5SAlan Stern  *	Your code is running in the context of a work routine.
1679eef6a7d5SAlan Stern  *
1680eef6a7d5SAlan Stern  * They will be detected by lockdep when they occur, but the first might not
1681eef6a7d5SAlan Stern  * occur very often.  It depends on what work items are on the workqueue and
1682eef6a7d5SAlan Stern  * what locks they need, which you have no control over.
1683eef6a7d5SAlan Stern  *
1684eef6a7d5SAlan Stern  * In most situations flushing the entire workqueue is overkill; you merely
1685eef6a7d5SAlan Stern  * need to know that a particular work item isn't queued and isn't running.
1686eef6a7d5SAlan Stern  * In such cases you should use cancel_delayed_work_sync() or
1687eef6a7d5SAlan Stern  * cancel_work_sync() instead.
1688eef6a7d5SAlan Stern  */
16891da177e4SLinus Torvalds void flush_scheduled_work(void)
16901da177e4SLinus Torvalds {
16911da177e4SLinus Torvalds 	flush_workqueue(keventd_wq);
16921da177e4SLinus Torvalds }
1693ae90dd5dSDave Jones EXPORT_SYMBOL(flush_scheduled_work);
16941da177e4SLinus Torvalds 
16951da177e4SLinus Torvalds /**
16961fa44ecaSJames Bottomley  * execute_in_process_context - reliably execute the routine with user context
16971fa44ecaSJames Bottomley  * @fn:		the function to execute
16981fa44ecaSJames Bottomley  * @ew:		guaranteed storage for the execute work structure (must
16991fa44ecaSJames Bottomley  *		be available when the work executes)
17001fa44ecaSJames Bottomley  *
17011fa44ecaSJames Bottomley  * Executes the function immediately if process context is available,
17021fa44ecaSJames Bottomley  * otherwise schedules the function for delayed execution.
17031fa44ecaSJames Bottomley  *
17041fa44ecaSJames Bottomley  * Returns:	0 - function was executed
17051fa44ecaSJames Bottomley  *		1 - function was scheduled for execution
17061fa44ecaSJames Bottomley  */
170765f27f38SDavid Howells int execute_in_process_context(work_func_t fn, struct execute_work *ew)
17081fa44ecaSJames Bottomley {
17091fa44ecaSJames Bottomley 	if (!in_interrupt()) {
171065f27f38SDavid Howells 		fn(&ew->work);
17111fa44ecaSJames Bottomley 		return 0;
17121fa44ecaSJames Bottomley 	}
17131fa44ecaSJames Bottomley 
171465f27f38SDavid Howells 	INIT_WORK(&ew->work, fn);
17151fa44ecaSJames Bottomley 	schedule_work(&ew->work);
17161fa44ecaSJames Bottomley 
17171fa44ecaSJames Bottomley 	return 1;
17181fa44ecaSJames Bottomley }
17191fa44ecaSJames Bottomley EXPORT_SYMBOL_GPL(execute_in_process_context);
17201fa44ecaSJames Bottomley 
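/*
 * Usage sketch (illustrative, not part of the original source): a
 * hypothetical release path that may be entered from hard irq
 * context.  @obj must stay valid until my_release() has run.
 *
 *	static void my_release(struct work_struct *work)
 *	{
 *		struct my_obj *obj =
 *			container_of(work, struct my_obj, ew.work);
 *		kfree(obj);
 *	}
 *
 *	execute_in_process_context(my_release, &obj->ew);
 */
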
17211da177e4SLinus Torvalds int keventd_up(void)
17221da177e4SLinus Torvalds {
17231da177e4SLinus Torvalds 	return keventd_wq != NULL;
17241da177e4SLinus Torvalds }
17251da177e4SLinus Torvalds 
17261da177e4SLinus Torvalds int current_is_keventd(void)
17271da177e4SLinus Torvalds {
17281da177e4SLinus Torvalds 	struct cpu_workqueue_struct *cwq;
1729d243769dSHugh Dickins 	int cpu = raw_smp_processor_id(); /* preempt-safe: keventd is per-cpu */
17301da177e4SLinus Torvalds 	int ret = 0;
17311da177e4SLinus Torvalds 
17321da177e4SLinus Torvalds 	BUG_ON(!keventd_wq);
17331da177e4SLinus Torvalds 
17341537663fSTejun Heo 	cwq = get_cwq(cpu, keventd_wq);
1735c34056a3STejun Heo 	if (current == cwq->worker->task)
17361da177e4SLinus Torvalds 		ret = 1;
17371da177e4SLinus Torvalds 
17381da177e4SLinus Torvalds 	return ret;
17391da177e4SLinus Torvalds 
17401da177e4SLinus Torvalds }
17411da177e4SLinus Torvalds 
17420f900049STejun Heo static struct cpu_workqueue_struct *alloc_cwqs(void)
17430f900049STejun Heo {
17440f900049STejun Heo 	/*
17450f900049STejun Heo 	 * cwqs are force-aligned according to WORK_STRUCT_FLAG_BITS.
17460f900049STejun Heo 	 * Make sure that the alignment isn't lower than that of
17470f900049STejun Heo 	 * unsigned long long.
17480f900049STejun Heo 	 */
17490f900049STejun Heo 	const size_t size = sizeof(struct cpu_workqueue_struct);
17500f900049STejun Heo 	const size_t align = max_t(size_t, 1 << WORK_STRUCT_FLAG_BITS,
17510f900049STejun Heo 				   __alignof__(unsigned long long));
17520f900049STejun Heo 	struct cpu_workqueue_struct *cwqs;
17530f900049STejun Heo #ifndef CONFIG_SMP
17540f900049STejun Heo 	void *ptr;
17550f900049STejun Heo 
17560f900049STejun Heo 	/*
17570f900049STejun Heo 	 * On UP, percpu allocator doesn't honor alignment parameter
17580f900049STejun Heo 	 * and simply uses arch-dependent default.  Allocate enough
17590f900049STejun Heo 	 * room to align cwq and put an extra pointer at the end
17600f900049STejun Heo 	 * pointing back to the originally allocated pointer which
17610f900049STejun Heo 	 * will be used for free.
17620f900049STejun Heo 	 *
17630f900049STejun Heo 	 * FIXME: This really belongs to UP percpu code.  Update UP
17640f900049STejun Heo 	 * percpu code to honor alignment and remove this ugliness.
17650f900049STejun Heo 	 */
17660f900049STejun Heo 	ptr = __alloc_percpu(size + align + sizeof(void *), 1);
17670f900049STejun Heo 	cwqs = PTR_ALIGN(ptr, align);
17680f900049STejun Heo 	*(void **)per_cpu_ptr(cwqs + 1, 0) = ptr;
17690f900049STejun Heo #else
17700f900049STejun Heo 	/* On SMP, percpu allocator can do it itself */
17710f900049STejun Heo 	cwqs = __alloc_percpu(size, align);
17720f900049STejun Heo #endif
17730f900049STejun Heo 	/* just in case, make sure it's actually aligned */
17740f900049STejun Heo 	BUG_ON(!IS_ALIGNED((unsigned long)cwqs, align));
17750f900049STejun Heo 	return cwqs;
17760f900049STejun Heo }
17770f900049STejun Heo 
17780f900049STejun Heo static void free_cwqs(struct cpu_workqueue_struct *cwqs)
17790f900049STejun Heo {
17800f900049STejun Heo #ifndef CONFIG_SMP
17810f900049STejun Heo 	/* on UP, the pointer to free is stored right after the cwq */
17820f900049STejun Heo 	if (cwqs)
17830f900049STejun Heo 		free_percpu(*(void **)per_cpu_ptr(cwqs + 1, 0));
17840f900049STejun Heo #else
17850f900049STejun Heo 	free_percpu(cwqs);
17860f900049STejun Heo #endif
17870f900049STejun Heo }
17880f900049STejun Heo 
17894e6045f1SJohannes Berg struct workqueue_struct *__create_workqueue_key(const char *name,
179097e37d7bSTejun Heo 						unsigned int flags,
17911e19ffc6STejun Heo 						int max_active,
1792eb13ba87SJohannes Berg 						struct lock_class_key *key,
1793eb13ba87SJohannes Berg 						const char *lock_name)
17943af24433SOleg Nesterov {
17953af24433SOleg Nesterov 	struct workqueue_struct *wq;
1796c34056a3STejun Heo 	bool failed = false;
1797c34056a3STejun Heo 	unsigned int cpu;
17983af24433SOleg Nesterov 
17991e19ffc6STejun Heo 	max_active = clamp_val(max_active, 1, INT_MAX);
18001e19ffc6STejun Heo 
18013af24433SOleg Nesterov 	wq = kzalloc(sizeof(*wq), GFP_KERNEL);
18023af24433SOleg Nesterov 	if (!wq)
18034690c4abSTejun Heo 		goto err;
18043af24433SOleg Nesterov 
18050f900049STejun Heo 	wq->cpu_wq = alloc_cwqs();
18064690c4abSTejun Heo 	if (!wq->cpu_wq)
18074690c4abSTejun Heo 		goto err;
18083af24433SOleg Nesterov 
180997e37d7bSTejun Heo 	wq->flags = flags;
1810a0a1a5fdSTejun Heo 	wq->saved_max_active = max_active;
181173f53c4aSTejun Heo 	mutex_init(&wq->flush_mutex);
181273f53c4aSTejun Heo 	atomic_set(&wq->nr_cwqs_to_flush, 0);
181373f53c4aSTejun Heo 	INIT_LIST_HEAD(&wq->flusher_queue);
181473f53c4aSTejun Heo 	INIT_LIST_HEAD(&wq->flusher_overflow);
1815*502ca9d8STejun Heo 	wq->single_cpu = NR_CPUS;
1816*502ca9d8STejun Heo 
18173af24433SOleg Nesterov 	wq->name = name;
1818eb13ba87SJohannes Berg 	lockdep_init_map(&wq->lockdep_map, lock_name, key, 0);
1819cce1a165SOleg Nesterov 	INIT_LIST_HEAD(&wq->list);
18203af24433SOleg Nesterov 
18213da1c84cSOleg Nesterov 	cpu_maps_update_begin();
18226af8bf3dSOleg Nesterov 	/*
18236af8bf3dSOleg Nesterov 	 * We must initialize cwqs for each possible cpu even if we
18246af8bf3dSOleg Nesterov 	 * are going to call destroy_workqueue() finally. Otherwise
18256af8bf3dSOleg Nesterov 	 * cpu_up() can hit the uninitialized cwq once we drop the
18266af8bf3dSOleg Nesterov 	 * lock.
18276af8bf3dSOleg Nesterov 	 */
18283af24433SOleg Nesterov 	for_each_possible_cpu(cpu) {
18291537663fSTejun Heo 		struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
18308b03ae3cSTejun Heo 		struct global_cwq *gcwq = get_gcwq(cpu);
18311537663fSTejun Heo 
18320f900049STejun Heo 		BUG_ON((unsigned long)cwq & WORK_STRUCT_FLAG_MASK);
18338b03ae3cSTejun Heo 		cwq->gcwq = gcwq;
1834c34056a3STejun Heo 		cwq->wq = wq;
183573f53c4aSTejun Heo 		cwq->flush_color = -1;
18361e19ffc6STejun Heo 		cwq->max_active = max_active;
18371537663fSTejun Heo 		INIT_LIST_HEAD(&cwq->worklist);
18381e19ffc6STejun Heo 		INIT_LIST_HEAD(&cwq->delayed_works);
18391537663fSTejun Heo 
1840c34056a3STejun Heo 		if (failed)
18413af24433SOleg Nesterov 			continue;
1842*502ca9d8STejun Heo 		cwq->worker = create_worker(cwq, cpu_online(cpu));
1843c34056a3STejun Heo 		if (cwq->worker)
1844c34056a3STejun Heo 			start_worker(cwq->worker);
18451537663fSTejun Heo 		else
1846c34056a3STejun Heo 			failed = true;
18473af24433SOleg Nesterov 	}
18481537663fSTejun Heo 
1849a0a1a5fdSTejun Heo 	/*
1850a0a1a5fdSTejun Heo 	 * workqueue_lock protects global freeze state and workqueues
1851a0a1a5fdSTejun Heo 	 * list.  Grab it, set max_active accordingly and add the new
1852a0a1a5fdSTejun Heo 	 * workqueue to workqueues list.
1853a0a1a5fdSTejun Heo 	 */
18541537663fSTejun Heo 	spin_lock(&workqueue_lock);
1855a0a1a5fdSTejun Heo 
1856a0a1a5fdSTejun Heo 	if (workqueue_freezing && wq->flags & WQ_FREEZEABLE)
1857a0a1a5fdSTejun Heo 		for_each_possible_cpu(cpu)
1858a0a1a5fdSTejun Heo 			get_cwq(cpu, wq)->max_active = 0;
1859a0a1a5fdSTejun Heo 
18601537663fSTejun Heo 	list_add(&wq->list, &workqueues);
1861a0a1a5fdSTejun Heo 
18621537663fSTejun Heo 	spin_unlock(&workqueue_lock);
18631537663fSTejun Heo 
18643da1c84cSOleg Nesterov 	cpu_maps_update_done();
18653af24433SOleg Nesterov 
1866c34056a3STejun Heo 	if (failed) {
18673af24433SOleg Nesterov 		destroy_workqueue(wq);
18683af24433SOleg Nesterov 		wq = NULL;
18693af24433SOleg Nesterov 	}
18703af24433SOleg Nesterov 	return wq;
18714690c4abSTejun Heo err:
18724690c4abSTejun Heo 	if (wq) {
18730f900049STejun Heo 		free_cwqs(wq->cpu_wq);
18744690c4abSTejun Heo 		kfree(wq);
18754690c4abSTejun Heo 	}
18764690c4abSTejun Heo 	return NULL;
18773af24433SOleg Nesterov }
18784e6045f1SJohannes Berg EXPORT_SYMBOL_GPL(__create_workqueue_key);
18793af24433SOleg Nesterov 
18803af24433SOleg Nesterov /**
18813af24433SOleg Nesterov  * destroy_workqueue - safely terminate a workqueue
18823af24433SOleg Nesterov  * @wq: target workqueue
18833af24433SOleg Nesterov  *
18843af24433SOleg Nesterov  * Safely destroy a workqueue. All work currently pending will be done first.
18853af24433SOleg Nesterov  */
18863af24433SOleg Nesterov void destroy_workqueue(struct workqueue_struct *wq)
18873af24433SOleg Nesterov {
1888c8e55f36STejun Heo 	unsigned int cpu;
18893af24433SOleg Nesterov 
1890a0a1a5fdSTejun Heo 	flush_workqueue(wq);
1891a0a1a5fdSTejun Heo 
1892a0a1a5fdSTejun Heo 	/*
1893a0a1a5fdSTejun Heo 	 * wq list is used to freeze wq, remove from list after
1894a0a1a5fdSTejun Heo 	 * flushing is complete in case freeze races us.
1895a0a1a5fdSTejun Heo 	 */
18963da1c84cSOleg Nesterov 	cpu_maps_update_begin();
189795402b38SGautham R Shenoy 	spin_lock(&workqueue_lock);
18983af24433SOleg Nesterov 	list_del(&wq->list);
189995402b38SGautham R Shenoy 	spin_unlock(&workqueue_lock);
19003da1c84cSOleg Nesterov 	cpu_maps_update_done();
19013af24433SOleg Nesterov 
190273f53c4aSTejun Heo 	for_each_possible_cpu(cpu) {
190373f53c4aSTejun Heo 		struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
190473f53c4aSTejun Heo 		int i;
190573f53c4aSTejun Heo 
1906c34056a3STejun Heo 		if (cwq->worker) {
1907c8e55f36STejun Heo 			spin_lock_irq(&cwq->gcwq->lock);
1908c34056a3STejun Heo 			destroy_worker(cwq->worker);
1909c34056a3STejun Heo 			cwq->worker = NULL;
1910c8e55f36STejun Heo 			spin_unlock_irq(&cwq->gcwq->lock);
191173f53c4aSTejun Heo 		}
191273f53c4aSTejun Heo 
191373f53c4aSTejun Heo 		for (i = 0; i < WORK_NR_COLORS; i++)
191473f53c4aSTejun Heo 			BUG_ON(cwq->nr_in_flight[i]);
19151e19ffc6STejun Heo 		BUG_ON(cwq->nr_active);
19161e19ffc6STejun Heo 		BUG_ON(!list_empty(&cwq->delayed_works));
191773f53c4aSTejun Heo 	}
19181537663fSTejun Heo 
19190f900049STejun Heo 	free_cwqs(wq->cpu_wq);
19203af24433SOleg Nesterov 	kfree(wq);
19213af24433SOleg Nesterov }
19223af24433SOleg Nesterov EXPORT_SYMBOL_GPL(destroy_workqueue);
19233af24433SOleg Nesterov 
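/*
 * Lifecycle sketch (illustrative, not part of the original source):
 *
 *	struct workqueue_struct *wq = create_workqueue("my_wq");
 *
 *	if (!wq)
 *		return -ENOMEM;
 *	queue_work(wq, &some_work);
 *	...
 *	destroy_workqueue(wq);		(pending work is flushed first)
 *
 * create_workqueue() is the usual wrapper around
 * __create_workqueue_key() above; "my_wq" and some_work are
 * hypothetical.
 */
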
1924db7bccf4STejun Heo /*
1925db7bccf4STejun Heo  * CPU hotplug.
1926db7bccf4STejun Heo  *
1927db7bccf4STejun Heo  * CPU hotplug is implemented by allowing cwqs to be detached from
1928db7bccf4STejun Heo  * CPU, running with unbound workers and allowing them to be
1929db7bccf4STejun Heo  * reattached later if the cpu comes back online.  A separate thread
1930db7bccf4STejun Heo  * is created to govern cwqs in such state and is called the trustee.
1931db7bccf4STejun Heo  *
1932db7bccf4STejun Heo  * Trustee states and their descriptions.
1933db7bccf4STejun Heo  *
1934db7bccf4STejun Heo  * START	Command state used on startup.  On CPU_DOWN_PREPARE, a
1935db7bccf4STejun Heo  *		new trustee is started with this state.
1936db7bccf4STejun Heo  *
1937db7bccf4STejun Heo  * IN_CHARGE	Once started, trustee will enter this state after
1938db7bccf4STejun Heo  *		making all existing workers rogue.  DOWN_PREPARE waits
1939db7bccf4STejun Heo  *		for trustee to enter this state.  After reaching
1940db7bccf4STejun Heo  *		IN_CHARGE, trustee tries to execute the pending
1941db7bccf4STejun Heo  *		worklist until it's empty and the state is set to
1942db7bccf4STejun Heo  *		BUTCHER, or the state is set to RELEASE.
1943db7bccf4STejun Heo  *
1944db7bccf4STejun Heo  * BUTCHER	Command state which is set by the cpu callback after
1945db7bccf4STejun Heo  *		the cpu has gone down.  Once this state is set, the trustee
1946db7bccf4STejun Heo  *		knows that there will be no new works on the worklist
1947db7bccf4STejun Heo  *		and once the worklist is empty it can proceed to
1948db7bccf4STejun Heo  *		killing idle workers.
1949db7bccf4STejun Heo  *
1950db7bccf4STejun Heo  * RELEASE	Command state which is set by the cpu callback if the
1951db7bccf4STejun Heo  *		cpu down has been canceled or it has come online
1952db7bccf4STejun Heo  *		again.  After recognizing this state, trustee stops
1953db7bccf4STejun Heo  *		trying to drain or butcher and transits to DONE.
1954db7bccf4STejun Heo  *
1955db7bccf4STejun Heo  * DONE		Trustee will enter this state after BUTCHER or RELEASE
1956db7bccf4STejun Heo  *		is complete.
1957db7bccf4STejun Heo  *
1958db7bccf4STejun Heo  *          trustee                 CPU                draining
1959db7bccf4STejun Heo  *         took over                down               complete
1960db7bccf4STejun Heo  * START -----------> IN_CHARGE -----------> BUTCHER -----------> DONE
1961db7bccf4STejun Heo  *                        |                     |                  ^
1962db7bccf4STejun Heo  *                        | CPU is back online  v   return workers |
1963db7bccf4STejun Heo  *                         ----------------> RELEASE --------------
1964db7bccf4STejun Heo  */
1965db7bccf4STejun Heo 
1966db7bccf4STejun Heo /**
1967db7bccf4STejun Heo  * trustee_wait_event_timeout - timed event wait for trustee
1968db7bccf4STejun Heo  * @cond: condition to wait for
1969db7bccf4STejun Heo  * @timeout: timeout in jiffies
1970db7bccf4STejun Heo  *
1971db7bccf4STejun Heo  * wait_event_timeout() for trustee to use.  Handles locking and
1972db7bccf4STejun Heo  * checks for RELEASE request.
1973db7bccf4STejun Heo  *
1974db7bccf4STejun Heo  * CONTEXT:
1975db7bccf4STejun Heo  * spin_lock_irq(gcwq->lock) which may be released and regrabbed
1976db7bccf4STejun Heo  * multiple times.  To be used by trustee.
1977db7bccf4STejun Heo  *
1978db7bccf4STejun Heo  * RETURNS:
1979db7bccf4STejun Heo  * Positive indicating left time if @cond is satisfied, 0 if timed
1980db7bccf4STejun Heo  * out, -1 if canceled.
1981db7bccf4STejun Heo  */
1982db7bccf4STejun Heo #define trustee_wait_event_timeout(cond, timeout) ({			\
1983db7bccf4STejun Heo 	long __ret = (timeout);						\
1984db7bccf4STejun Heo 	while (!((cond) || (gcwq->trustee_state == TRUSTEE_RELEASE)) &&	\
1985db7bccf4STejun Heo 	       __ret) {							\
1986db7bccf4STejun Heo 		spin_unlock_irq(&gcwq->lock);				\
1987db7bccf4STejun Heo 		__wait_event_timeout(gcwq->trustee_wait, (cond) ||	\
1988db7bccf4STejun Heo 			(gcwq->trustee_state == TRUSTEE_RELEASE),	\
1989db7bccf4STejun Heo 			__ret);						\
1990db7bccf4STejun Heo 		spin_lock_irq(&gcwq->lock);				\
1991db7bccf4STejun Heo 	}								\
1992db7bccf4STejun Heo 	gcwq->trustee_state == TRUSTEE_RELEASE ? -1 : (__ret);		\
1993db7bccf4STejun Heo })
1994db7bccf4STejun Heo 
1995db7bccf4STejun Heo /**
1996db7bccf4STejun Heo  * trustee_wait_event - event wait for trustee
1997db7bccf4STejun Heo  * @cond: condition to wait for
1998db7bccf4STejun Heo  *
1999db7bccf4STejun Heo  * wait_event() for trustee to use.  Automatically handles locking and
2000db7bccf4STejun Heo  * checks for CANCEL request.
2001db7bccf4STejun Heo  * checks for RELEASE request.
2002db7bccf4STejun Heo  * CONTEXT:
2003db7bccf4STejun Heo  * spin_lock_irq(gcwq->lock) which may be released and regrabbed
2004db7bccf4STejun Heo  * multiple times.  To be used by trustee.
2005db7bccf4STejun Heo  *
2006db7bccf4STejun Heo  * RETURNS:
2007db7bccf4STejun Heo  * 0 if @cond is satisfied, -1 if canceled.
2008db7bccf4STejun Heo  */
2009db7bccf4STejun Heo #define trustee_wait_event(cond) ({					\
2010db7bccf4STejun Heo 	long __ret1;							\
2011db7bccf4STejun Heo 	__ret1 = trustee_wait_event_timeout(cond, MAX_SCHEDULE_TIMEOUT);\
2012db7bccf4STejun Heo 	__ret1 < 0 ? -1 : 0;						\
2013db7bccf4STejun Heo })
2014db7bccf4STejun Heo 
2015db7bccf4STejun Heo static int __cpuinit trustee_thread(void *__gcwq)
2016db7bccf4STejun Heo {
2017db7bccf4STejun Heo 	struct global_cwq *gcwq = __gcwq;
2018db7bccf4STejun Heo 	struct worker *worker;
2019db7bccf4STejun Heo 	struct hlist_node *pos;
2020db7bccf4STejun Heo 	int i;
2021db7bccf4STejun Heo 
2022db7bccf4STejun Heo 	BUG_ON(gcwq->cpu != smp_processor_id());
2023db7bccf4STejun Heo 
2024db7bccf4STejun Heo 	spin_lock_irq(&gcwq->lock);
2025db7bccf4STejun Heo 	/*
2026*502ca9d8STejun Heo 	 * Make all workers rogue.  Trustee must be bound to the
2027*502ca9d8STejun Heo 	 * target cpu and can't be cancelled.
2028db7bccf4STejun Heo 	 */
2029db7bccf4STejun Heo 	BUG_ON(gcwq->cpu != smp_processor_id());
2030db7bccf4STejun Heo 
2031db7bccf4STejun Heo 	list_for_each_entry(worker, &gcwq->idle_list, entry)
2032db7bccf4STejun Heo 		worker->flags |= WORKER_ROGUE;
2033db7bccf4STejun Heo 
2034db7bccf4STejun Heo 	for_each_busy_worker(worker, i, pos, gcwq)
2035db7bccf4STejun Heo 		worker->flags |= WORKER_ROGUE;
2036db7bccf4STejun Heo 
2037db7bccf4STejun Heo 	/*
2038db7bccf4STejun Heo 	 * We're now in charge.  Notify and proceed to drain.  We need
2039db7bccf4STejun Heo 	 * to keep the gcwq running during the whole CPU down
2040db7bccf4STejun Heo 	 * procedure as other cpu hotunplug callbacks may need to
2041db7bccf4STejun Heo 	 * flush currently running tasks.
2042db7bccf4STejun Heo 	 */
2043db7bccf4STejun Heo 	gcwq->trustee_state = TRUSTEE_IN_CHARGE;
2044db7bccf4STejun Heo 	wake_up_all(&gcwq->trustee_wait);
2045db7bccf4STejun Heo 
2046db7bccf4STejun Heo 	/*
2047db7bccf4STejun Heo 	 * The original cpu is in the process of dying and may go away
2048db7bccf4STejun Heo 	 * anytime now.  When that happens, we and all workers would
2049db7bccf4STejun Heo 	 * be migrated to other cpus.  Try draining any left work.
2050db7bccf4STejun Heo 	 * Note that if the gcwq is frozen, there may be frozen works
2051db7bccf4STejun Heo 	 * in freezeable cwqs.  Don't declare completion while frozen.
2052db7bccf4STejun Heo 	 */
2053db7bccf4STejun Heo 	while (gcwq->nr_workers != gcwq->nr_idle ||
2054db7bccf4STejun Heo 	       gcwq->flags & GCWQ_FREEZING ||
2055db7bccf4STejun Heo 	       gcwq->trustee_state == TRUSTEE_IN_CHARGE) {
2056db7bccf4STejun Heo 		/* give a breather */
2057db7bccf4STejun Heo 		if (trustee_wait_event_timeout(false, TRUSTEE_COOLDOWN) < 0)
2058db7bccf4STejun Heo 			break;
2059db7bccf4STejun Heo 	}
2060db7bccf4STejun Heo 
2061db7bccf4STejun Heo 	/* notify completion */
2062db7bccf4STejun Heo 	gcwq->trustee = NULL;
2063db7bccf4STejun Heo 	gcwq->trustee_state = TRUSTEE_DONE;
2064db7bccf4STejun Heo 	wake_up_all(&gcwq->trustee_wait);
2065db7bccf4STejun Heo 	spin_unlock_irq(&gcwq->lock);
2066db7bccf4STejun Heo 	return 0;
2067db7bccf4STejun Heo }
2068db7bccf4STejun Heo 
2069db7bccf4STejun Heo /**
2070db7bccf4STejun Heo  * wait_trustee_state - wait for trustee to enter the specified state
2071db7bccf4STejun Heo  * @gcwq: gcwq the trustee of interest belongs to
2072db7bccf4STejun Heo  * @state: target state to wait for
2073db7bccf4STejun Heo  *
2074db7bccf4STejun Heo  * Wait for the trustee to reach @state.  DONE is already matched.
2075db7bccf4STejun Heo  *
2076db7bccf4STejun Heo  * CONTEXT:
2077db7bccf4STejun Heo  * spin_lock_irq(gcwq->lock) which may be released and regrabbed
2078db7bccf4STejun Heo  * multiple times.  To be used by cpu_callback.
2079db7bccf4STejun Heo  */
2080db7bccf4STejun Heo static void __cpuinit wait_trustee_state(struct global_cwq *gcwq, int state)
2081db7bccf4STejun Heo {
2082db7bccf4STejun Heo 	if (!(gcwq->trustee_state == state ||
2083db7bccf4STejun Heo 	      gcwq->trustee_state == TRUSTEE_DONE)) {
2084db7bccf4STejun Heo 		spin_unlock_irq(&gcwq->lock);
2085db7bccf4STejun Heo 		__wait_event(gcwq->trustee_wait,
2086db7bccf4STejun Heo 			     gcwq->trustee_state == state ||
2087db7bccf4STejun Heo 			     gcwq->trustee_state == TRUSTEE_DONE);
2088db7bccf4STejun Heo 		spin_lock_irq(&gcwq->lock);
2089db7bccf4STejun Heo 	}
2090db7bccf4STejun Heo }
2091db7bccf4STejun Heo 
20929c7b216dSChandra Seetharaman static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
20931da177e4SLinus Torvalds 						unsigned long action,
20941da177e4SLinus Torvalds 						void *hcpu)
20951da177e4SLinus Torvalds {
20963af24433SOleg Nesterov 	unsigned int cpu = (unsigned long)hcpu;
2097db7bccf4STejun Heo 	struct global_cwq *gcwq = get_gcwq(cpu);
2098db7bccf4STejun Heo 	struct task_struct *new_trustee = NULL;
2099db7bccf4STejun Heo 	struct worker *worker;
2100db7bccf4STejun Heo 	struct hlist_node *pos;
2101db7bccf4STejun Heo 	unsigned long flags;
2102db7bccf4STejun Heo 	int i;
21031da177e4SLinus Torvalds 
21048bb78442SRafael J. Wysocki 	action &= ~CPU_TASKS_FROZEN;
21058bb78442SRafael J. Wysocki 
2106db7bccf4STejun Heo 	switch (action) {
2107db7bccf4STejun Heo 	case CPU_DOWN_PREPARE:
2108db7bccf4STejun Heo 		new_trustee = kthread_create(trustee_thread, gcwq,
2109db7bccf4STejun Heo 					     "workqueue_trustee/%d", cpu);
2110db7bccf4STejun Heo 		if (IS_ERR(new_trustee))
2111db7bccf4STejun Heo 			return notifier_from_errno(PTR_ERR(new_trustee));
2112db7bccf4STejun Heo 		kthread_bind(new_trustee, cpu);
2113db7bccf4STejun Heo 	}
21141537663fSTejun Heo 
2115db7bccf4STejun Heo 	/* some are called w/ irq disabled, don't disturb irq status */
2116db7bccf4STejun Heo 	spin_lock_irqsave(&gcwq->lock, flags);
21173af24433SOleg Nesterov 
21183af24433SOleg Nesterov 	switch (action) {
2119db7bccf4STejun Heo 	case CPU_DOWN_PREPARE:
2120db7bccf4STejun Heo 		/* initialize trustee and tell it to acquire the gcwq */
2121db7bccf4STejun Heo 		BUG_ON(gcwq->trustee || gcwq->trustee_state != TRUSTEE_DONE);
2122db7bccf4STejun Heo 		gcwq->trustee = new_trustee;
2123db7bccf4STejun Heo 		gcwq->trustee_state = TRUSTEE_START;
2124db7bccf4STejun Heo 		wake_up_process(gcwq->trustee);
2125db7bccf4STejun Heo 		wait_trustee_state(gcwq, TRUSTEE_IN_CHARGE);
2126db7bccf4STejun Heo 		break;
2127db7bccf4STejun Heo 
21283da1c84cSOleg Nesterov 	case CPU_POST_DEAD:
2129db7bccf4STejun Heo 		gcwq->trustee_state = TRUSTEE_BUTCHER;
2130db7bccf4STejun Heo 		break;
2131db7bccf4STejun Heo 
2132db7bccf4STejun Heo 	case CPU_DOWN_FAILED:
2133db7bccf4STejun Heo 	case CPU_ONLINE:
2134db7bccf4STejun Heo 		if (gcwq->trustee_state != TRUSTEE_DONE) {
2135db7bccf4STejun Heo 			gcwq->trustee_state = TRUSTEE_RELEASE;
2136db7bccf4STejun Heo 			wake_up_process(gcwq->trustee);
2137db7bccf4STejun Heo 			wait_trustee_state(gcwq, TRUSTEE_DONE);
2138db7bccf4STejun Heo 		}
2139db7bccf4STejun Heo 
2140*502ca9d8STejun Heo 		/* clear ROGUE from all workers */
2141db7bccf4STejun Heo 		list_for_each_entry(worker, &gcwq->idle_list, entry)
2142db7bccf4STejun Heo 			worker->flags &= ~WORKER_ROGUE;
2143db7bccf4STejun Heo 
2144db7bccf4STejun Heo 		for_each_busy_worker(worker, i, pos, gcwq)
2145db7bccf4STejun Heo 			worker->flags &= ~WORKER_ROGUE;
21461da177e4SLinus Torvalds 		break;
21471da177e4SLinus Torvalds 	}
2148db7bccf4STejun Heo 
2149db7bccf4STejun Heo 	spin_unlock_irqrestore(&gcwq->lock, flags);
21501da177e4SLinus Torvalds 
21511537663fSTejun Heo 	return notifier_from_errno(0);
21521da177e4SLinus Torvalds }
21531da177e4SLinus Torvalds 
21542d3854a3SRusty Russell #ifdef CONFIG_SMP
21558ccad40dSRusty Russell 
21562d3854a3SRusty Russell struct work_for_cpu {
21576b44003eSAndrew Morton 	struct completion completion;
21582d3854a3SRusty Russell 	long (*fn)(void *);
21592d3854a3SRusty Russell 	void *arg;
21602d3854a3SRusty Russell 	long ret;
21612d3854a3SRusty Russell };
21622d3854a3SRusty Russell 
21636b44003eSAndrew Morton static int do_work_for_cpu(void *_wfc)
21642d3854a3SRusty Russell {
21656b44003eSAndrew Morton 	struct work_for_cpu *wfc = _wfc;
21662d3854a3SRusty Russell 	wfc->ret = wfc->fn(wfc->arg);
21676b44003eSAndrew Morton 	complete(&wfc->completion);
21686b44003eSAndrew Morton 	return 0;
21692d3854a3SRusty Russell }
21702d3854a3SRusty Russell 
21712d3854a3SRusty Russell /**
21722d3854a3SRusty Russell  * work_on_cpu - run a function in user context on a particular cpu
21732d3854a3SRusty Russell  * @cpu: the cpu to run on
21742d3854a3SRusty Russell  * @fn: the function to run
21752d3854a3SRusty Russell  * @arg: the function arg
21762d3854a3SRusty Russell  *
217731ad9081SRusty Russell  * This will return the value @fn returns.
217831ad9081SRusty Russell  * It is up to the caller to ensure that the cpu doesn't go offline.
21796b44003eSAndrew Morton  * The caller must not hold any locks which would prevent @fn from completing.
21802d3854a3SRusty Russell  */
21812d3854a3SRusty Russell long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
21822d3854a3SRusty Russell {
21836b44003eSAndrew Morton 	struct task_struct *sub_thread;
21846b44003eSAndrew Morton 	struct work_for_cpu wfc = {
21856b44003eSAndrew Morton 		.completion = COMPLETION_INITIALIZER_ONSTACK(wfc.completion),
21866b44003eSAndrew Morton 		.fn = fn,
21876b44003eSAndrew Morton 		.arg = arg,
21886b44003eSAndrew Morton 	};
21892d3854a3SRusty Russell 
21906b44003eSAndrew Morton 	sub_thread = kthread_create(do_work_for_cpu, &wfc, "work_for_cpu");
21916b44003eSAndrew Morton 	if (IS_ERR(sub_thread))
21926b44003eSAndrew Morton 		return PTR_ERR(sub_thread);
21936b44003eSAndrew Morton 	kthread_bind(sub_thread, cpu);
21946b44003eSAndrew Morton 	wake_up_process(sub_thread);
21956b44003eSAndrew Morton 	wait_for_completion(&wfc.completion);
21962d3854a3SRusty Russell 	return wfc.ret;
21972d3854a3SRusty Russell }
21982d3854a3SRusty Russell EXPORT_SYMBOL_GPL(work_on_cpu);
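
/*
 * Usage sketch (illustrative, not part of the original source):
 * do_something_cpu_local() is hypothetical.
 *
 *	static long example_fn(void *arg)
 *	{
 *		return do_something_cpu_local(arg);
 *	}
 *
 *	get_online_cpus();
 *	ret = work_on_cpu(cpu, example_fn, &data);
 *	put_online_cpus();
 *
 * The get_online_cpus()/put_online_cpus() pair keeps @cpu from going
 * offline, which the comment above leaves to the caller.
 */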
21992d3854a3SRusty Russell #endif /* CONFIG_SMP */
22002d3854a3SRusty Russell 
2201a0a1a5fdSTejun Heo #ifdef CONFIG_FREEZER
2202a0a1a5fdSTejun Heo 
2203a0a1a5fdSTejun Heo /**
2204a0a1a5fdSTejun Heo  * freeze_workqueues_begin - begin freezing workqueues
2205a0a1a5fdSTejun Heo  *
2206a0a1a5fdSTejun Heo  * Start freezing workqueues.  After this function returns, all
2207a0a1a5fdSTejun Heo  * freezeable workqueues will queue new works to their frozen_works
2208a0a1a5fdSTejun Heo  * list instead of the cwq ones.
2209a0a1a5fdSTejun Heo  *
2210a0a1a5fdSTejun Heo  * CONTEXT:
22118b03ae3cSTejun Heo  * Grabs and releases workqueue_lock and gcwq->lock's.
2212a0a1a5fdSTejun Heo  */
2213a0a1a5fdSTejun Heo void freeze_workqueues_begin(void)
2214a0a1a5fdSTejun Heo {
2215a0a1a5fdSTejun Heo 	struct workqueue_struct *wq;
2216a0a1a5fdSTejun Heo 	unsigned int cpu;
2217a0a1a5fdSTejun Heo 
2218a0a1a5fdSTejun Heo 	spin_lock(&workqueue_lock);
2219a0a1a5fdSTejun Heo 
2220a0a1a5fdSTejun Heo 	BUG_ON(workqueue_freezing);
2221a0a1a5fdSTejun Heo 	workqueue_freezing = true;
2222a0a1a5fdSTejun Heo 
2223a0a1a5fdSTejun Heo 	for_each_possible_cpu(cpu) {
22248b03ae3cSTejun Heo 		struct global_cwq *gcwq = get_gcwq(cpu);
22258b03ae3cSTejun Heo 
22268b03ae3cSTejun Heo 		spin_lock_irq(&gcwq->lock);
22278b03ae3cSTejun Heo 
2228db7bccf4STejun Heo 		BUG_ON(gcwq->flags & GCWQ_FREEZING);
2229db7bccf4STejun Heo 		gcwq->flags |= GCWQ_FREEZING;
2230db7bccf4STejun Heo 
2231a0a1a5fdSTejun Heo 		list_for_each_entry(wq, &workqueues, list) {
2232a0a1a5fdSTejun Heo 			struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
2233a0a1a5fdSTejun Heo 
2234a0a1a5fdSTejun Heo 			if (wq->flags & WQ_FREEZEABLE)
2235a0a1a5fdSTejun Heo 				cwq->max_active = 0;
2236a0a1a5fdSTejun Heo 		}
22378b03ae3cSTejun Heo 
22388b03ae3cSTejun Heo 		spin_unlock_irq(&gcwq->lock);
2239a0a1a5fdSTejun Heo 	}
2240a0a1a5fdSTejun Heo 
2241a0a1a5fdSTejun Heo 	spin_unlock(&workqueue_lock);
2242a0a1a5fdSTejun Heo }
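/*
 * Illustrative sketch (not part of this file): only workqueues created
 * with WQ_FREEZEABLE take part in freezing.  Once max_active is forced
 * to 0 above, queue_work() parks new works on cwq->delayed_works until
 * thaw_workqueues() restores max_active.  Assuming the
 * create_freezeable_workqueue() wrapper from workqueue.h:
 *
 *	struct workqueue_struct *wq = create_freezeable_workqueue("mydrv");
 *
 *	freeze_workqueues_begin();
 *	queue_work(wq, &my_work);	// sits on delayed_works while frozen
 */
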
2243a0a1a5fdSTejun Heo 
2244a0a1a5fdSTejun Heo /**
2245a0a1a5fdSTejun Heo  * freeze_workqueues_busy - are freezeable workqueues still busy?
2246a0a1a5fdSTejun Heo  *
2247a0a1a5fdSTejun Heo  * Check whether freezing is complete.  This function must be called
2248a0a1a5fdSTejun Heo  * between freeze_workqueues_begin() and thaw_workqueues().
2249a0a1a5fdSTejun Heo  *
2250a0a1a5fdSTejun Heo  * CONTEXT:
2251a0a1a5fdSTejun Heo  * Grabs and releases workqueue_lock.
2252a0a1a5fdSTejun Heo  *
2253a0a1a5fdSTejun Heo  * RETURNS:
2254a0a1a5fdSTejun Heo  * %true if some freezeable workqueues are still busy.  %false if
2255a0a1a5fdSTejun Heo  * freezing is complete.
2256a0a1a5fdSTejun Heo  */
2257a0a1a5fdSTejun Heo bool freeze_workqueues_busy(void)
2258a0a1a5fdSTejun Heo {
2259a0a1a5fdSTejun Heo 	struct workqueue_struct *wq;
2260a0a1a5fdSTejun Heo 	unsigned int cpu;
2261a0a1a5fdSTejun Heo 	bool busy = false;
2262a0a1a5fdSTejun Heo 
2263a0a1a5fdSTejun Heo 	spin_lock(&workqueue_lock);
2264a0a1a5fdSTejun Heo 
2265a0a1a5fdSTejun Heo 	BUG_ON(!workqueue_freezing);
2266a0a1a5fdSTejun Heo 
2267a0a1a5fdSTejun Heo 	for_each_possible_cpu(cpu) {
2268a0a1a5fdSTejun Heo 		/*
2269a0a1a5fdSTejun Heo 		 * nr_active is monotonically decreasing.  It's safe
2270a0a1a5fdSTejun Heo 		 * to peek without lock.
2271a0a1a5fdSTejun Heo 		 */
2272a0a1a5fdSTejun Heo 		list_for_each_entry(wq, &workqueues, list) {
2273a0a1a5fdSTejun Heo 			struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
2274a0a1a5fdSTejun Heo 
2275a0a1a5fdSTejun Heo 			if (!(wq->flags & WQ_FREEZEABLE))
2276a0a1a5fdSTejun Heo 				continue;
2277a0a1a5fdSTejun Heo 
2278a0a1a5fdSTejun Heo 			BUG_ON(cwq->nr_active < 0);
2279a0a1a5fdSTejun Heo 			if (cwq->nr_active) {
2280a0a1a5fdSTejun Heo 				busy = true;
2281a0a1a5fdSTejun Heo 				goto out_unlock;
2282a0a1a5fdSTejun Heo 			}
2283a0a1a5fdSTejun Heo 		}
2284a0a1a5fdSTejun Heo 	}
2285a0a1a5fdSTejun Heo out_unlock:
2286a0a1a5fdSTejun Heo 	spin_unlock(&workqueue_lock);
2287a0a1a5fdSTejun Heo 	return busy;
2288a0a1a5fdSTejun Heo }
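
/*
 * Illustrative sketch of the expected polling loop (the real driver of
 * these primitives is the suspend/hibernate freezer in
 * kernel/power/process.c):
 *
 *	freeze_workqueues_begin();
 *	while (freeze_workqueues_busy()) {
 *		if (fatal_signal_pending(current))	// example bail-out
 *			break;
 *		msleep(10);	// give in-flight works time to finish
 *	}
 */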
2289a0a1a5fdSTejun Heo 
2290a0a1a5fdSTejun Heo /**
2291a0a1a5fdSTejun Heo  * thaw_workqueues - thaw workqueues
2292a0a1a5fdSTejun Heo  *
2293a0a1a5fdSTejun Heo  * Thaw workqueues.  Normal queueing is restored and all works collected
2294a0a1a5fdSTejun Heo  * on delayed_works lists are transferred to their respective cwq worklists.
2295a0a1a5fdSTejun Heo  *
2296a0a1a5fdSTejun Heo  * CONTEXT:
22978b03ae3cSTejun Heo  * Grabs and releases workqueue_lock and gcwq->lock's.
2298a0a1a5fdSTejun Heo  */
2299a0a1a5fdSTejun Heo void thaw_workqueues(void)
2300a0a1a5fdSTejun Heo {
2301a0a1a5fdSTejun Heo 	struct workqueue_struct *wq;
2302a0a1a5fdSTejun Heo 	unsigned int cpu;
2303a0a1a5fdSTejun Heo 
2304a0a1a5fdSTejun Heo 	spin_lock(&workqueue_lock);
2305a0a1a5fdSTejun Heo 
2306a0a1a5fdSTejun Heo 	if (!workqueue_freezing)
2307a0a1a5fdSTejun Heo 		goto out_unlock;
2308a0a1a5fdSTejun Heo 
2309a0a1a5fdSTejun Heo 	for_each_possible_cpu(cpu) {
23108b03ae3cSTejun Heo 		struct global_cwq *gcwq = get_gcwq(cpu);
23118b03ae3cSTejun Heo 
23128b03ae3cSTejun Heo 		spin_lock_irq(&gcwq->lock);
23138b03ae3cSTejun Heo 
2314db7bccf4STejun Heo 		BUG_ON(!(gcwq->flags & GCWQ_FREEZING));
2315db7bccf4STejun Heo 		gcwq->flags &= ~GCWQ_FREEZING;
2316db7bccf4STejun Heo 
2317a0a1a5fdSTejun Heo 		list_for_each_entry(wq, &workqueues, list) {
2318a0a1a5fdSTejun Heo 			struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
2319a0a1a5fdSTejun Heo 
2320a0a1a5fdSTejun Heo 			if (!(wq->flags & WQ_FREEZEABLE))
2321a0a1a5fdSTejun Heo 				continue;
2322a0a1a5fdSTejun Heo 
2323a0a1a5fdSTejun Heo 			/* restore max_active and repopulate worklist */
2324a0a1a5fdSTejun Heo 			cwq->max_active = wq->saved_max_active;
2325a0a1a5fdSTejun Heo 
2326a0a1a5fdSTejun Heo 			while (!list_empty(&cwq->delayed_works) &&
2327a0a1a5fdSTejun Heo 			       cwq->nr_active < cwq->max_active)
2328a0a1a5fdSTejun Heo 				cwq_activate_first_delayed(cwq);
2329a0a1a5fdSTejun Heo 
2330*502ca9d8STejun Heo 			/* perform delayed unbind from single cpu if empty */
2331*502ca9d8STejun Heo 			if (wq->single_cpu == gcwq->cpu &&
2332*502ca9d8STejun Heo 			    !cwq->nr_active && list_empty(&cwq->delayed_works))
2333*502ca9d8STejun Heo 				cwq_unbind_single_cpu(cwq);
2334*502ca9d8STejun Heo 
2335c8e55f36STejun Heo 			wake_up_process(cwq->worker->task);
2336a0a1a5fdSTejun Heo 		}
23378b03ae3cSTejun Heo 
23388b03ae3cSTejun Heo 		spin_unlock_irq(&gcwq->lock);
2339a0a1a5fdSTejun Heo 	}
2340a0a1a5fdSTejun Heo 
2341a0a1a5fdSTejun Heo 	workqueue_freezing = false;
2342a0a1a5fdSTejun Heo out_unlock:
2343a0a1a5fdSTejun Heo 	spin_unlock(&workqueue_lock);
2344a0a1a5fdSTejun Heo }
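
/*
 * Illustrative sketch (not part of this file): the resume path simply
 * thaws everything; works parked on delayed_works while frozen begin
 * executing again, subject to the restored max_active limits:
 *
 *	thaw_workqueues();	// typically from thaw_processes() on resume
 */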
2345a0a1a5fdSTejun Heo #endif /* CONFIG_FREEZER */
2346a0a1a5fdSTejun Heo 
2347c12920d1SOleg Nesterov void __init init_workqueues(void)
23481da177e4SLinus Torvalds {
2349c34056a3STejun Heo 	unsigned int cpu;
2350c8e55f36STejun Heo 	int i;
2351c34056a3STejun Heo 
2352db7bccf4STejun Heo 	hotcpu_notifier(workqueue_cpu_callback, CPU_PRI_WORKQUEUE);
23538b03ae3cSTejun Heo 
23548b03ae3cSTejun Heo 	/* initialize gcwqs */
23558b03ae3cSTejun Heo 	for_each_possible_cpu(cpu) {
23568b03ae3cSTejun Heo 		struct global_cwq *gcwq = get_gcwq(cpu);
23578b03ae3cSTejun Heo 
23588b03ae3cSTejun Heo 		spin_lock_init(&gcwq->lock);
23598b03ae3cSTejun Heo 		gcwq->cpu = cpu;
23608b03ae3cSTejun Heo 
2361c8e55f36STejun Heo 		INIT_LIST_HEAD(&gcwq->idle_list);
2362c8e55f36STejun Heo 		for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++)
2363c8e55f36STejun Heo 			INIT_HLIST_HEAD(&gcwq->busy_hash[i]);
2364c8e55f36STejun Heo 
23658b03ae3cSTejun Heo 		ida_init(&gcwq->worker_ida);
2366db7bccf4STejun Heo 
2367db7bccf4STejun Heo 		gcwq->trustee_state = TRUSTEE_DONE;
2368db7bccf4STejun Heo 		init_waitqueue_head(&gcwq->trustee_wait);
23698b03ae3cSTejun Heo 	}
23708b03ae3cSTejun Heo 
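	/*
	 * Create the system-wide "events" workqueue backing
	 * schedule_work() and friends; boot cannot proceed without it,
	 * hence the BUG_ON below.
	 */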
23711da177e4SLinus Torvalds 	keventd_wq = create_workqueue("events");
23721da177e4SLinus Torvalds 	BUG_ON(!keventd_wq);
23731da177e4SLinus Torvalds }
2374