xref: /linux-6.15/kernel/workqueue.c (revision 73f53c4a)
11da177e4SLinus Torvalds /*
21da177e4SLinus Torvalds  * linux/kernel/workqueue.c
31da177e4SLinus Torvalds  *
41da177e4SLinus Torvalds  * Generic mechanism for defining kernel helper threads for running
51da177e4SLinus Torvalds  * arbitrary tasks in process context.
61da177e4SLinus Torvalds  *
71da177e4SLinus Torvalds  * Started by Ingo Molnar, Copyright (C) 2002
81da177e4SLinus Torvalds  *
91da177e4SLinus Torvalds  * Derived from the taskqueue/keventd code by:
101da177e4SLinus Torvalds  *
111da177e4SLinus Torvalds  *   David Woodhouse <[email protected]>
12e1f8e874SFrancois Cami  *   Andrew Morton
131da177e4SLinus Torvalds  *   Kai Petzke <[email protected]>
141da177e4SLinus Torvalds  *   Theodore Ts'o <[email protected]>
1589ada679SChristoph Lameter  *
16cde53535SChristoph Lameter  * Made to use alloc_percpu by Christoph Lameter.
171da177e4SLinus Torvalds  */
181da177e4SLinus Torvalds 
191da177e4SLinus Torvalds #include <linux/module.h>
201da177e4SLinus Torvalds #include <linux/kernel.h>
211da177e4SLinus Torvalds #include <linux/sched.h>
221da177e4SLinus Torvalds #include <linux/init.h>
231da177e4SLinus Torvalds #include <linux/signal.h>
241da177e4SLinus Torvalds #include <linux/completion.h>
251da177e4SLinus Torvalds #include <linux/workqueue.h>
261da177e4SLinus Torvalds #include <linux/slab.h>
271da177e4SLinus Torvalds #include <linux/cpu.h>
281da177e4SLinus Torvalds #include <linux/notifier.h>
291da177e4SLinus Torvalds #include <linux/kthread.h>
301fa44ecaSJames Bottomley #include <linux/hardirq.h>
3146934023SChristoph Lameter #include <linux/mempolicy.h>
32341a5958SRafael J. Wysocki #include <linux/freezer.h>
33d5abe669SPeter Zijlstra #include <linux/kallsyms.h>
34d5abe669SPeter Zijlstra #include <linux/debug_locks.h>
354e6045f1SJohannes Berg #include <linux/lockdep.h>
361da177e4SLinus Torvalds 
371da177e4SLinus Torvalds /*
384690c4abSTejun Heo  * Structure fields follow one of the following exclusion rules.
394690c4abSTejun Heo  *
404690c4abSTejun Heo  * I: Set during initialization and read-only afterwards.
414690c4abSTejun Heo  *
424690c4abSTejun Heo  * L: cwq->lock protected.  Access with cwq->lock held.
434690c4abSTejun Heo  *
44*73f53c4aSTejun Heo  * F: wq->flush_mutex protected.
45*73f53c4aSTejun Heo  *
464690c4abSTejun Heo  * W: workqueue_lock protected.
474690c4abSTejun Heo  */
484690c4abSTejun Heo 
494690c4abSTejun Heo /*
50f756d5e2SNathan Lynch  * The per-CPU workqueue (if single thread, we always use the first
510f900049STejun Heo  * possible cpu).  The lower WORK_STRUCT_FLAG_BITS of
520f900049STejun Heo  * work_struct->data are used for flags and thus cwqs need to be
530f900049STejun Heo  * aligned on a (1 << WORK_STRUCT_FLAG_BITS) boundary.
541da177e4SLinus Torvalds  */
551da177e4SLinus Torvalds struct cpu_workqueue_struct {
561da177e4SLinus Torvalds 
571da177e4SLinus Torvalds 	spinlock_t lock;
581da177e4SLinus Torvalds 
591da177e4SLinus Torvalds 	struct list_head worklist;
601da177e4SLinus Torvalds 	wait_queue_head_t more_work;
613af24433SOleg Nesterov 	struct work_struct *current_work;
621537663fSTejun Heo 	unsigned int		cpu;
631da177e4SLinus Torvalds 
644690c4abSTejun Heo 	struct workqueue_struct *wq;		/* I: the owning workqueue */
65*73f53c4aSTejun Heo 	int			work_color;	/* L: current color */
66*73f53c4aSTejun Heo 	int			flush_color;	/* L: flushing color */
67*73f53c4aSTejun Heo 	int			nr_in_flight[WORK_NR_COLORS];
68*73f53c4aSTejun Heo 						/* L: nr of in_flight works */
6936c8b586SIngo Molnar 	struct task_struct	*thread;
700f900049STejun Heo };
711da177e4SLinus Torvalds 
721da177e4SLinus Torvalds /*
73*73f53c4aSTejun Heo  * Structure used to wait for workqueue flush.
74*73f53c4aSTejun Heo  */
75*73f53c4aSTejun Heo struct wq_flusher {
76*73f53c4aSTejun Heo 	struct list_head	list;		/* F: list of flushers */
77*73f53c4aSTejun Heo 	int			flush_color;	/* F: flush color waiting for */
78*73f53c4aSTejun Heo 	struct completion	done;		/* flush completion */
79*73f53c4aSTejun Heo };
80*73f53c4aSTejun Heo 
81*73f53c4aSTejun Heo /*
821da177e4SLinus Torvalds  * The externally visible workqueue abstraction is an array of
831da177e4SLinus Torvalds  * per-CPU workqueues:
841da177e4SLinus Torvalds  */
851da177e4SLinus Torvalds struct workqueue_struct {
8697e37d7bSTejun Heo 	unsigned int		flags;		/* I: WQ_* flags */
874690c4abSTejun Heo 	struct cpu_workqueue_struct *cpu_wq;	/* I: cwq's */
884690c4abSTejun Heo 	struct list_head	list;		/* W: list of all workqueues */
89*73f53c4aSTejun Heo 
90*73f53c4aSTejun Heo 	struct mutex		flush_mutex;	/* protects wq flushing */
91*73f53c4aSTejun Heo 	int			work_color;	/* F: current work color */
92*73f53c4aSTejun Heo 	int			flush_color;	/* F: current flush color */
93*73f53c4aSTejun Heo 	atomic_t		nr_cwqs_to_flush; /* flush in progress */
94*73f53c4aSTejun Heo 	struct wq_flusher	*first_flusher;	/* F: first flusher */
95*73f53c4aSTejun Heo 	struct list_head	flusher_queue;	/* F: flush waiters */
96*73f53c4aSTejun Heo 	struct list_head	flusher_overflow; /* F: flush overflow list */
97*73f53c4aSTejun Heo 
984690c4abSTejun Heo 	const char		*name;		/* I: workqueue name */
994e6045f1SJohannes Berg #ifdef CONFIG_LOCKDEP
1004e6045f1SJohannes Berg 	struct lockdep_map	lockdep_map;
1014e6045f1SJohannes Berg #endif
1021da177e4SLinus Torvalds };
1031da177e4SLinus Torvalds 
104dc186ad7SThomas Gleixner #ifdef CONFIG_DEBUG_OBJECTS_WORK
105dc186ad7SThomas Gleixner 
106dc186ad7SThomas Gleixner static struct debug_obj_descr work_debug_descr;
107dc186ad7SThomas Gleixner 
108dc186ad7SThomas Gleixner /*
109dc186ad7SThomas Gleixner  * fixup_init is called when:
110dc186ad7SThomas Gleixner  * - an active object is initialized
111dc186ad7SThomas Gleixner  */
112dc186ad7SThomas Gleixner static int work_fixup_init(void *addr, enum debug_obj_state state)
113dc186ad7SThomas Gleixner {
114dc186ad7SThomas Gleixner 	struct work_struct *work = addr;
115dc186ad7SThomas Gleixner 
116dc186ad7SThomas Gleixner 	switch (state) {
117dc186ad7SThomas Gleixner 	case ODEBUG_STATE_ACTIVE:
118dc186ad7SThomas Gleixner 		cancel_work_sync(work);
119dc186ad7SThomas Gleixner 		debug_object_init(work, &work_debug_descr);
120dc186ad7SThomas Gleixner 		return 1;
121dc186ad7SThomas Gleixner 	default:
122dc186ad7SThomas Gleixner 		return 0;
123dc186ad7SThomas Gleixner 	}
124dc186ad7SThomas Gleixner }
125dc186ad7SThomas Gleixner 
126dc186ad7SThomas Gleixner /*
127dc186ad7SThomas Gleixner  * fixup_activate is called when:
128dc186ad7SThomas Gleixner  * - an active object is activated
129dc186ad7SThomas Gleixner  * - an unknown object is activated (might be a statically initialized object)
130dc186ad7SThomas Gleixner  */
131dc186ad7SThomas Gleixner static int work_fixup_activate(void *addr, enum debug_obj_state state)
132dc186ad7SThomas Gleixner {
133dc186ad7SThomas Gleixner 	struct work_struct *work = addr;
134dc186ad7SThomas Gleixner 
135dc186ad7SThomas Gleixner 	switch (state) {
136dc186ad7SThomas Gleixner 
137dc186ad7SThomas Gleixner 	case ODEBUG_STATE_NOTAVAILABLE:
138dc186ad7SThomas Gleixner 		/*
139dc186ad7SThomas Gleixner 		 * This is not really a fixup. The work struct was
140dc186ad7SThomas Gleixner 		 * statically initialized. We just make sure that it
141dc186ad7SThomas Gleixner 		 * is tracked in the object tracker.
142dc186ad7SThomas Gleixner 		 */
14322df02bbSTejun Heo 		if (test_bit(WORK_STRUCT_STATIC_BIT, work_data_bits(work))) {
144dc186ad7SThomas Gleixner 			debug_object_init(work, &work_debug_descr);
145dc186ad7SThomas Gleixner 			debug_object_activate(work, &work_debug_descr);
146dc186ad7SThomas Gleixner 			return 0;
147dc186ad7SThomas Gleixner 		}
148dc186ad7SThomas Gleixner 		WARN_ON_ONCE(1);
149dc186ad7SThomas Gleixner 		return 0;
150dc186ad7SThomas Gleixner 
151dc186ad7SThomas Gleixner 	case ODEBUG_STATE_ACTIVE:
152dc186ad7SThomas Gleixner 		WARN_ON(1);
153dc186ad7SThomas Gleixner 
154dc186ad7SThomas Gleixner 	default:
155dc186ad7SThomas Gleixner 		return 0;
156dc186ad7SThomas Gleixner 	}
157dc186ad7SThomas Gleixner }
158dc186ad7SThomas Gleixner 
159dc186ad7SThomas Gleixner /*
160dc186ad7SThomas Gleixner  * fixup_free is called when:
161dc186ad7SThomas Gleixner  * - an active object is freed
162dc186ad7SThomas Gleixner  */
163dc186ad7SThomas Gleixner static int work_fixup_free(void *addr, enum debug_obj_state state)
164dc186ad7SThomas Gleixner {
165dc186ad7SThomas Gleixner 	struct work_struct *work = addr;
166dc186ad7SThomas Gleixner 
167dc186ad7SThomas Gleixner 	switch (state) {
168dc186ad7SThomas Gleixner 	case ODEBUG_STATE_ACTIVE:
169dc186ad7SThomas Gleixner 		cancel_work_sync(work);
170dc186ad7SThomas Gleixner 		debug_object_free(work, &work_debug_descr);
171dc186ad7SThomas Gleixner 		return 1;
172dc186ad7SThomas Gleixner 	default:
173dc186ad7SThomas Gleixner 		return 0;
174dc186ad7SThomas Gleixner 	}
175dc186ad7SThomas Gleixner }
176dc186ad7SThomas Gleixner 
177dc186ad7SThomas Gleixner static struct debug_obj_descr work_debug_descr = {
178dc186ad7SThomas Gleixner 	.name		= "work_struct",
179dc186ad7SThomas Gleixner 	.fixup_init	= work_fixup_init,
180dc186ad7SThomas Gleixner 	.fixup_activate	= work_fixup_activate,
181dc186ad7SThomas Gleixner 	.fixup_free	= work_fixup_free,
182dc186ad7SThomas Gleixner };
183dc186ad7SThomas Gleixner 
184dc186ad7SThomas Gleixner static inline void debug_work_activate(struct work_struct *work)
185dc186ad7SThomas Gleixner {
186dc186ad7SThomas Gleixner 	debug_object_activate(work, &work_debug_descr);
187dc186ad7SThomas Gleixner }
188dc186ad7SThomas Gleixner 
189dc186ad7SThomas Gleixner static inline void debug_work_deactivate(struct work_struct *work)
190dc186ad7SThomas Gleixner {
191dc186ad7SThomas Gleixner 	debug_object_deactivate(work, &work_debug_descr);
192dc186ad7SThomas Gleixner }
193dc186ad7SThomas Gleixner 
194dc186ad7SThomas Gleixner void __init_work(struct work_struct *work, int onstack)
195dc186ad7SThomas Gleixner {
196dc186ad7SThomas Gleixner 	if (onstack)
197dc186ad7SThomas Gleixner 		debug_object_init_on_stack(work, &work_debug_descr);
198dc186ad7SThomas Gleixner 	else
199dc186ad7SThomas Gleixner 		debug_object_init(work, &work_debug_descr);
200dc186ad7SThomas Gleixner }
201dc186ad7SThomas Gleixner EXPORT_SYMBOL_GPL(__init_work);
202dc186ad7SThomas Gleixner 
203dc186ad7SThomas Gleixner void destroy_work_on_stack(struct work_struct *work)
204dc186ad7SThomas Gleixner {
205dc186ad7SThomas Gleixner 	debug_object_free(work, &work_debug_descr);
206dc186ad7SThomas Gleixner }
207dc186ad7SThomas Gleixner EXPORT_SYMBOL_GPL(destroy_work_on_stack);
208dc186ad7SThomas Gleixner 
209dc186ad7SThomas Gleixner #else
210dc186ad7SThomas Gleixner static inline void debug_work_activate(struct work_struct *work) { }
211dc186ad7SThomas Gleixner static inline void debug_work_deactivate(struct work_struct *work) { }
212dc186ad7SThomas Gleixner #endif
213dc186ad7SThomas Gleixner 
21495402b38SGautham R Shenoy /* Serializes the accesses to the list of workqueues. */
21595402b38SGautham R Shenoy static DEFINE_SPINLOCK(workqueue_lock);
2161da177e4SLinus Torvalds static LIST_HEAD(workqueues);
2171da177e4SLinus Torvalds 
2183af24433SOleg Nesterov static int singlethread_cpu __read_mostly;
219b1f4ec17SOleg Nesterov 
2204690c4abSTejun Heo static struct cpu_workqueue_struct *get_cwq(unsigned int cpu,
2214690c4abSTejun Heo 					    struct workqueue_struct *wq)
222a848e3b6SOleg Nesterov {
223a848e3b6SOleg Nesterov 	return per_cpu_ptr(wq->cpu_wq, cpu);
224a848e3b6SOleg Nesterov }
225a848e3b6SOleg Nesterov 
2261537663fSTejun Heo static struct cpu_workqueue_struct *target_cwq(unsigned int cpu,
2271537663fSTejun Heo 					       struct workqueue_struct *wq)
2281537663fSTejun Heo {
2291537663fSTejun Heo 	if (unlikely(wq->flags & WQ_SINGLE_THREAD))
2301537663fSTejun Heo 		cpu = singlethread_cpu;
2311537663fSTejun Heo 	return get_cwq(cpu, wq);
2321537663fSTejun Heo }
2331537663fSTejun Heo 
234*73f53c4aSTejun Heo static unsigned int work_color_to_flags(int color)
235*73f53c4aSTejun Heo {
236*73f53c4aSTejun Heo 	return color << WORK_STRUCT_COLOR_SHIFT;
237*73f53c4aSTejun Heo }
238*73f53c4aSTejun Heo 
239*73f53c4aSTejun Heo static int get_work_color(struct work_struct *work)
240*73f53c4aSTejun Heo {
241*73f53c4aSTejun Heo 	return (*work_data_bits(work) >> WORK_STRUCT_COLOR_SHIFT) &
242*73f53c4aSTejun Heo 		((1 << WORK_STRUCT_COLOR_BITS) - 1);
243*73f53c4aSTejun Heo }
244*73f53c4aSTejun Heo 
245*73f53c4aSTejun Heo static int work_next_color(int color)
246*73f53c4aSTejun Heo {
247*73f53c4aSTejun Heo 	return (color + 1) % WORK_NR_COLORS;
248*73f53c4aSTejun Heo }
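
/*
 * Illustrative sketch (comment only; it mirrors __queue_work() and
 * process_one_work() below): a work carries the cwq's current
 * work_color while queued and gives it back when it finishes, which is
 * what lets flush_workqueue() wait for a precise set of works.
 *
 *	cwq->nr_in_flight[cwq->work_color]++;
 *	insert_work(cwq, work, &cwq->worklist,
 *		    work_color_to_flags(cwq->work_color));
 *	...
 *	cwq_dec_nr_in_flight(cwq, get_work_color(work));
 *
 * Barrier works are queued with WORK_NO_COLOR and are therefore
 * ignored by the in-flight accounting.
 */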
249*73f53c4aSTejun Heo 
2504594bf15SDavid Howells /*
2514594bf15SDavid Howells  * Set the workqueue on which a work item is to be run
2524594bf15SDavid Howells  * - Must *only* be called if the pending flag is set
2534594bf15SDavid Howells  */
254ed7c0feeSOleg Nesterov static inline void set_wq_data(struct work_struct *work,
2554690c4abSTejun Heo 			       struct cpu_workqueue_struct *cwq,
2564690c4abSTejun Heo 			       unsigned long extra_flags)
257365970a1SDavid Howells {
2584594bf15SDavid Howells 	BUG_ON(!work_pending(work));
2594594bf15SDavid Howells 
2604690c4abSTejun Heo 	atomic_long_set(&work->data, (unsigned long)cwq | work_static(work) |
26122df02bbSTejun Heo 			WORK_STRUCT_PENDING | extra_flags);
262365970a1SDavid Howells }
263365970a1SDavid Howells 
2644d707b9fSOleg Nesterov /*
2654d707b9fSOleg Nesterov  * Clear WORK_STRUCT_PENDING and the workqueue on which it was queued.
2664d707b9fSOleg Nesterov  */
2674d707b9fSOleg Nesterov static inline void clear_wq_data(struct work_struct *work)
2684d707b9fSOleg Nesterov {
2694690c4abSTejun Heo 	atomic_long_set(&work->data, work_static(work));
2704d707b9fSOleg Nesterov }
2714d707b9fSOleg Nesterov 
27264166699STejun Heo static inline struct cpu_workqueue_struct *get_wq_data(struct work_struct *work)
273365970a1SDavid Howells {
27464166699STejun Heo 	return (void *)(atomic_long_read(&work->data) &
27564166699STejun Heo 			WORK_STRUCT_WQ_DATA_MASK);
276365970a1SDavid Howells }
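
/*
 * Rough picture of the data word managed by the helpers above (an
 * illustration, not a definition; the authoritative layout lives in
 * include/linux/workqueue.h):
 *
 *	 high bits                                low WORK_STRUCT_FLAG_BITS
 *	+-----------------------------------------+-----------------------+
 *	| cwq pointer (WORK_STRUCT_WQ_DATA_MASK)   | color and other flags |
 *	+-----------------------------------------+-----------------------+
 *
 * Because cwqs are sufficiently aligned (see the comment above
 * cpu_workqueue_struct), get_wq_data() can recover the cwq with a
 * simple mask while the low bits stay free for WORK_STRUCT_* flags.
 */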
277365970a1SDavid Howells 
2784690c4abSTejun Heo /**
2794690c4abSTejun Heo  * insert_work - insert a work into cwq
2804690c4abSTejun Heo  * @cwq: cwq @work belongs to
2814690c4abSTejun Heo  * @work: work to insert
2824690c4abSTejun Heo  * @head: insertion point
2834690c4abSTejun Heo  * @extra_flags: extra WORK_STRUCT_* flags to set
2844690c4abSTejun Heo  *
2854690c4abSTejun Heo  * Insert @work into @cwq after @head.
2864690c4abSTejun Heo  *
2874690c4abSTejun Heo  * CONTEXT:
2884690c4abSTejun Heo  * spin_lock_irq(cwq->lock).
2894690c4abSTejun Heo  */
290b89deed3SOleg Nesterov static void insert_work(struct cpu_workqueue_struct *cwq,
2914690c4abSTejun Heo 			struct work_struct *work, struct list_head *head,
2924690c4abSTejun Heo 			unsigned int extra_flags)
293b89deed3SOleg Nesterov {
2944690c4abSTejun Heo 	/* we own @work, set data and link */
2954690c4abSTejun Heo 	set_wq_data(work, cwq, extra_flags);
2964690c4abSTejun Heo 
2976e84d644SOleg Nesterov 	/*
2986e84d644SOleg Nesterov 	 * Ensure that we get the right work->data if we see the
2996e84d644SOleg Nesterov 	 * result of list_add() below, see try_to_grab_pending().
3006e84d644SOleg Nesterov 	 */
3016e84d644SOleg Nesterov 	smp_wmb();
3024690c4abSTejun Heo 
3031a4d9b0aSOleg Nesterov 	list_add_tail(&work->entry, head);
304b89deed3SOleg Nesterov 	wake_up(&cwq->more_work);
305b89deed3SOleg Nesterov }
306b89deed3SOleg Nesterov 
3074690c4abSTejun Heo static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
3081da177e4SLinus Torvalds 			 struct work_struct *work)
3091da177e4SLinus Torvalds {
3101537663fSTejun Heo 	struct cpu_workqueue_struct *cwq = target_cwq(cpu, wq);
3111da177e4SLinus Torvalds 	unsigned long flags;
3121da177e4SLinus Torvalds 
313dc186ad7SThomas Gleixner 	debug_work_activate(work);
3141da177e4SLinus Torvalds 	spin_lock_irqsave(&cwq->lock, flags);
3154690c4abSTejun Heo 	BUG_ON(!list_empty(&work->entry));
316*73f53c4aSTejun Heo 	cwq->nr_in_flight[cwq->work_color]++;
317*73f53c4aSTejun Heo 	insert_work(cwq, work, &cwq->worklist,
318*73f53c4aSTejun Heo 		    work_color_to_flags(cwq->work_color));
3191da177e4SLinus Torvalds 	spin_unlock_irqrestore(&cwq->lock, flags);
3201da177e4SLinus Torvalds }
3211da177e4SLinus Torvalds 
3220fcb78c2SRolf Eike Beer /**
3230fcb78c2SRolf Eike Beer  * queue_work - queue work on a workqueue
3240fcb78c2SRolf Eike Beer  * @wq: workqueue to use
3250fcb78c2SRolf Eike Beer  * @work: work to queue
3260fcb78c2SRolf Eike Beer  *
327057647fcSAlan Stern  * Returns 0 if @work was already on a queue, non-zero otherwise.
3281da177e4SLinus Torvalds  *
32900dfcaf7SOleg Nesterov  * We queue the work to the CPU on which it was submitted, but if the CPU dies
33000dfcaf7SOleg Nesterov  * it can be processed by another CPU.
3311da177e4SLinus Torvalds  */
3327ad5b3a5SHarvey Harrison int queue_work(struct workqueue_struct *wq, struct work_struct *work)
3331da177e4SLinus Torvalds {
334ef1ca236SOleg Nesterov 	int ret;
3351da177e4SLinus Torvalds 
336ef1ca236SOleg Nesterov 	ret = queue_work_on(get_cpu(), wq, work);
337a848e3b6SOleg Nesterov 	put_cpu();
338ef1ca236SOleg Nesterov 
3391da177e4SLinus Torvalds 	return ret;
3401da177e4SLinus Torvalds }
341ae90dd5dSDave Jones EXPORT_SYMBOL_GPL(queue_work);
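
/*
 * Example usage (a minimal sketch; my_wq, my_work and my_work_fn are
 * hypothetical driver symbols, and the workqueue is assumed to have
 * been created with the usual create_workqueue() helper declared in
 * include/linux/workqueue.h):
 *
 *	static struct workqueue_struct *my_wq;
 *
 *	static void my_work_fn(struct work_struct *work)
 *	{
 *		pr_info("running in process context\n");
 *	}
 *	static DECLARE_WORK(my_work, my_work_fn);
 *
 *	my_wq = create_workqueue("my_wq");
 *	queue_work(my_wq, &my_work);
 */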
3421da177e4SLinus Torvalds 
343c1a220e7SZhang Rui /**
344c1a220e7SZhang Rui  * queue_work_on - queue work on specific cpu
345c1a220e7SZhang Rui  * @cpu: CPU number to execute work on
346c1a220e7SZhang Rui  * @wq: workqueue to use
347c1a220e7SZhang Rui  * @work: work to queue
348c1a220e7SZhang Rui  *
349c1a220e7SZhang Rui  * Returns 0 if @work was already on a queue, non-zero otherwise.
350c1a220e7SZhang Rui  *
351c1a220e7SZhang Rui  * We queue the work to a specific CPU; the caller must ensure it
352c1a220e7SZhang Rui  * can't go away.
353c1a220e7SZhang Rui  */
354c1a220e7SZhang Rui int
355c1a220e7SZhang Rui queue_work_on(int cpu, struct workqueue_struct *wq, struct work_struct *work)
356c1a220e7SZhang Rui {
357c1a220e7SZhang Rui 	int ret = 0;
358c1a220e7SZhang Rui 
35922df02bbSTejun Heo 	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
3604690c4abSTejun Heo 		__queue_work(cpu, wq, work);
361c1a220e7SZhang Rui 		ret = 1;
362c1a220e7SZhang Rui 	}
363c1a220e7SZhang Rui 	return ret;
364c1a220e7SZhang Rui }
365c1a220e7SZhang Rui EXPORT_SYMBOL_GPL(queue_work_on);
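
/*
 * Example usage (sketch; my_wq and my_work as in the queue_work()
 * example above).  As noted above, the caller must keep the target CPU
 * from going away, e.g. by holding the hotplug lock around the call:
 *
 *	get_online_cpus();
 *	if (cpu_online(target_cpu))
 *		queue_work_on(target_cpu, my_wq, &my_work);
 *	put_online_cpus();
 */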
366c1a220e7SZhang Rui 
3676d141c3fSLi Zefan static void delayed_work_timer_fn(unsigned long __data)
3681da177e4SLinus Torvalds {
36952bad64dSDavid Howells 	struct delayed_work *dwork = (struct delayed_work *)__data;
370ed7c0feeSOleg Nesterov 	struct cpu_workqueue_struct *cwq = get_wq_data(&dwork->work);
3711da177e4SLinus Torvalds 
3724690c4abSTejun Heo 	__queue_work(smp_processor_id(), cwq->wq, &dwork->work);
3731da177e4SLinus Torvalds }
3741da177e4SLinus Torvalds 
3750fcb78c2SRolf Eike Beer /**
3760fcb78c2SRolf Eike Beer  * queue_delayed_work - queue work on a workqueue after delay
3770fcb78c2SRolf Eike Beer  * @wq: workqueue to use
378af9997e4SRandy Dunlap  * @dwork: delayable work to queue
3790fcb78c2SRolf Eike Beer  * @delay: number of jiffies to wait before queueing
3800fcb78c2SRolf Eike Beer  *
381057647fcSAlan Stern  * Returns 0 if @dwork was already on a queue, non-zero otherwise.
3820fcb78c2SRolf Eike Beer  */
3837ad5b3a5SHarvey Harrison int queue_delayed_work(struct workqueue_struct *wq,
38452bad64dSDavid Howells 			struct delayed_work *dwork, unsigned long delay)
3851da177e4SLinus Torvalds {
38652bad64dSDavid Howells 	if (delay == 0)
38763bc0362SOleg Nesterov 		return queue_work(wq, &dwork->work);
3881da177e4SLinus Torvalds 
38963bc0362SOleg Nesterov 	return queue_delayed_work_on(-1, wq, dwork, delay);
3901da177e4SLinus Torvalds }
391ae90dd5dSDave Jones EXPORT_SYMBOL_GPL(queue_delayed_work);
3921da177e4SLinus Torvalds 
3930fcb78c2SRolf Eike Beer /**
3940fcb78c2SRolf Eike Beer  * queue_delayed_work_on - queue work on specific CPU after delay
3950fcb78c2SRolf Eike Beer  * @cpu: CPU number to execute work on
3960fcb78c2SRolf Eike Beer  * @wq: workqueue to use
397af9997e4SRandy Dunlap  * @dwork: work to queue
3980fcb78c2SRolf Eike Beer  * @delay: number of jiffies to wait before queueing
3990fcb78c2SRolf Eike Beer  *
400057647fcSAlan Stern  * Returns 0 if @dwork was already on a queue, non-zero otherwise.
4010fcb78c2SRolf Eike Beer  */
4027a6bc1cdSVenkatesh Pallipadi int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
40352bad64dSDavid Howells 			struct delayed_work *dwork, unsigned long delay)
4047a6bc1cdSVenkatesh Pallipadi {
4057a6bc1cdSVenkatesh Pallipadi 	int ret = 0;
40652bad64dSDavid Howells 	struct timer_list *timer = &dwork->timer;
40752bad64dSDavid Howells 	struct work_struct *work = &dwork->work;
4087a6bc1cdSVenkatesh Pallipadi 
40922df02bbSTejun Heo 	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
4107a6bc1cdSVenkatesh Pallipadi 		BUG_ON(timer_pending(timer));
4117a6bc1cdSVenkatesh Pallipadi 		BUG_ON(!list_empty(&work->entry));
4127a6bc1cdSVenkatesh Pallipadi 
4138a3e77ccSAndrew Liu 		timer_stats_timer_set_start_info(&dwork->timer);
4148a3e77ccSAndrew Liu 
415ed7c0feeSOleg Nesterov 		/* This stores cwq for the moment, for the timer_fn */
4161537663fSTejun Heo 		set_wq_data(work, target_cwq(raw_smp_processor_id(), wq), 0);
4177a6bc1cdSVenkatesh Pallipadi 		timer->expires = jiffies + delay;
41852bad64dSDavid Howells 		timer->data = (unsigned long)dwork;
4197a6bc1cdSVenkatesh Pallipadi 		timer->function = delayed_work_timer_fn;
42063bc0362SOleg Nesterov 
42163bc0362SOleg Nesterov 		if (unlikely(cpu >= 0))
4227a6bc1cdSVenkatesh Pallipadi 			add_timer_on(timer, cpu);
42363bc0362SOleg Nesterov 		else
42463bc0362SOleg Nesterov 			add_timer(timer);
4257a6bc1cdSVenkatesh Pallipadi 		ret = 1;
4267a6bc1cdSVenkatesh Pallipadi 	}
4277a6bc1cdSVenkatesh Pallipadi 	return ret;
4287a6bc1cdSVenkatesh Pallipadi }
429ae90dd5dSDave Jones EXPORT_SYMBOL_GPL(queue_delayed_work_on);
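
/*
 * Example usage (sketch; my_wq, my_dwork and my_timeout_fn are
 * hypothetical).  A delayed work that rearms itself every 100ms:
 *
 *	static void my_timeout_fn(struct work_struct *work)
 *	{
 *		struct delayed_work *dwork =
 *			container_of(work, struct delayed_work, work);
 *
 *		pr_info("poll tick\n");
 *		queue_delayed_work(my_wq, dwork, msecs_to_jiffies(100));
 *	}
 *	static DECLARE_DELAYED_WORK(my_dwork, my_timeout_fn);
 *
 *	queue_delayed_work(my_wq, &my_dwork, msecs_to_jiffies(100));
 */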
4301da177e4SLinus Torvalds 
431a62428c0STejun Heo /**
432*73f53c4aSTejun Heo  * cwq_dec_nr_in_flight - decrement cwq's nr_in_flight
433*73f53c4aSTejun Heo  * @cwq: cwq of interest
434*73f53c4aSTejun Heo  * @color: color of work which left the queue
435*73f53c4aSTejun Heo  *
436*73f53c4aSTejun Heo  * A work has either completed or been removed from the pending queue;
437*73f53c4aSTejun Heo  * decrement nr_in_flight of its cwq and handle workqueue flushing.
438*73f53c4aSTejun Heo  *
439*73f53c4aSTejun Heo  * CONTEXT:
440*73f53c4aSTejun Heo  * spin_lock_irq(cwq->lock).
441*73f53c4aSTejun Heo  */
442*73f53c4aSTejun Heo static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color)
443*73f53c4aSTejun Heo {
444*73f53c4aSTejun Heo 	/* ignore uncolored works */
445*73f53c4aSTejun Heo 	if (color == WORK_NO_COLOR)
446*73f53c4aSTejun Heo 		return;
447*73f53c4aSTejun Heo 
448*73f53c4aSTejun Heo 	cwq->nr_in_flight[color]--;
449*73f53c4aSTejun Heo 
450*73f53c4aSTejun Heo 	/* is flush in progress and are we at the flushing tip? */
451*73f53c4aSTejun Heo 	if (likely(cwq->flush_color != color))
452*73f53c4aSTejun Heo 		return;
453*73f53c4aSTejun Heo 
454*73f53c4aSTejun Heo 	/* are there still in-flight works? */
455*73f53c4aSTejun Heo 	if (cwq->nr_in_flight[color])
456*73f53c4aSTejun Heo 		return;
457*73f53c4aSTejun Heo 
458*73f53c4aSTejun Heo 	/* this cwq is done, clear flush_color */
459*73f53c4aSTejun Heo 	cwq->flush_color = -1;
460*73f53c4aSTejun Heo 
461*73f53c4aSTejun Heo 	/*
462*73f53c4aSTejun Heo 	 * If this was the last cwq, wake up the first flusher.  It
463*73f53c4aSTejun Heo 	 * will handle the rest.
464*73f53c4aSTejun Heo 	 */
465*73f53c4aSTejun Heo 	if (atomic_dec_and_test(&cwq->wq->nr_cwqs_to_flush))
466*73f53c4aSTejun Heo 		complete(&cwq->wq->first_flusher->done);
467*73f53c4aSTejun Heo }
468*73f53c4aSTejun Heo 
469*73f53c4aSTejun Heo /**
470a62428c0STejun Heo  * process_one_work - process single work
471a62428c0STejun Heo  * @cwq: cwq to process work for
472a62428c0STejun Heo  * @work: work to process
473a62428c0STejun Heo  *
474a62428c0STejun Heo  * Process @work.  This function contains all the logic necessary to
475a62428c0STejun Heo  * process a single work including synchronization against and
476a62428c0STejun Heo  * interaction with other workers on the same cpu, queueing and
477a62428c0STejun Heo  * flushing.  As long as the context requirement is met, any worker can
478a62428c0STejun Heo  * call this function to process a work.
479a62428c0STejun Heo  *
480a62428c0STejun Heo  * CONTEXT:
481a62428c0STejun Heo  * spin_lock_irq(cwq->lock) which is released and regrabbed.
482a62428c0STejun Heo  */
483a62428c0STejun Heo static void process_one_work(struct cpu_workqueue_struct *cwq,
484a62428c0STejun Heo 			     struct work_struct *work)
4851da177e4SLinus Torvalds {
4866bb49e59SDavid Howells 	work_func_t f = work->func;
487*73f53c4aSTejun Heo 	int work_color;
4884e6045f1SJohannes Berg #ifdef CONFIG_LOCKDEP
4894e6045f1SJohannes Berg 	/*
490a62428c0STejun Heo 	 * It is permissible to free the struct work_struct from
491a62428c0STejun Heo 	 * inside the function that is called from it; we need to take
492a62428c0STejun Heo 	 * this into account for lockdep too.  To avoid bogus "held
493a62428c0STejun Heo 	 * lock freed" warnings as well as problems when looking into
494a62428c0STejun Heo 	 * work->lockdep_map, make a copy and use that here.
4954e6045f1SJohannes Berg 	 */
4964e6045f1SJohannes Berg 	struct lockdep_map lockdep_map = work->lockdep_map;
4974e6045f1SJohannes Berg #endif
498a62428c0STejun Heo 	/* claim and process */
499dc186ad7SThomas Gleixner 	debug_work_deactivate(work);
500b89deed3SOleg Nesterov 	cwq->current_work = work;
501*73f53c4aSTejun Heo 	work_color = get_work_color(work);
502a62428c0STejun Heo 	list_del_init(&work->entry);
503a62428c0STejun Heo 
504f293ea92SOleg Nesterov 	spin_unlock_irq(&cwq->lock);
5051da177e4SLinus Torvalds 
506365970a1SDavid Howells 	BUG_ON(get_wq_data(work) != cwq);
50723b2e599SOleg Nesterov 	work_clear_pending(work);
5083295f0efSIngo Molnar 	lock_map_acquire(&cwq->wq->lockdep_map);
5093295f0efSIngo Molnar 	lock_map_acquire(&lockdep_map);
51065f27f38SDavid Howells 	f(work);
5113295f0efSIngo Molnar 	lock_map_release(&lockdep_map);
5123295f0efSIngo Molnar 	lock_map_release(&cwq->wq->lockdep_map);
5131da177e4SLinus Torvalds 
514d5abe669SPeter Zijlstra 	if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
515d5abe669SPeter Zijlstra 		printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
516d5abe669SPeter Zijlstra 		       "%s/0x%08x/%d\n",
517a62428c0STejun Heo 		       current->comm, preempt_count(), task_pid_nr(current));
518d5abe669SPeter Zijlstra 		printk(KERN_ERR "    last function: ");
519d5abe669SPeter Zijlstra 		print_symbol("%s\n", (unsigned long)f);
520d5abe669SPeter Zijlstra 		debug_show_held_locks(current);
521d5abe669SPeter Zijlstra 		dump_stack();
522d5abe669SPeter Zijlstra 	}
523d5abe669SPeter Zijlstra 
524f293ea92SOleg Nesterov 	spin_lock_irq(&cwq->lock);
525a62428c0STejun Heo 
526a62428c0STejun Heo 	/* we're done with it, release */
527b89deed3SOleg Nesterov 	cwq->current_work = NULL;
528*73f53c4aSTejun Heo 	cwq_dec_nr_in_flight(cwq, work_color);
5291da177e4SLinus Torvalds }
530a62428c0STejun Heo 
531a62428c0STejun Heo static void run_workqueue(struct cpu_workqueue_struct *cwq)
532a62428c0STejun Heo {
533a62428c0STejun Heo 	spin_lock_irq(&cwq->lock);
534a62428c0STejun Heo 	while (!list_empty(&cwq->worklist)) {
535a62428c0STejun Heo 		struct work_struct *work = list_entry(cwq->worklist.next,
536a62428c0STejun Heo 						struct work_struct, entry);
537a62428c0STejun Heo 		process_one_work(cwq, work);
538a62428c0STejun Heo 	}
539f293ea92SOleg Nesterov 	spin_unlock_irq(&cwq->lock);
5401da177e4SLinus Torvalds }
5411da177e4SLinus Torvalds 
5424690c4abSTejun Heo /**
5434690c4abSTejun Heo  * worker_thread - the worker thread function
5444690c4abSTejun Heo  * @__cwq: cwq to serve
5454690c4abSTejun Heo  *
5464690c4abSTejun Heo  * The cwq worker thread function.
5474690c4abSTejun Heo  */
5481da177e4SLinus Torvalds static int worker_thread(void *__cwq)
5491da177e4SLinus Torvalds {
5501da177e4SLinus Torvalds 	struct cpu_workqueue_struct *cwq = __cwq;
5513af24433SOleg Nesterov 	DEFINE_WAIT(wait);
5521da177e4SLinus Torvalds 
55397e37d7bSTejun Heo 	if (cwq->wq->flags & WQ_FREEZEABLE)
55483144186SRafael J. Wysocki 		set_freezable();
5551da177e4SLinus Torvalds 
5563af24433SOleg Nesterov 	for (;;) {
5573af24433SOleg Nesterov 		prepare_to_wait(&cwq->more_work, &wait, TASK_INTERRUPTIBLE);
55814441960SOleg Nesterov 		if (!freezing(current) &&
55914441960SOleg Nesterov 		    !kthread_should_stop() &&
56014441960SOleg Nesterov 		    list_empty(&cwq->worklist))
5611da177e4SLinus Torvalds 			schedule();
5623af24433SOleg Nesterov 		finish_wait(&cwq->more_work, &wait);
5631da177e4SLinus Torvalds 
56485f4186aSOleg Nesterov 		try_to_freeze();
56585f4186aSOleg Nesterov 
56614441960SOleg Nesterov 		if (kthread_should_stop())
5673af24433SOleg Nesterov 			break;
5683af24433SOleg Nesterov 
5691537663fSTejun Heo 		if (unlikely(!cpumask_equal(&cwq->thread->cpus_allowed,
5701537663fSTejun Heo 					    get_cpu_mask(cwq->cpu))))
5711537663fSTejun Heo 			set_cpus_allowed_ptr(cwq->thread,
5721537663fSTejun Heo 					     get_cpu_mask(cwq->cpu));
5731da177e4SLinus Torvalds 		run_workqueue(cwq);
5741da177e4SLinus Torvalds 	}
5753af24433SOleg Nesterov 
5761da177e4SLinus Torvalds 	return 0;
5771da177e4SLinus Torvalds }
5781da177e4SLinus Torvalds 
579fc2e4d70SOleg Nesterov struct wq_barrier {
580fc2e4d70SOleg Nesterov 	struct work_struct	work;
581fc2e4d70SOleg Nesterov 	struct completion	done;
582fc2e4d70SOleg Nesterov };
583fc2e4d70SOleg Nesterov 
584fc2e4d70SOleg Nesterov static void wq_barrier_func(struct work_struct *work)
585fc2e4d70SOleg Nesterov {
586fc2e4d70SOleg Nesterov 	struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
587fc2e4d70SOleg Nesterov 	complete(&barr->done);
588fc2e4d70SOleg Nesterov }
589fc2e4d70SOleg Nesterov 
5904690c4abSTejun Heo /**
5914690c4abSTejun Heo  * insert_wq_barrier - insert a barrier work
5924690c4abSTejun Heo  * @cwq: cwq to insert barrier into
5934690c4abSTejun Heo  * @barr: wq_barrier to insert
5944690c4abSTejun Heo  * @head: insertion point
5954690c4abSTejun Heo  *
5964690c4abSTejun Heo  * Insert barrier @barr into @cwq before @head.
5974690c4abSTejun Heo  *
5984690c4abSTejun Heo  * CONTEXT:
5994690c4abSTejun Heo  * spin_lock_irq(cwq->lock).
6004690c4abSTejun Heo  */
60183c22520SOleg Nesterov static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
6021a4d9b0aSOleg Nesterov 			struct wq_barrier *barr, struct list_head *head)
603fc2e4d70SOleg Nesterov {
604dc186ad7SThomas Gleixner 	/*
605dc186ad7SThomas Gleixner 	 * debugobject calls are safe here even with cwq->lock locked
606dc186ad7SThomas Gleixner 	 * as we know for sure that this will not trigger any of the
607dc186ad7SThomas Gleixner 	 * checks and call back into the fixup functions where we
608dc186ad7SThomas Gleixner 	 * might deadlock.
609dc186ad7SThomas Gleixner 	 */
610dc186ad7SThomas Gleixner 	INIT_WORK_ON_STACK(&barr->work, wq_barrier_func);
61122df02bbSTejun Heo 	__set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work));
612fc2e4d70SOleg Nesterov 	init_completion(&barr->done);
61383c22520SOleg Nesterov 
614dc186ad7SThomas Gleixner 	debug_work_activate(&barr->work);
615*73f53c4aSTejun Heo 	insert_work(cwq, &barr->work, head, work_color_to_flags(WORK_NO_COLOR));
616fc2e4d70SOleg Nesterov }
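
/*
 * Sketch of how a barrier is used (this mirrors flush_work() and
 * wait_on_cpu_work() below; @barr lives on the flusher's stack):
 *
 *	struct wq_barrier barr;
 *
 *	spin_lock_irq(&cwq->lock);
 *	insert_wq_barrier(cwq, &barr, head);
 *	spin_unlock_irq(&cwq->lock);
 *
 *	wait_for_completion(&barr.done);
 *	destroy_work_on_stack(&barr.work);
 *
 * The worker drains everything ahead of the barrier and then runs
 * wq_barrier_func(), which completes barr.done and wakes the flusher.
 */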
617fc2e4d70SOleg Nesterov 
618*73f53c4aSTejun Heo /**
619*73f53c4aSTejun Heo  * flush_workqueue_prep_cwqs - prepare cwqs for workqueue flushing
620*73f53c4aSTejun Heo  * @wq: workqueue being flushed
621*73f53c4aSTejun Heo  * @flush_color: new flush color, < 0 for no-op
622*73f53c4aSTejun Heo  * @work_color: new work color, < 0 for no-op
623*73f53c4aSTejun Heo  *
624*73f53c4aSTejun Heo  * Prepare cwqs for workqueue flushing.
625*73f53c4aSTejun Heo  *
626*73f53c4aSTejun Heo  * If @flush_color is non-negative, flush_color on all cwqs should be
627*73f53c4aSTejun Heo  * -1.  If no cwq has in-flight commands at the specified color, all
628*73f53c4aSTejun Heo  * cwq->flush_color's stay at -1 and %false is returned.  If any cwq
629*73f53c4aSTejun Heo  * has in flight commands, its cwq->flush_color is set to
630*73f53c4aSTejun Heo  * @flush_color, @wq->nr_cwqs_to_flush is updated accordingly, cwq
631*73f53c4aSTejun Heo  * wakeup logic is armed and %true is returned.
632*73f53c4aSTejun Heo  *
633*73f53c4aSTejun Heo  * The caller should have initialized @wq->first_flusher prior to
634*73f53c4aSTejun Heo  * calling this function with non-negative @flush_color.  If
635*73f53c4aSTejun Heo  * @flush_color is negative, no flush color update is done and %false
636*73f53c4aSTejun Heo  * is returned.
637*73f53c4aSTejun Heo  *
638*73f53c4aSTejun Heo  * If @work_color is non-negative, all cwqs should have the same
639*73f53c4aSTejun Heo  * work_color which immediately precedes @work_color and all will be
640*73f53c4aSTejun Heo  * advanced to @work_color.
641*73f53c4aSTejun Heo  *
642*73f53c4aSTejun Heo  * CONTEXT:
643*73f53c4aSTejun Heo  * mutex_lock(wq->flush_mutex).
644*73f53c4aSTejun Heo  *
645*73f53c4aSTejun Heo  * RETURNS:
646*73f53c4aSTejun Heo  * %true if @flush_color >= 0 and there's something to flush.  %false
647*73f53c4aSTejun Heo  * otherwise.
648*73f53c4aSTejun Heo  */
649*73f53c4aSTejun Heo static bool flush_workqueue_prep_cwqs(struct workqueue_struct *wq,
650*73f53c4aSTejun Heo 				      int flush_color, int work_color)
6511da177e4SLinus Torvalds {
652*73f53c4aSTejun Heo 	bool wait = false;
653*73f53c4aSTejun Heo 	unsigned int cpu;
6541da177e4SLinus Torvalds 
655*73f53c4aSTejun Heo 	if (flush_color >= 0) {
656*73f53c4aSTejun Heo 		BUG_ON(atomic_read(&wq->nr_cwqs_to_flush));
657*73f53c4aSTejun Heo 		atomic_set(&wq->nr_cwqs_to_flush, 1);
658*73f53c4aSTejun Heo 	}
659*73f53c4aSTejun Heo 
660*73f53c4aSTejun Heo 	for_each_possible_cpu(cpu) {
661*73f53c4aSTejun Heo 		struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
6622355b70fSLai Jiangshan 
66383c22520SOleg Nesterov 		spin_lock_irq(&cwq->lock);
664*73f53c4aSTejun Heo 
665*73f53c4aSTejun Heo 		if (flush_color >= 0) {
666*73f53c4aSTejun Heo 			BUG_ON(cwq->flush_color != -1);
667*73f53c4aSTejun Heo 
668*73f53c4aSTejun Heo 			if (cwq->nr_in_flight[flush_color]) {
669*73f53c4aSTejun Heo 				cwq->flush_color = flush_color;
670*73f53c4aSTejun Heo 				atomic_inc(&wq->nr_cwqs_to_flush);
671*73f53c4aSTejun Heo 				wait = true;
67283c22520SOleg Nesterov 			}
673*73f53c4aSTejun Heo 		}
674*73f53c4aSTejun Heo 
675*73f53c4aSTejun Heo 		if (work_color >= 0) {
676*73f53c4aSTejun Heo 			BUG_ON(work_color != work_next_color(cwq->work_color));
677*73f53c4aSTejun Heo 			cwq->work_color = work_color;
678*73f53c4aSTejun Heo 		}
679*73f53c4aSTejun Heo 
68083c22520SOleg Nesterov 		spin_unlock_irq(&cwq->lock);
681dc186ad7SThomas Gleixner 	}
68214441960SOleg Nesterov 
683*73f53c4aSTejun Heo 	if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_cwqs_to_flush))
684*73f53c4aSTejun Heo 		complete(&wq->first_flusher->done);
685*73f53c4aSTejun Heo 
686*73f53c4aSTejun Heo 	return wait;
68783c22520SOleg Nesterov }
6881da177e4SLinus Torvalds 
6890fcb78c2SRolf Eike Beer /**
6901da177e4SLinus Torvalds  * flush_workqueue - ensure that any scheduled work has run to completion.
6910fcb78c2SRolf Eike Beer  * @wq: workqueue to flush
6921da177e4SLinus Torvalds  *
6931da177e4SLinus Torvalds  * Forces execution of the workqueue and blocks until its completion.
6941da177e4SLinus Torvalds  * This is typically used in driver shutdown handlers.
6951da177e4SLinus Torvalds  *
696fc2e4d70SOleg Nesterov  * We sleep until all works which were queued on entry have been handled,
697fc2e4d70SOleg Nesterov  * but we are not livelocked by new incoming ones.
6981da177e4SLinus Torvalds  */
6997ad5b3a5SHarvey Harrison void flush_workqueue(struct workqueue_struct *wq)
7001da177e4SLinus Torvalds {
701*73f53c4aSTejun Heo 	struct wq_flusher this_flusher = {
702*73f53c4aSTejun Heo 		.list = LIST_HEAD_INIT(this_flusher.list),
703*73f53c4aSTejun Heo 		.flush_color = -1,
704*73f53c4aSTejun Heo 		.done = COMPLETION_INITIALIZER_ONSTACK(this_flusher.done),
705*73f53c4aSTejun Heo 	};
706*73f53c4aSTejun Heo 	int next_color;
707b1f4ec17SOleg Nesterov 
7083295f0efSIngo Molnar 	lock_map_acquire(&wq->lockdep_map);
7093295f0efSIngo Molnar 	lock_map_release(&wq->lockdep_map);
710*73f53c4aSTejun Heo 
711*73f53c4aSTejun Heo 	mutex_lock(&wq->flush_mutex);
712*73f53c4aSTejun Heo 
713*73f53c4aSTejun Heo 	/*
714*73f53c4aSTejun Heo 	 * Start-to-wait phase
715*73f53c4aSTejun Heo 	 */
716*73f53c4aSTejun Heo 	next_color = work_next_color(wq->work_color);
717*73f53c4aSTejun Heo 
718*73f53c4aSTejun Heo 	if (next_color != wq->flush_color) {
719*73f53c4aSTejun Heo 		/*
720*73f53c4aSTejun Heo 		 * Color space is not full.  The current work_color
721*73f53c4aSTejun Heo 		 * becomes our flush_color and work_color is advanced
722*73f53c4aSTejun Heo 		 * by one.
723*73f53c4aSTejun Heo 		 */
724*73f53c4aSTejun Heo 		BUG_ON(!list_empty(&wq->flusher_overflow));
725*73f53c4aSTejun Heo 		this_flusher.flush_color = wq->work_color;
726*73f53c4aSTejun Heo 		wq->work_color = next_color;
727*73f53c4aSTejun Heo 
728*73f53c4aSTejun Heo 		if (!wq->first_flusher) {
729*73f53c4aSTejun Heo 			/* no flush in progress, become the first flusher */
730*73f53c4aSTejun Heo 			BUG_ON(wq->flush_color != this_flusher.flush_color);
731*73f53c4aSTejun Heo 
732*73f53c4aSTejun Heo 			wq->first_flusher = &this_flusher;
733*73f53c4aSTejun Heo 
734*73f53c4aSTejun Heo 			if (!flush_workqueue_prep_cwqs(wq, wq->flush_color,
735*73f53c4aSTejun Heo 						       wq->work_color)) {
736*73f53c4aSTejun Heo 				/* nothing to flush, done */
737*73f53c4aSTejun Heo 				wq->flush_color = next_color;
738*73f53c4aSTejun Heo 				wq->first_flusher = NULL;
739*73f53c4aSTejun Heo 				goto out_unlock;
740*73f53c4aSTejun Heo 			}
741*73f53c4aSTejun Heo 		} else {
742*73f53c4aSTejun Heo 			/* wait in queue */
743*73f53c4aSTejun Heo 			BUG_ON(wq->flush_color == this_flusher.flush_color);
744*73f53c4aSTejun Heo 			list_add_tail(&this_flusher.list, &wq->flusher_queue);
745*73f53c4aSTejun Heo 			flush_workqueue_prep_cwqs(wq, -1, wq->work_color);
746*73f53c4aSTejun Heo 		}
747*73f53c4aSTejun Heo 	} else {
748*73f53c4aSTejun Heo 		/*
749*73f53c4aSTejun Heo 		 * Oops, color space is full, wait on overflow queue.
750*73f53c4aSTejun Heo 		 * The next flush completion will assign us
751*73f53c4aSTejun Heo 		 * flush_color and transfer to flusher_queue.
752*73f53c4aSTejun Heo 		 */
753*73f53c4aSTejun Heo 		list_add_tail(&this_flusher.list, &wq->flusher_overflow);
754*73f53c4aSTejun Heo 	}
755*73f53c4aSTejun Heo 
756*73f53c4aSTejun Heo 	mutex_unlock(&wq->flush_mutex);
757*73f53c4aSTejun Heo 
758*73f53c4aSTejun Heo 	wait_for_completion(&this_flusher.done);
759*73f53c4aSTejun Heo 
760*73f53c4aSTejun Heo 	/*
761*73f53c4aSTejun Heo 	 * Wake-up-and-cascade phase
762*73f53c4aSTejun Heo 	 *
763*73f53c4aSTejun Heo 	 * First flushers are responsible for cascading flushes and
764*73f53c4aSTejun Heo 	 * handling overflow.  Non-first flushers can simply return.
765*73f53c4aSTejun Heo 	 */
766*73f53c4aSTejun Heo 	if (wq->first_flusher != &this_flusher)
767*73f53c4aSTejun Heo 		return;
768*73f53c4aSTejun Heo 
769*73f53c4aSTejun Heo 	mutex_lock(&wq->flush_mutex);
770*73f53c4aSTejun Heo 
771*73f53c4aSTejun Heo 	wq->first_flusher = NULL;
772*73f53c4aSTejun Heo 
773*73f53c4aSTejun Heo 	BUG_ON(!list_empty(&this_flusher.list));
774*73f53c4aSTejun Heo 	BUG_ON(wq->flush_color != this_flusher.flush_color);
775*73f53c4aSTejun Heo 
776*73f53c4aSTejun Heo 	while (true) {
777*73f53c4aSTejun Heo 		struct wq_flusher *next, *tmp;
778*73f53c4aSTejun Heo 
779*73f53c4aSTejun Heo 		/* complete all the flushers sharing the current flush color */
780*73f53c4aSTejun Heo 		list_for_each_entry_safe(next, tmp, &wq->flusher_queue, list) {
781*73f53c4aSTejun Heo 			if (next->flush_color != wq->flush_color)
782*73f53c4aSTejun Heo 				break;
783*73f53c4aSTejun Heo 			list_del_init(&next->list);
784*73f53c4aSTejun Heo 			complete(&next->done);
785*73f53c4aSTejun Heo 		}
786*73f53c4aSTejun Heo 
787*73f53c4aSTejun Heo 		BUG_ON(!list_empty(&wq->flusher_overflow) &&
788*73f53c4aSTejun Heo 		       wq->flush_color != work_next_color(wq->work_color));
789*73f53c4aSTejun Heo 
790*73f53c4aSTejun Heo 		/* this flush_color is finished, advance by one */
791*73f53c4aSTejun Heo 		wq->flush_color = work_next_color(wq->flush_color);
792*73f53c4aSTejun Heo 
793*73f53c4aSTejun Heo 		/* one color has been freed, handle overflow queue */
794*73f53c4aSTejun Heo 		if (!list_empty(&wq->flusher_overflow)) {
795*73f53c4aSTejun Heo 			/*
796*73f53c4aSTejun Heo 			 * Assign the same color to all overflowed
797*73f53c4aSTejun Heo 			 * flushers, advance work_color and append to
798*73f53c4aSTejun Heo 			 * flusher_queue.  This is the start-to-wait
799*73f53c4aSTejun Heo 			 * phase for these overflowed flushers.
800*73f53c4aSTejun Heo 			 */
801*73f53c4aSTejun Heo 			list_for_each_entry(tmp, &wq->flusher_overflow, list)
802*73f53c4aSTejun Heo 				tmp->flush_color = wq->work_color;
803*73f53c4aSTejun Heo 
804*73f53c4aSTejun Heo 			wq->work_color = work_next_color(wq->work_color);
805*73f53c4aSTejun Heo 
806*73f53c4aSTejun Heo 			list_splice_tail_init(&wq->flusher_overflow,
807*73f53c4aSTejun Heo 					      &wq->flusher_queue);
808*73f53c4aSTejun Heo 			flush_workqueue_prep_cwqs(wq, -1, wq->work_color);
809*73f53c4aSTejun Heo 		}
810*73f53c4aSTejun Heo 
811*73f53c4aSTejun Heo 		if (list_empty(&wq->flusher_queue)) {
812*73f53c4aSTejun Heo 			BUG_ON(wq->flush_color != wq->work_color);
813*73f53c4aSTejun Heo 			break;
814*73f53c4aSTejun Heo 		}
815*73f53c4aSTejun Heo 
816*73f53c4aSTejun Heo 		/*
817*73f53c4aSTejun Heo 		 * Need to flush more colors.  Make the next flusher
818*73f53c4aSTejun Heo 		 * the new first flusher and arm cwqs.
819*73f53c4aSTejun Heo 		 */
820*73f53c4aSTejun Heo 		BUG_ON(wq->flush_color == wq->work_color);
821*73f53c4aSTejun Heo 		BUG_ON(wq->flush_color != next->flush_color);
822*73f53c4aSTejun Heo 
823*73f53c4aSTejun Heo 		list_del_init(&next->list);
824*73f53c4aSTejun Heo 		wq->first_flusher = next;
825*73f53c4aSTejun Heo 
826*73f53c4aSTejun Heo 		if (flush_workqueue_prep_cwqs(wq, wq->flush_color, -1))
827*73f53c4aSTejun Heo 			break;
828*73f53c4aSTejun Heo 
829*73f53c4aSTejun Heo 		/*
830*73f53c4aSTejun Heo 		 * Meh... this color is already done, clear first
831*73f53c4aSTejun Heo 		 * flusher and repeat cascading.
832*73f53c4aSTejun Heo 		 */
833*73f53c4aSTejun Heo 		wq->first_flusher = NULL;
834*73f53c4aSTejun Heo 	}
835*73f53c4aSTejun Heo 
836*73f53c4aSTejun Heo out_unlock:
837*73f53c4aSTejun Heo 	mutex_unlock(&wq->flush_mutex);
8381da177e4SLinus Torvalds }
839ae90dd5dSDave Jones EXPORT_SYMBOL_GPL(flush_workqueue);
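
/*
 * Example usage (sketch; my_wq is hypothetical).  A typical driver
 * shutdown path stops queueing new work first, then flushes whatever
 * is already queued before tearing everything down:
 *
 *	(make sure nothing will call queue_work(my_wq, ...) any more)
 *
 *	flush_workqueue(my_wq);
 *	destroy_workqueue(my_wq);
 */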
8401da177e4SLinus Torvalds 
841db700897SOleg Nesterov /**
842db700897SOleg Nesterov  * flush_work - block until a work_struct's callback has terminated
843db700897SOleg Nesterov  * @work: the work which is to be flushed
844db700897SOleg Nesterov  *
845a67da70dSOleg Nesterov  * Returns false if @work has already terminated.
846a67da70dSOleg Nesterov  *
847db700897SOleg Nesterov  * It is expected that, prior to calling flush_work(), the caller has
848db700897SOleg Nesterov  * arranged for the work to not be requeued, otherwise it doesn't make
849db700897SOleg Nesterov  * sense to use this function.
850db700897SOleg Nesterov  */
851db700897SOleg Nesterov int flush_work(struct work_struct *work)
852db700897SOleg Nesterov {
853db700897SOleg Nesterov 	struct cpu_workqueue_struct *cwq;
854db700897SOleg Nesterov 	struct list_head *prev;
855db700897SOleg Nesterov 	struct wq_barrier barr;
856db700897SOleg Nesterov 
857db700897SOleg Nesterov 	might_sleep();
858db700897SOleg Nesterov 	cwq = get_wq_data(work);
859db700897SOleg Nesterov 	if (!cwq)
860db700897SOleg Nesterov 		return 0;
861db700897SOleg Nesterov 
8623295f0efSIngo Molnar 	lock_map_acquire(&cwq->wq->lockdep_map);
8633295f0efSIngo Molnar 	lock_map_release(&cwq->wq->lockdep_map);
864a67da70dSOleg Nesterov 
865db700897SOleg Nesterov 	spin_lock_irq(&cwq->lock);
866db700897SOleg Nesterov 	if (!list_empty(&work->entry)) {
867db700897SOleg Nesterov 		/*
868db700897SOleg Nesterov 		 * See the comment near try_to_grab_pending()->smp_rmb().
869db700897SOleg Nesterov 		 * If it was re-queued under us we are not going to wait.
870db700897SOleg Nesterov 		 */
871db700897SOleg Nesterov 		smp_rmb();
872db700897SOleg Nesterov 		if (unlikely(cwq != get_wq_data(work)))
8734690c4abSTejun Heo 			goto already_gone;
874db700897SOleg Nesterov 		prev = &work->entry;
875db700897SOleg Nesterov 	} else {
876db700897SOleg Nesterov 		if (cwq->current_work != work)
8774690c4abSTejun Heo 			goto already_gone;
878db700897SOleg Nesterov 		prev = &cwq->worklist;
879db700897SOleg Nesterov 	}
880db700897SOleg Nesterov 	insert_wq_barrier(cwq, &barr, prev->next);
881db700897SOleg Nesterov 
8824690c4abSTejun Heo 	spin_unlock_irq(&cwq->lock);
883db700897SOleg Nesterov 	wait_for_completion(&barr.done);
884dc186ad7SThomas Gleixner 	destroy_work_on_stack(&barr.work);
885db700897SOleg Nesterov 	return 1;
8864690c4abSTejun Heo already_gone:
8874690c4abSTejun Heo 	spin_unlock_irq(&cwq->lock);
8884690c4abSTejun Heo 	return 0;
889db700897SOleg Nesterov }
890db700897SOleg Nesterov EXPORT_SYMBOL_GPL(flush_work);
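
/*
 * Example usage (sketch; my_wq and my_work are hypothetical).  Useful
 * when only a single work item needs to have finished rather than the
 * whole workqueue:
 *
 *	queue_work(my_wq, &my_work);
 *
 *	if (!flush_work(&my_work))
 *		pr_debug("my_work had already finished\n");
 */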
891db700897SOleg Nesterov 
8926e84d644SOleg Nesterov /*
8931f1f642eSOleg Nesterov  * Upon a successful return (>= 0), the caller "owns" WORK_STRUCT_PENDING bit,
8946e84d644SOleg Nesterov  * so this work can't be re-armed in any way.
8956e84d644SOleg Nesterov  */
8966e84d644SOleg Nesterov static int try_to_grab_pending(struct work_struct *work)
8976e84d644SOleg Nesterov {
8986e84d644SOleg Nesterov 	struct cpu_workqueue_struct *cwq;
8991f1f642eSOleg Nesterov 	int ret = -1;
9006e84d644SOleg Nesterov 
90122df02bbSTejun Heo 	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)))
9021f1f642eSOleg Nesterov 		return 0;
9036e84d644SOleg Nesterov 
9046e84d644SOleg Nesterov 	/*
9056e84d644SOleg Nesterov 	 * The queueing is in progress, or it is already queued. Try to
9066e84d644SOleg Nesterov 	 * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
9076e84d644SOleg Nesterov 	 */
9086e84d644SOleg Nesterov 
9096e84d644SOleg Nesterov 	cwq = get_wq_data(work);
9106e84d644SOleg Nesterov 	if (!cwq)
9116e84d644SOleg Nesterov 		return ret;
9126e84d644SOleg Nesterov 
9136e84d644SOleg Nesterov 	spin_lock_irq(&cwq->lock);
9146e84d644SOleg Nesterov 	if (!list_empty(&work->entry)) {
9156e84d644SOleg Nesterov 		/*
9166e84d644SOleg Nesterov 		 * This work is queued, but perhaps we locked the wrong cwq.
9176e84d644SOleg Nesterov 		 * In that case we must see the new value after rmb(), see
9186e84d644SOleg Nesterov 		 * insert_work()->wmb().
9196e84d644SOleg Nesterov 		 */
9206e84d644SOleg Nesterov 		smp_rmb();
9216e84d644SOleg Nesterov 		if (cwq == get_wq_data(work)) {
922dc186ad7SThomas Gleixner 			debug_work_deactivate(work);
9236e84d644SOleg Nesterov 			list_del_init(&work->entry);
924*73f53c4aSTejun Heo 			cwq_dec_nr_in_flight(cwq, get_work_color(work));
9256e84d644SOleg Nesterov 			ret = 1;
9266e84d644SOleg Nesterov 		}
9276e84d644SOleg Nesterov 	}
9286e84d644SOleg Nesterov 	spin_unlock_irq(&cwq->lock);
9296e84d644SOleg Nesterov 
9306e84d644SOleg Nesterov 	return ret;
9316e84d644SOleg Nesterov }
9326e84d644SOleg Nesterov 
9336e84d644SOleg Nesterov static void wait_on_cpu_work(struct cpu_workqueue_struct *cwq,
934b89deed3SOleg Nesterov 				struct work_struct *work)
935b89deed3SOleg Nesterov {
936b89deed3SOleg Nesterov 	struct wq_barrier barr;
937b89deed3SOleg Nesterov 	int running = 0;
938b89deed3SOleg Nesterov 
939b89deed3SOleg Nesterov 	spin_lock_irq(&cwq->lock);
940b89deed3SOleg Nesterov 	if (unlikely(cwq->current_work == work)) {
9411a4d9b0aSOleg Nesterov 		insert_wq_barrier(cwq, &barr, cwq->worklist.next);
942b89deed3SOleg Nesterov 		running = 1;
943b89deed3SOleg Nesterov 	}
944b89deed3SOleg Nesterov 	spin_unlock_irq(&cwq->lock);
945b89deed3SOleg Nesterov 
946dc186ad7SThomas Gleixner 	if (unlikely(running)) {
947b89deed3SOleg Nesterov 		wait_for_completion(&barr.done);
948dc186ad7SThomas Gleixner 		destroy_work_on_stack(&barr.work);
949dc186ad7SThomas Gleixner 	}
950b89deed3SOleg Nesterov }
951b89deed3SOleg Nesterov 
9526e84d644SOleg Nesterov static void wait_on_work(struct work_struct *work)
953b89deed3SOleg Nesterov {
954b89deed3SOleg Nesterov 	struct cpu_workqueue_struct *cwq;
95528e53bddSOleg Nesterov 	struct workqueue_struct *wq;
956b1f4ec17SOleg Nesterov 	int cpu;
957b89deed3SOleg Nesterov 
958f293ea92SOleg Nesterov 	might_sleep();
959f293ea92SOleg Nesterov 
9603295f0efSIngo Molnar 	lock_map_acquire(&work->lockdep_map);
9613295f0efSIngo Molnar 	lock_map_release(&work->lockdep_map);
9624e6045f1SJohannes Berg 
963b89deed3SOleg Nesterov 	cwq = get_wq_data(work);
964b89deed3SOleg Nesterov 	if (!cwq)
9653af24433SOleg Nesterov 		return;
966b89deed3SOleg Nesterov 
96728e53bddSOleg Nesterov 	wq = cwq->wq;
96828e53bddSOleg Nesterov 
9691537663fSTejun Heo 	for_each_possible_cpu(cpu)
9704690c4abSTejun Heo 		wait_on_cpu_work(get_cwq(cpu, wq), work);
9716e84d644SOleg Nesterov }
9726e84d644SOleg Nesterov 
9731f1f642eSOleg Nesterov static int __cancel_work_timer(struct work_struct *work,
9741f1f642eSOleg Nesterov 				struct timer_list* timer)
9751f1f642eSOleg Nesterov {
9761f1f642eSOleg Nesterov 	int ret;
9771f1f642eSOleg Nesterov 
9781f1f642eSOleg Nesterov 	do {
9791f1f642eSOleg Nesterov 		ret = (timer && likely(del_timer(timer)));
9801f1f642eSOleg Nesterov 		if (!ret)
9811f1f642eSOleg Nesterov 			ret = try_to_grab_pending(work);
9821f1f642eSOleg Nesterov 		wait_on_work(work);
9831f1f642eSOleg Nesterov 	} while (unlikely(ret < 0));
9841f1f642eSOleg Nesterov 
9854d707b9fSOleg Nesterov 	clear_wq_data(work);
9861f1f642eSOleg Nesterov 	return ret;
9871f1f642eSOleg Nesterov }
9881f1f642eSOleg Nesterov 
9896e84d644SOleg Nesterov /**
9906e84d644SOleg Nesterov  * cancel_work_sync - block until a work_struct's callback has terminated
9916e84d644SOleg Nesterov  * @work: the work which is to be flushed
9926e84d644SOleg Nesterov  *
9931f1f642eSOleg Nesterov  * Returns true if @work was pending.
9941f1f642eSOleg Nesterov  *
9956e84d644SOleg Nesterov  * cancel_work_sync() will cancel the work if it is queued. If the work's
9966e84d644SOleg Nesterov  * callback appears to be running, cancel_work_sync() will block until it
9976e84d644SOleg Nesterov  * has completed.
9986e84d644SOleg Nesterov  *
9996e84d644SOleg Nesterov  * It is possible to use this function if the work re-queues itself. It can
10006e84d644SOleg Nesterov  * cancel the work even if it migrates to another workqueue; however, in that
10016e84d644SOleg Nesterov  * case it only guarantees that work->func() has completed on the last queued
10026e84d644SOleg Nesterov  * workqueue.
10036e84d644SOleg Nesterov  *
10046e84d644SOleg Nesterov  * cancel_work_sync(&delayed_work->work) should be used only if ->timer is not
10056e84d644SOleg Nesterov  * pending, otherwise it goes into a busy-wait loop until the timer expires.
10066e84d644SOleg Nesterov  *
10076e84d644SOleg Nesterov  * The caller must ensure that workqueue_struct on which this work was last
10086e84d644SOleg Nesterov  * queued can't be destroyed before this function returns.
10096e84d644SOleg Nesterov  */
10101f1f642eSOleg Nesterov int cancel_work_sync(struct work_struct *work)
10116e84d644SOleg Nesterov {
10121f1f642eSOleg Nesterov 	return __cancel_work_timer(work, NULL);
1013b89deed3SOleg Nesterov }
101428e53bddSOleg Nesterov EXPORT_SYMBOL_GPL(cancel_work_sync);
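
/*
 * Example teardown sequence (sketch; struct my_dev, dev->stopping and
 * my_remove() are hypothetical):
 *
 *	static void my_remove(struct my_dev *dev)
 *	{
 *		dev->stopping = true;
 *		cancel_work_sync(&dev->work);
 *		kfree(dev);
 *	}
 *
 * dev->stopping stands for whatever mechanism keeps the work from
 * requeueing itself; once cancel_work_sync() returns the work is
 * neither queued nor running, so freeing the containing structure is
 * safe.
 */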
1015b89deed3SOleg Nesterov 
10166e84d644SOleg Nesterov /**
1017f5a421a4SOleg Nesterov  * cancel_delayed_work_sync - reliably kill off a delayed work.
10186e84d644SOleg Nesterov  * @dwork: the delayed work struct
10196e84d644SOleg Nesterov  *
10201f1f642eSOleg Nesterov  * Returns true if @dwork was pending.
10211f1f642eSOleg Nesterov  *
10226e84d644SOleg Nesterov  * It is possible to use this function if @dwork rearms itself via queue_work()
10236e84d644SOleg Nesterov  * or queue_delayed_work(). See also the comment for cancel_work_sync().
10246e84d644SOleg Nesterov  */
10251f1f642eSOleg Nesterov int cancel_delayed_work_sync(struct delayed_work *dwork)
10266e84d644SOleg Nesterov {
10271f1f642eSOleg Nesterov 	return __cancel_work_timer(&dwork->work, &dwork->timer);
10286e84d644SOleg Nesterov }
1029f5a421a4SOleg Nesterov EXPORT_SYMBOL(cancel_delayed_work_sync);
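
/*
 * Example (sketch): a self-rearming delayed work like the one sketched
 * after queue_delayed_work_on() above is shut down with a single call:
 *
 *	cancel_delayed_work_sync(&my_dwork);
 *
 * This kills the pending timer and waits for any queued or running
 * instance, even though the handler requeues itself.
 */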
10301da177e4SLinus Torvalds 
10316e84d644SOleg Nesterov static struct workqueue_struct *keventd_wq __read_mostly;
10321da177e4SLinus Torvalds 
10330fcb78c2SRolf Eike Beer /**
10340fcb78c2SRolf Eike Beer  * schedule_work - put work task in global workqueue
10350fcb78c2SRolf Eike Beer  * @work: job to be done
10360fcb78c2SRolf Eike Beer  *
10375b0f437dSBart Van Assche  * Returns zero if @work was already on the kernel-global workqueue and
10385b0f437dSBart Van Assche  * non-zero otherwise.
10395b0f437dSBart Van Assche  *
10405b0f437dSBart Van Assche  * This puts a job in the kernel-global workqueue if it was not already
10415b0f437dSBart Van Assche  * queued and leaves it in the same position on the kernel-global
10425b0f437dSBart Van Assche  * workqueue otherwise.
10430fcb78c2SRolf Eike Beer  */
10447ad5b3a5SHarvey Harrison int schedule_work(struct work_struct *work)
10451da177e4SLinus Torvalds {
10461da177e4SLinus Torvalds 	return queue_work(keventd_wq, work);
10471da177e4SLinus Torvalds }
1048ae90dd5dSDave Jones EXPORT_SYMBOL(schedule_work);
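
/*
 * Example usage (sketch; my_event and my_event_fn are hypothetical).
 * The kernel-global workqueue needs no setup, so a one-off deferred
 * action is simply:
 *
 *	static void my_event_fn(struct work_struct *work)
 *	{
 *		pr_info("handled in keventd\n");
 *	}
 *	static DECLARE_WORK(my_event, my_event_fn);
 *
 *	schedule_work(&my_event);
 */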
10491da177e4SLinus Torvalds 
1050c1a220e7SZhang Rui /*
1051c1a220e7SZhang Rui  * schedule_work_on - put work task on a specific cpu
1052c1a220e7SZhang Rui  * @cpu: cpu to put the work task on
1053c1a220e7SZhang Rui  * @work: job to be done
1054c1a220e7SZhang Rui  *
1055c1a220e7SZhang Rui  * This puts a job on a specific cpu.
1056c1a220e7SZhang Rui  */
1057c1a220e7SZhang Rui int schedule_work_on(int cpu, struct work_struct *work)
1058c1a220e7SZhang Rui {
1059c1a220e7SZhang Rui 	return queue_work_on(cpu, keventd_wq, work);
1060c1a220e7SZhang Rui }
1061c1a220e7SZhang Rui EXPORT_SYMBOL(schedule_work_on);
1062c1a220e7SZhang Rui 
10630fcb78c2SRolf Eike Beer /**
10640fcb78c2SRolf Eike Beer  * schedule_delayed_work - put work task in global workqueue after delay
106552bad64dSDavid Howells  * @dwork: job to be done
106652bad64dSDavid Howells  * @delay: number of jiffies to wait or 0 for immediate execution
10670fcb78c2SRolf Eike Beer  *
10680fcb78c2SRolf Eike Beer  * After waiting for a given time this puts a job in the kernel-global
10690fcb78c2SRolf Eike Beer  * workqueue.
10700fcb78c2SRolf Eike Beer  */
10717ad5b3a5SHarvey Harrison int schedule_delayed_work(struct delayed_work *dwork,
107282f67cd9SIngo Molnar 					unsigned long delay)
10731da177e4SLinus Torvalds {
107452bad64dSDavid Howells 	return queue_delayed_work(keventd_wq, dwork, delay);
10751da177e4SLinus Torvalds }
1076ae90dd5dSDave Jones EXPORT_SYMBOL(schedule_delayed_work);
10771da177e4SLinus Torvalds 
10780fcb78c2SRolf Eike Beer /**
10798c53e463SLinus Torvalds  * flush_delayed_work - block until a dwork_struct's callback has terminated
10808c53e463SLinus Torvalds  * @dwork: the delayed work which is to be flushed
10818c53e463SLinus Torvalds  *
10828c53e463SLinus Torvalds  * Any timeout is cancelled, and any pending work is run immediately.
10838c53e463SLinus Torvalds  */
10848c53e463SLinus Torvalds void flush_delayed_work(struct delayed_work *dwork)
10858c53e463SLinus Torvalds {
10868c53e463SLinus Torvalds 	if (del_timer_sync(&dwork->timer)) {
10874690c4abSTejun Heo 		__queue_work(get_cpu(), get_wq_data(&dwork->work)->wq,
10884690c4abSTejun Heo 			     &dwork->work);
10898c53e463SLinus Torvalds 		put_cpu();
10908c53e463SLinus Torvalds 	}
10918c53e463SLinus Torvalds 	flush_work(&dwork->work);
10928c53e463SLinus Torvalds }
10938c53e463SLinus Torvalds EXPORT_SYMBOL(flush_delayed_work);
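
/*
 * Example usage (sketch; my_dwork is hypothetical): force a delayed
 * work to run now instead of waiting for its timer, e.g. just before
 * suspending a device:
 *
 *	schedule_delayed_work(&my_dwork, HZ);
 *
 *	flush_delayed_work(&my_dwork);
 *
 * The timer is cancelled and any pending instance completes before
 * flush_delayed_work() returns.
 */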
10948c53e463SLinus Torvalds 
10958c53e463SLinus Torvalds /**
10960fcb78c2SRolf Eike Beer  * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
10970fcb78c2SRolf Eike Beer  * @cpu: cpu to use
109852bad64dSDavid Howells  * @dwork: job to be done
10990fcb78c2SRolf Eike Beer  * @delay: number of jiffies to wait
11000fcb78c2SRolf Eike Beer  *
11010fcb78c2SRolf Eike Beer  * After waiting for a given time this puts a job in the kernel-global
11020fcb78c2SRolf Eike Beer  * workqueue on the specified CPU.
11030fcb78c2SRolf Eike Beer  */
11041da177e4SLinus Torvalds int schedule_delayed_work_on(int cpu,
110552bad64dSDavid Howells 			struct delayed_work *dwork, unsigned long delay)
11061da177e4SLinus Torvalds {
110752bad64dSDavid Howells 	return queue_delayed_work_on(cpu, keventd_wq, dwork, delay);
11081da177e4SLinus Torvalds }
1109ae90dd5dSDave Jones EXPORT_SYMBOL(schedule_delayed_work_on);
11101da177e4SLinus Torvalds 
1111b6136773SAndrew Morton /**
1112b6136773SAndrew Morton  * schedule_on_each_cpu - call a function on each online CPU from keventd
1113b6136773SAndrew Morton  * @func: the function to call
1114b6136773SAndrew Morton  *
1115b6136773SAndrew Morton  * Returns zero on success.
1116b6136773SAndrew Morton  * Returns a negative errno on failure.
1117b6136773SAndrew Morton  *
1118b6136773SAndrew Morton  * schedule_on_each_cpu() is very slow.
1119b6136773SAndrew Morton  */
112065f27f38SDavid Howells int schedule_on_each_cpu(work_func_t func)
112115316ba8SChristoph Lameter {
112215316ba8SChristoph Lameter 	int cpu;
112365a64464SAndi Kleen 	int orig = -1;
1124b6136773SAndrew Morton 	struct work_struct *works;
112515316ba8SChristoph Lameter 
1126b6136773SAndrew Morton 	works = alloc_percpu(struct work_struct);
1127b6136773SAndrew Morton 	if (!works)
112815316ba8SChristoph Lameter 		return -ENOMEM;
1129b6136773SAndrew Morton 
113095402b38SGautham R Shenoy 	get_online_cpus();
113193981800STejun Heo 
113293981800STejun Heo 	/*
113393981800STejun Heo 	 * When running in keventd, don't schedule a work item on itself;
113493981800STejun Heo 	 * the function can be called directly because this keventd is
113593981800STejun Heo 	 * already bound to the local cpu, and doing so is also faster.
113693981800STejun Heo 	 */
113793981800STejun Heo 	if (current_is_keventd())
113893981800STejun Heo 		orig = raw_smp_processor_id();
113993981800STejun Heo 
114015316ba8SChristoph Lameter 	for_each_online_cpu(cpu) {
11419bfb1839SIngo Molnar 		struct work_struct *work = per_cpu_ptr(works, cpu);
11429bfb1839SIngo Molnar 
11439bfb1839SIngo Molnar 		INIT_WORK(work, func);
114493981800STejun Heo 		if (cpu != orig)
11458de6d308SOleg Nesterov 			schedule_work_on(cpu, work);
114615316ba8SChristoph Lameter 	}
114793981800STejun Heo 	if (orig >= 0)
114893981800STejun Heo 		func(per_cpu_ptr(works, orig));
114993981800STejun Heo 
115093981800STejun Heo 	for_each_online_cpu(cpu)
11518616a89aSOleg Nesterov 		flush_work(per_cpu_ptr(works, cpu));
115293981800STejun Heo 
115395402b38SGautham R Shenoy 	put_online_cpus();
1154b6136773SAndrew Morton 	free_percpu(works);
115515316ba8SChristoph Lameter 	return 0;
115615316ba8SChristoph Lameter }
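
/*
 * Illustrative sketch (not part of the original source): using
 * schedule_on_each_cpu() to run a handler on every online cpu and wait
 * for all of them to finish.  The function names are made up.
 */
#if 0	/* example only, never compiled */
static void example_drain_local_cache(struct work_struct *unused)
{
	/* runs once on each online cpu, in that cpu's keventd */
}

static int example_drain_all(void)
{
	return schedule_on_each_cpu(example_drain_local_cache);
}
#endif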
115715316ba8SChristoph Lameter 
1158eef6a7d5SAlan Stern /**
1159eef6a7d5SAlan Stern  * flush_scheduled_work - ensure that any scheduled work has run to completion.
1160eef6a7d5SAlan Stern  *
1161eef6a7d5SAlan Stern  * Forces execution of the kernel-global workqueue and blocks until its
1162eef6a7d5SAlan Stern  * completion.
1163eef6a7d5SAlan Stern  *
1164eef6a7d5SAlan Stern  * Think twice before calling this function!  It's very easy to get into
1165eef6a7d5SAlan Stern  * trouble if you don't take great care.  Either of the following situations
1166eef6a7d5SAlan Stern  * will lead to deadlock:
1167eef6a7d5SAlan Stern  *
1168eef6a7d5SAlan Stern  *	One of the work items currently on the workqueue needs to acquire
1169eef6a7d5SAlan Stern  *	a lock held by your code or its caller.
1170eef6a7d5SAlan Stern  *
1171eef6a7d5SAlan Stern  *	Your code is running in the context of a work routine.
1172eef6a7d5SAlan Stern  *
1173eef6a7d5SAlan Stern  * Both will be detected by lockdep when they occur, but the first kind
1174eef6a7d5SAlan Stern  * might not occur very often: it depends on what work items are on the
1175eef6a7d5SAlan Stern  * workqueue and what locks they need, which you have no control over.
1176eef6a7d5SAlan Stern  *
1177eef6a7d5SAlan Stern  * In most situations flushing the entire workqueue is overkill; you merely
1178eef6a7d5SAlan Stern  * need to know that a particular work item isn't queued and isn't running.
1179eef6a7d5SAlan Stern  * In such cases you should use cancel_delayed_work_sync() or
1180eef6a7d5SAlan Stern  * cancel_work_sync() instead.
1181eef6a7d5SAlan Stern  */
11821da177e4SLinus Torvalds void flush_scheduled_work(void)
11831da177e4SLinus Torvalds {
11841da177e4SLinus Torvalds 	flush_workqueue(keventd_wq);
11851da177e4SLinus Torvalds }
1186ae90dd5dSDave Jones EXPORT_SYMBOL(flush_scheduled_work);
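
/*
 * Illustrative sketch (not part of the original source): per the warning
 * above, a driver that only cares about its own items should cancel them
 * directly instead of flushing the whole global workqueue.  The structure
 * and field names are made up.
 */
#if 0	/* example only, never compiled */
static void example_remove(struct example_dev *dev)
{
	cancel_work_sync(&dev->work);		/* only our work item */
	cancel_delayed_work_sync(&dev->dwork);	/* and our delayed one */
	/* no flush_scheduled_work() needed */
}
#endif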
11871da177e4SLinus Torvalds 
11881da177e4SLinus Torvalds /**
11891fa44ecaSJames Bottomley  * execute_in_process_context - reliably execute the routine with user context
11901fa44ecaSJames Bottomley  * @fn:		the function to execute
11911fa44ecaSJames Bottomley  * @ew:		guaranteed storage for the execute work structure (must
11921fa44ecaSJames Bottomley  *		be available when the work executes)
11931fa44ecaSJames Bottomley  *
11941fa44ecaSJames Bottomley  * Executes the function immediately if process context is available,
11951fa44ecaSJames Bottomley  * otherwise schedules the function for delayed execution.
11961fa44ecaSJames Bottomley  *
11971fa44ecaSJames Bottomley  * Returns:	0 - function was executed
11981fa44ecaSJames Bottomley  *		1 - function was scheduled for execution
11991fa44ecaSJames Bottomley  */
120065f27f38SDavid Howells int execute_in_process_context(work_func_t fn, struct execute_work *ew)
12011fa44ecaSJames Bottomley {
12021fa44ecaSJames Bottomley 	if (!in_interrupt()) {
120365f27f38SDavid Howells 		fn(&ew->work);
12041fa44ecaSJames Bottomley 		return 0;
12051fa44ecaSJames Bottomley 	}
12061fa44ecaSJames Bottomley 
120765f27f38SDavid Howells 	INIT_WORK(&ew->work, fn);
12081fa44ecaSJames Bottomley 	schedule_work(&ew->work);
12091fa44ecaSJames Bottomley 
12101fa44ecaSJames Bottomley 	return 1;
12111fa44ecaSJames Bottomley }
12121fa44ecaSJames Bottomley EXPORT_SYMBOL_GPL(execute_in_process_context);
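
/*
 * Illustrative sketch (not part of the original source): a release path
 * that may be entered from interrupt context uses execute_in_process_context()
 * so the part that can sleep always runs with process context.  The @ew
 * storage must stay valid until the work runs; struct example_obj (which
 * embeds a struct execute_work ew) is made up.
 */
#if 0	/* example only, never compiled */
static void example_release(struct work_struct *work)
{
	struct example_obj *obj = container_of(work, struct example_obj,
					       ew.work);

	/* sleeping is fine here */
	kfree(obj);
}

static void example_put(struct example_obj *obj)
{
	execute_in_process_context(example_release, &obj->ew);
}
#endif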
12131fa44ecaSJames Bottomley 
12141da177e4SLinus Torvalds int keventd_up(void)
12151da177e4SLinus Torvalds {
12161da177e4SLinus Torvalds 	return keventd_wq != NULL;
12171da177e4SLinus Torvalds }
12181da177e4SLinus Torvalds 
12191da177e4SLinus Torvalds int current_is_keventd(void)
12201da177e4SLinus Torvalds {
12211da177e4SLinus Torvalds 	struct cpu_workqueue_struct *cwq;
1222d243769dSHugh Dickins 	int cpu = raw_smp_processor_id(); /* preempt-safe: keventd is per-cpu */
12231da177e4SLinus Torvalds 	int ret = 0;
12241da177e4SLinus Torvalds 
12251da177e4SLinus Torvalds 	BUG_ON(!keventd_wq);
12261da177e4SLinus Torvalds 
12271537663fSTejun Heo 	cwq = get_cwq(cpu, keventd_wq);
12281da177e4SLinus Torvalds 	if (current == cwq->thread)
12291da177e4SLinus Torvalds 		ret = 1;
12301da177e4SLinus Torvalds 
12311da177e4SLinus Torvalds 	return ret;
12331da177e4SLinus Torvalds }
12341da177e4SLinus Torvalds 
12350f900049STejun Heo static struct cpu_workqueue_struct *alloc_cwqs(void)
12360f900049STejun Heo {
12370f900049STejun Heo 	/*
12380f900049STejun Heo 	 * cwqs are force-aligned according to WORK_STRUCT_FLAG_BITS.
12390f900049STejun Heo 	 * Make sure that the alignment isn't lower than that of
12400f900049STejun Heo 	 * unsigned long long.
12410f900049STejun Heo 	 */
12420f900049STejun Heo 	const size_t size = sizeof(struct cpu_workqueue_struct);
12430f900049STejun Heo 	const size_t align = max_t(size_t, 1 << WORK_STRUCT_FLAG_BITS,
12440f900049STejun Heo 				   __alignof__(unsigned long long));
12450f900049STejun Heo 	struct cpu_workqueue_struct *cwqs;
12460f900049STejun Heo #ifndef CONFIG_SMP
12470f900049STejun Heo 	void *ptr;
12480f900049STejun Heo 
12490f900049STejun Heo 	/*
12500f900049STejun Heo 	 * On UP, percpu allocator doesn't honor alignment parameter
12510f900049STejun Heo 	 * and simply uses arch-dependent default.  Allocate enough
12520f900049STejun Heo 	 * room to align cwq and put an extra pointer at the end
12530f900049STejun Heo 	 * pointing back to the originally allocated pointer which
12540f900049STejun Heo 	 * will be used for free.
12550f900049STejun Heo 	 *
12560f900049STejun Heo 	 * FIXME: This really belongs to UP percpu code.  Update UP
12570f900049STejun Heo 	 * percpu code to honor alignment and remove this ugliness.
12580f900049STejun Heo 	 */
12590f900049STejun Heo 	ptr = __alloc_percpu(size + align + sizeof(void *), 1);
12600f900049STejun Heo 	cwqs = PTR_ALIGN(ptr, align);
12610f900049STejun Heo 	*(void **)per_cpu_ptr(cwqs + 1, 0) = ptr;
12620f900049STejun Heo #else
12630f900049STejun Heo 	/* On SMP, percpu allocator can do it itself */
12640f900049STejun Heo 	cwqs = __alloc_percpu(size, align);
12650f900049STejun Heo #endif
12660f900049STejun Heo 	/* just in case, make sure it's actually aligned */
12670f900049STejun Heo 	BUG_ON(!IS_ALIGNED((unsigned long)cwqs, align));
12680f900049STejun Heo 	return cwqs;
12690f900049STejun Heo }
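
/*
 * Illustrative layout note (not part of the original source): on UP the
 * allocation made above ends up as
 *
 *	ptr                 cwqs == PTR_ALIGN(ptr, align)
 *	|<-- align slack -->|<-- cpu_workqueue_struct -->|<-- saved ptr -->|
 *
 * i.e. the originally allocated pointer is stashed right behind the
 * aligned cwq so that free_cwqs() can find it again.
 */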
12700f900049STejun Heo 
12710f900049STejun Heo static void free_cwqs(struct cpu_workqueue_struct *cwqs)
12720f900049STejun Heo {
12730f900049STejun Heo #ifndef CONFIG_SMP
12740f900049STejun Heo 	/* on UP, the pointer to free is stored right after the cwq */
12750f900049STejun Heo 	if (cwqs)
12760f900049STejun Heo 		free_percpu(*(void **)per_cpu_ptr(cwqs + 1, 0));
12770f900049STejun Heo #else
12780f900049STejun Heo 	free_percpu(cwqs);
12790f900049STejun Heo #endif
12800f900049STejun Heo }
12810f900049STejun Heo 
12823af24433SOleg Nesterov static int create_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
12833af24433SOleg Nesterov {
12843af24433SOleg Nesterov 	struct workqueue_struct *wq = cwq->wq;
12853af24433SOleg Nesterov 	struct task_struct *p;
12863af24433SOleg Nesterov 
12871537663fSTejun Heo 	p = kthread_create(worker_thread, cwq, "%s/%d", wq->name, cpu);
12883af24433SOleg Nesterov 	/*
12893af24433SOleg Nesterov 	 * Nobody can add a work_struct to this cwq yet: if the caller
12903af24433SOleg Nesterov 	 * is __create_workqueue() nobody can see this wq, and if the
12913af24433SOleg Nesterov 	 * caller is CPU_UP_PREPARE the cpu is not yet on cpu_online_map.
12923af24433SOleg Nesterov 	 * Either way we can abort safely.
12953af24433SOleg Nesterov 	 */
12963af24433SOleg Nesterov 	if (IS_ERR(p))
12973af24433SOleg Nesterov 		return PTR_ERR(p);
12983af24433SOleg Nesterov 	cwq->thread = p;
12993af24433SOleg Nesterov 
13003af24433SOleg Nesterov 	return 0;
13013af24433SOleg Nesterov }
13023af24433SOleg Nesterov 
130306ba38a9SOleg Nesterov static void start_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
130406ba38a9SOleg Nesterov {
130506ba38a9SOleg Nesterov 	struct task_struct *p = cwq->thread;
130606ba38a9SOleg Nesterov 
130706ba38a9SOleg Nesterov 	if (p != NULL) {
130806ba38a9SOleg Nesterov 		if (cpu >= 0)
130906ba38a9SOleg Nesterov 			kthread_bind(p, cpu);
131006ba38a9SOleg Nesterov 		wake_up_process(p);
131106ba38a9SOleg Nesterov 	}
131206ba38a9SOleg Nesterov }
131306ba38a9SOleg Nesterov 
13144e6045f1SJohannes Berg struct workqueue_struct *__create_workqueue_key(const char *name,
131597e37d7bSTejun Heo 						unsigned int flags,
1316eb13ba87SJohannes Berg 						struct lock_class_key *key,
1317eb13ba87SJohannes Berg 						const char *lock_name)
13183af24433SOleg Nesterov {
13191537663fSTejun Heo 	bool singlethread = flags & WQ_SINGLE_THREAD;
13203af24433SOleg Nesterov 	struct workqueue_struct *wq;
13213af24433SOleg Nesterov 	int err = 0, cpu;
13223af24433SOleg Nesterov 
13233af24433SOleg Nesterov 	wq = kzalloc(sizeof(*wq), GFP_KERNEL);
13243af24433SOleg Nesterov 	if (!wq)
13254690c4abSTejun Heo 		goto err;
13263af24433SOleg Nesterov 
13270f900049STejun Heo 	wq->cpu_wq = alloc_cwqs();
13284690c4abSTejun Heo 	if (!wq->cpu_wq)
13294690c4abSTejun Heo 		goto err;
13303af24433SOleg Nesterov 
133197e37d7bSTejun Heo 	wq->flags = flags;
1332*73f53c4aSTejun Heo 	mutex_init(&wq->flush_mutex);
1333*73f53c4aSTejun Heo 	atomic_set(&wq->nr_cwqs_to_flush, 0);
1334*73f53c4aSTejun Heo 	INIT_LIST_HEAD(&wq->flusher_queue);
1335*73f53c4aSTejun Heo 	INIT_LIST_HEAD(&wq->flusher_overflow);
13363af24433SOleg Nesterov 	wq->name = name;
1337eb13ba87SJohannes Berg 	lockdep_init_map(&wq->lockdep_map, lock_name, key, 0);
1338cce1a165SOleg Nesterov 	INIT_LIST_HEAD(&wq->list);
13393af24433SOleg Nesterov 
13403da1c84cSOleg Nesterov 	cpu_maps_update_begin();
13416af8bf3dSOleg Nesterov 	/*
13426af8bf3dSOleg Nesterov 	 * We must initialize the cwq for each possible cpu even if we
13436af8bf3dSOleg Nesterov 	 * end up calling destroy_workqueue() afterwards.  Otherwise
13446af8bf3dSOleg Nesterov 	 * cpu_up() could hit an uninitialized cwq once we drop the
13456af8bf3dSOleg Nesterov 	 * lock.
13466af8bf3dSOleg Nesterov 	 */
13473af24433SOleg Nesterov 	for_each_possible_cpu(cpu) {
13481537663fSTejun Heo 		struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
13491537663fSTejun Heo 
13500f900049STejun Heo 		BUG_ON((unsigned long)cwq & WORK_STRUCT_FLAG_MASK);
13511537663fSTejun Heo 		cwq->wq = wq;
13521537663fSTejun Heo 		cwq->cpu = cpu;
1353*73f53c4aSTejun Heo 		cwq->flush_color = -1;
13541537663fSTejun Heo 		spin_lock_init(&cwq->lock);
13551537663fSTejun Heo 		INIT_LIST_HEAD(&cwq->worklist);
13561537663fSTejun Heo 		init_waitqueue_head(&cwq->more_work);
13571537663fSTejun Heo 
13581537663fSTejun Heo 		if (err)
13593af24433SOleg Nesterov 			continue;
13603af24433SOleg Nesterov 		err = create_workqueue_thread(cwq, cpu);
13611537663fSTejun Heo 		if (cpu_online(cpu) && !singlethread)
136206ba38a9SOleg Nesterov 			start_workqueue_thread(cwq, cpu);
13631537663fSTejun Heo 		else
13641537663fSTejun Heo 			start_workqueue_thread(cwq, -1);
13653af24433SOleg Nesterov 	}
13661537663fSTejun Heo 
13671537663fSTejun Heo 	spin_lock(&workqueue_lock);
13681537663fSTejun Heo 	list_add(&wq->list, &workqueues);
13691537663fSTejun Heo 	spin_unlock(&workqueue_lock);
13701537663fSTejun Heo 
13713da1c84cSOleg Nesterov 	cpu_maps_update_done();
13723af24433SOleg Nesterov 
13733af24433SOleg Nesterov 	if (err) {
13743af24433SOleg Nesterov 		destroy_workqueue(wq);
13753af24433SOleg Nesterov 		wq = NULL;
13763af24433SOleg Nesterov 	}
13773af24433SOleg Nesterov 	return wq;
13784690c4abSTejun Heo err:
13794690c4abSTejun Heo 	if (wq) {
13800f900049STejun Heo 		free_cwqs(wq->cpu_wq);
13814690c4abSTejun Heo 		kfree(wq);
13824690c4abSTejun Heo 	}
13834690c4abSTejun Heo 	return NULL;
13843af24433SOleg Nesterov }
13854e6045f1SJohannes Berg EXPORT_SYMBOL_GPL(__create_workqueue_key);
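
/*
 * Illustrative sketch (not part of the original source): callers normally
 * reach this through the create_workqueue()/create_singlethread_workqueue()
 * wrappers in workqueue.h rather than calling __create_workqueue_key()
 * directly.  The names below are made up.
 */
#if 0	/* example only, never compiled */
static struct workqueue_struct *example_wq;

static int __init example_init(void)
{
	example_wq = create_singlethread_workqueue("example");
	if (!example_wq)
		return -ENOMEM;
	return 0;
}
#endif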
13863af24433SOleg Nesterov 
13873af24433SOleg Nesterov /**
13883af24433SOleg Nesterov  * destroy_workqueue - safely terminate a workqueue
13893af24433SOleg Nesterov  * @wq: target workqueue
13903af24433SOleg Nesterov  *
13913af24433SOleg Nesterov  * Safely destroy a workqueue. All work currently pending will be done first.
13923af24433SOleg Nesterov  */
13933af24433SOleg Nesterov void destroy_workqueue(struct workqueue_struct *wq)
13943af24433SOleg Nesterov {
13953af24433SOleg Nesterov 	int cpu;
13963af24433SOleg Nesterov 
13973da1c84cSOleg Nesterov 	cpu_maps_update_begin();
139895402b38SGautham R Shenoy 	spin_lock(&workqueue_lock);
13993af24433SOleg Nesterov 	list_del(&wq->list);
140095402b38SGautham R Shenoy 	spin_unlock(&workqueue_lock);
14013da1c84cSOleg Nesterov 	cpu_maps_update_done();
14023af24433SOleg Nesterov 
1403*73f53c4aSTejun Heo 	flush_workqueue(wq);
1404*73f53c4aSTejun Heo 
1405*73f53c4aSTejun Heo 	for_each_possible_cpu(cpu) {
1406*73f53c4aSTejun Heo 		struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
1407*73f53c4aSTejun Heo 		int i;
1408*73f53c4aSTejun Heo 
1409*73f53c4aSTejun Heo 		if (cwq->thread) {
1410*73f53c4aSTejun Heo 			kthread_stop(cwq->thread);
1411*73f53c4aSTejun Heo 			cwq->thread = NULL;
1412*73f53c4aSTejun Heo 		}
1413*73f53c4aSTejun Heo 
1414*73f53c4aSTejun Heo 		for (i = 0; i < WORK_NR_COLORS; i++)
1415*73f53c4aSTejun Heo 			BUG_ON(cwq->nr_in_flight[i]);
1416*73f53c4aSTejun Heo 	}
14171537663fSTejun Heo 
14180f900049STejun Heo 	free_cwqs(wq->cpu_wq);
14193af24433SOleg Nesterov 	kfree(wq);
14203af24433SOleg Nesterov }
14213af24433SOleg Nesterov EXPORT_SYMBOL_GPL(destroy_workqueue);
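
/*
 * Illustrative sketch (not part of the original source): matching teardown
 * for a private workqueue - first make sure nothing can requeue work, then
 * destroy_workqueue() flushes what is left and frees the cwqs.  Names are
 * made up.
 */
#if 0	/* example only, never compiled */
static void example_exit(struct workqueue_struct *wq, struct delayed_work *dwork)
{
	cancel_delayed_work_sync(dwork);	/* stop it rearming itself */
	destroy_workqueue(wq);			/* flushes the rest, then frees */
}
#endif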
14223af24433SOleg Nesterov 
14239c7b216dSChandra Seetharaman static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
14241da177e4SLinus Torvalds 						unsigned long action,
14251da177e4SLinus Torvalds 						void *hcpu)
14261da177e4SLinus Torvalds {
14273af24433SOleg Nesterov 	unsigned int cpu = (unsigned long)hcpu;
14283af24433SOleg Nesterov 	struct cpu_workqueue_struct *cwq;
14291da177e4SLinus Torvalds 	struct workqueue_struct *wq;
14301da177e4SLinus Torvalds 
14318bb78442SRafael J. Wysocki 	action &= ~CPU_TASKS_FROZEN;
14328bb78442SRafael J. Wysocki 
14331da177e4SLinus Torvalds 	list_for_each_entry(wq, &workqueues, list) {
14341537663fSTejun Heo 		if (wq->flags & WQ_SINGLE_THREAD)
14351537663fSTejun Heo 			continue;
14361537663fSTejun Heo 
14371537663fSTejun Heo 		cwq = get_cwq(cpu, wq);
14383af24433SOleg Nesterov 
14393af24433SOleg Nesterov 		switch (action) {
14403da1c84cSOleg Nesterov 		case CPU_POST_DEAD:
1441*73f53c4aSTejun Heo 			flush_workqueue(wq);
14421da177e4SLinus Torvalds 			break;
14431da177e4SLinus Torvalds 		}
14443af24433SOleg Nesterov 	}
14451da177e4SLinus Torvalds 
14461537663fSTejun Heo 	return notifier_from_errno(0);
14471da177e4SLinus Torvalds }
14481da177e4SLinus Torvalds 
14492d3854a3SRusty Russell #ifdef CONFIG_SMP
14508ccad40dSRusty Russell 
14512d3854a3SRusty Russell struct work_for_cpu {
14526b44003eSAndrew Morton 	struct completion completion;
14532d3854a3SRusty Russell 	long (*fn)(void *);
14542d3854a3SRusty Russell 	void *arg;
14552d3854a3SRusty Russell 	long ret;
14562d3854a3SRusty Russell };
14572d3854a3SRusty Russell 
14586b44003eSAndrew Morton static int do_work_for_cpu(void *_wfc)
14592d3854a3SRusty Russell {
14606b44003eSAndrew Morton 	struct work_for_cpu *wfc = _wfc;
14612d3854a3SRusty Russell 	wfc->ret = wfc->fn(wfc->arg);
14626b44003eSAndrew Morton 	complete(&wfc->completion);
14636b44003eSAndrew Morton 	return 0;
14642d3854a3SRusty Russell }
14652d3854a3SRusty Russell 
14662d3854a3SRusty Russell /**
14672d3854a3SRusty Russell  * work_on_cpu - run a function in user context on a particular cpu
14682d3854a3SRusty Russell  * @cpu: the cpu to run on
14692d3854a3SRusty Russell  * @fn: the function to run
14702d3854a3SRusty Russell  * @arg: the function arg
14712d3854a3SRusty Russell  *
147231ad9081SRusty Russell  * This will return the value @fn returns.
147331ad9081SRusty Russell  * It is up to the caller to ensure that the cpu doesn't go offline.
14746b44003eSAndrew Morton  * The caller must not hold any locks which would prevent @fn from completing.
14752d3854a3SRusty Russell  */
14762d3854a3SRusty Russell long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
14772d3854a3SRusty Russell {
14786b44003eSAndrew Morton 	struct task_struct *sub_thread;
14796b44003eSAndrew Morton 	struct work_for_cpu wfc = {
14806b44003eSAndrew Morton 		.completion = COMPLETION_INITIALIZER_ONSTACK(wfc.completion),
14816b44003eSAndrew Morton 		.fn = fn,
14826b44003eSAndrew Morton 		.arg = arg,
14836b44003eSAndrew Morton 	};
14842d3854a3SRusty Russell 
14856b44003eSAndrew Morton 	sub_thread = kthread_create(do_work_for_cpu, &wfc, "work_for_cpu");
14866b44003eSAndrew Morton 	if (IS_ERR(sub_thread))
14876b44003eSAndrew Morton 		return PTR_ERR(sub_thread);
14886b44003eSAndrew Morton 	kthread_bind(sub_thread, cpu);
14896b44003eSAndrew Morton 	wake_up_process(sub_thread);
14906b44003eSAndrew Morton 	wait_for_completion(&wfc.completion);
14912d3854a3SRusty Russell 	return wfc.ret;
14922d3854a3SRusty Russell }
14932d3854a3SRusty Russell EXPORT_SYMBOL_GPL(work_on_cpu);
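
/*
 * Illustrative sketch (not part of the original source): running a short
 * function bound to a particular cpu and collecting its return value.
 * As noted above, the caller must keep that cpu online; names are made up.
 */
#if 0	/* example only, never compiled */
static long example_read_local(void *arg)
{
	/* runs in process context, bound to the requested cpu */
	return raw_smp_processor_id();
}

static long example_query_cpu(unsigned int cpu)
{
	return work_on_cpu(cpu, example_read_local, NULL);
}
#endif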
14942d3854a3SRusty Russell #endif /* CONFIG_SMP */
14952d3854a3SRusty Russell 
1496c12920d1SOleg Nesterov void __init init_workqueues(void)
14971da177e4SLinus Torvalds {
1498e7577c50SRusty Russell 	singlethread_cpu = cpumask_first(cpu_possible_mask);
14991da177e4SLinus Torvalds 	hotcpu_notifier(workqueue_cpu_callback, 0);
15001da177e4SLinus Torvalds 	keventd_wq = create_workqueue("events");
15011da177e4SLinus Torvalds 	BUG_ON(!keventd_wq);
15021da177e4SLinus Torvalds }
1503