/*
 * linux/kernel/workqueue.c
 *
 * Generic mechanism for defining kernel helper threads for running
 * arbitrary tasks in process context.
 *
 * Started by Ingo Molnar, Copyright (C) 2002
 *
 * Derived from the taskqueue/keventd code by:
 *
 *   David Woodhouse <[email protected]>
 *   Andrew Morton
 *   Kai Petzke <[email protected]>
 *   Theodore Ts'o <[email protected]>
 *
 * Made to use alloc_percpu by Christoph Lameter.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/mempolicy.h>
#include <linux/freezer.h>
#include <linux/kallsyms.h>
#include <linux/debug_locks.h>
#include <linux/lockdep.h>
#define CREATE_TRACE_POINTS
#include <trace/events/workqueue.h>

/*
 * The per-CPU workqueue (for a single-threaded workqueue we always
 * use the first possible CPU).
 */
struct cpu_workqueue_struct {

	spinlock_t lock;

	struct list_head worklist;
	wait_queue_head_t more_work;
	struct work_struct *current_work;

	struct workqueue_struct *wq;
	struct task_struct *thread;
} ____cacheline_aligned;

/*
 * The externally visible workqueue abstraction is an array of
 * per-CPU workqueues:
 */
struct workqueue_struct {
	struct cpu_workqueue_struct *cpu_wq;
	struct list_head list;
	const char *name;
	int singlethread;
	int freezeable;		/* Freeze threads during suspend */
	int rt;
#ifdef CONFIG_LOCKDEP
	struct lockdep_map lockdep_map;
#endif
};

#ifdef CONFIG_LOCKDEP
/**
 * in_workqueue_context() - in context of specified workqueue?
 * @wq: the workqueue of interest
 *
 * Checks lockdep state to see if the current task is executing from
 * within a workqueue item.  This function exists only if lockdep is
 * enabled.
 */
int in_workqueue_context(struct workqueue_struct *wq)
{
	return lock_is_held(&wq->lockdep_map);
}
#endif

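/*
 * Illustrative use only (not part of this file): a subsystem that must
 * only touch some state from its own workqueue could add a lockdep-only
 * sanity check like the sketch below.  "my_wq" and "my_update_state()"
 * are made-up names.
 *
 *	static void my_update_state(void)
 *	{
 *	#ifdef CONFIG_LOCKDEP
 *		WARN_ON_ONCE(!in_workqueue_context(my_wq));
 *	#endif
 *		...
 *	}
 */
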
#ifdef CONFIG_DEBUG_OBJECTS_WORK

static struct debug_obj_descr work_debug_descr;

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static int work_fixup_init(void *addr, enum debug_obj_state state)
{
	struct work_struct *work = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		cancel_work_sync(work);
		debug_object_init(work, &work_debug_descr);
		return 1;
	default:
		return 0;
	}
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown object is activated (might be a statically initialized object)
 */
static int work_fixup_activate(void *addr, enum debug_obj_state state)
{
	struct work_struct *work = addr;

	switch (state) {

	case ODEBUG_STATE_NOTAVAILABLE:
		/*
		 * This is not really a fixup. The work struct was
		 * statically initialized. We just make sure that it
		 * is tracked in the object tracker.
		 */
		if (test_bit(WORK_STRUCT_STATIC, work_data_bits(work))) {
			debug_object_init(work, &work_debug_descr);
			debug_object_activate(work, &work_debug_descr);
			return 0;
		}
		WARN_ON_ONCE(1);
		return 0;

	case ODEBUG_STATE_ACTIVE:
		WARN_ON(1);
		/* fall through */
	default:
		return 0;
	}
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static int work_fixup_free(void *addr, enum debug_obj_state state)
{
	struct work_struct *work = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		cancel_work_sync(work);
		debug_object_free(work, &work_debug_descr);
		return 1;
	default:
		return 0;
	}
}

static struct debug_obj_descr work_debug_descr = {
	.name		= "work_struct",
	.fixup_init	= work_fixup_init,
	.fixup_activate	= work_fixup_activate,
	.fixup_free	= work_fixup_free,
};

static inline void debug_work_activate(struct work_struct *work)
{
	debug_object_activate(work, &work_debug_descr);
}

static inline void debug_work_deactivate(struct work_struct *work)
{
	debug_object_deactivate(work, &work_debug_descr);
}

void __init_work(struct work_struct *work, int onstack)
{
	if (onstack)
		debug_object_init_on_stack(work, &work_debug_descr);
	else
		debug_object_init(work, &work_debug_descr);
}
EXPORT_SYMBOL_GPL(__init_work);

void destroy_work_on_stack(struct work_struct *work)
{
	debug_object_free(work, &work_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_work_on_stack);

#else
static inline void debug_work_activate(struct work_struct *work) { }
static inline void debug_work_deactivate(struct work_struct *work) { }
#endif

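/*
 * Illustrative use only (not part of this file): with
 * CONFIG_DEBUG_OBJECTS_WORK, an on-stack work item must be initialized
 * with the _ONSTACK variant and torn down with destroy_work_on_stack()
 * before the stack frame goes away.  "my_fn" is a made-up name.
 *
 *	struct work_struct work;
 *
 *	INIT_WORK_ON_STACK(&work, my_fn);
 *	schedule_work(&work);
 *	flush_work(&work);
 *	destroy_work_on_stack(&work);
 */
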
/* Serializes the accesses to the list of workqueues. */
static DEFINE_SPINLOCK(workqueue_lock);
static LIST_HEAD(workqueues);

static int singlethread_cpu __read_mostly;
static const struct cpumask *cpu_singlethread_map __read_mostly;
/*
 * _cpu_down() first removes CPU from cpu_online_map, then CPU_DEAD
 * flushes cwq->worklist. This means that flush_workqueue/wait_on_work
 * which comes in between can't use for_each_online_cpu(). We could
 * use cpu_possible_map; the cpumask below is more documentation
 * than optimization.
 */
static cpumask_var_t cpu_populated_map __read_mostly;

/* If it's single threaded, it isn't in the list of workqueues. */
static inline int is_wq_single_threaded(struct workqueue_struct *wq)
{
	return wq->singlethread;
}

static const struct cpumask *wq_cpu_map(struct workqueue_struct *wq)
{
	return is_wq_single_threaded(wq)
		? cpu_singlethread_map : cpu_populated_map;
}

static
struct cpu_workqueue_struct *wq_per_cpu(struct workqueue_struct *wq, int cpu)
{
	if (unlikely(is_wq_single_threaded(wq)))
		cpu = singlethread_cpu;
	return per_cpu_ptr(wq->cpu_wq, cpu);
}

/*
 * Set the workqueue on which a work item is to be run
 * - Must *only* be called if the pending flag is set
 */
static inline void set_wq_data(struct work_struct *work,
				struct cpu_workqueue_struct *cwq)
{
	unsigned long new;

	BUG_ON(!work_pending(work));

	new = (unsigned long) cwq | (1UL << WORK_STRUCT_PENDING);
	new |= WORK_STRUCT_FLAG_MASK & *work_data_bits(work);
	atomic_long_set(&work->data, new);
}

/*
 * Clear WORK_STRUCT_PENDING and the workqueue on which it was queued.
 */
static inline void clear_wq_data(struct work_struct *work)
{
	unsigned long flags = *work_data_bits(work) &
				(1UL << WORK_STRUCT_STATIC);
	atomic_long_set(&work->data, flags);
}

static inline
struct cpu_workqueue_struct *get_wq_data(struct work_struct *work)
{
	return (void *) (atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK);
}

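/*
 * For orientation (a sketch, assuming the WORK_STRUCT_* definitions in
 * <linux/workqueue.h> of this era): work->data packs the flag bits into
 * the low bits and the cpu_workqueue_struct pointer into the rest, which
 * works because cwqs are cache-line aligned:
 *
 *	bit 0			WORK_STRUCT_PENDING
 *	bit 1			WORK_STRUCT_STATIC (debugobjects only)
 *	remaining bits		cpu_workqueue_struct pointer
 *
 * set_wq_data()/get_wq_data() above are the only accessors.
 */
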
static void insert_work(struct cpu_workqueue_struct *cwq,
			struct work_struct *work, struct list_head *head)
{
	trace_workqueue_insertion(cwq->thread, work);

	set_wq_data(work, cwq);
	/*
	 * Ensure that we get the right work->data if we see the
	 * result of list_add() below, see try_to_grab_pending().
	 */
	smp_wmb();
	list_add_tail(&work->entry, head);
	wake_up(&cwq->more_work);
}

static void __queue_work(struct cpu_workqueue_struct *cwq,
			 struct work_struct *work)
{
	unsigned long flags;

	debug_work_activate(work);
	spin_lock_irqsave(&cwq->lock, flags);
	insert_work(cwq, work, &cwq->worklist);
	spin_unlock_irqrestore(&cwq->lock, flags);
}

/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to the CPU on which it was submitted, but if the CPU dies
 * it can be processed by another CPU.
 */
int queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
	int ret;

	ret = queue_work_on(get_cpu(), wq, work);
	put_cpu();

	return ret;
}
EXPORT_SYMBOL_GPL(queue_work);

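/*
 * Illustrative use only (not part of this file): the typical driver
 * pattern.  "my_wq", "my_work" and "my_work_fn" are made-up names.
 *
 *	static void my_work_fn(struct work_struct *work);
 *	static DECLARE_WORK(my_work, my_work_fn);
 *	static struct workqueue_struct *my_wq;
 *
 *	my_wq = create_workqueue("my_wq");
 *	queue_work(my_wq, &my_work);	// returns 0 if already pending
 */
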
/**
 * queue_work_on - queue work on specific cpu
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to a specific CPU; the caller must ensure it
 * can't go away.
 */
int
queue_work_on(int cpu, struct workqueue_struct *wq, struct work_struct *work)
{
	int ret = 0;

	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
		BUG_ON(!list_empty(&work->entry));
		__queue_work(wq_per_cpu(wq, cpu), work);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_work_on);

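/*
 * Illustrative use only (not part of this file): pinning a work item to
 * a CPU the caller has verified is online, e.g. under get_online_cpus().
 * "my_wq" and "my_work" are made-up names.
 *
 *	get_online_cpus();
 *	if (cpu_online(target_cpu))
 *		queue_work_on(target_cpu, my_wq, &my_work);
 *	put_online_cpus();
 */
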
static void delayed_work_timer_fn(unsigned long __data)
{
	struct delayed_work *dwork = (struct delayed_work *)__data;
	struct cpu_workqueue_struct *cwq = get_wq_data(&dwork->work);
	struct workqueue_struct *wq = cwq->wq;

	__queue_work(wq_per_cpu(wq, smp_processor_id()), &dwork->work);
}

/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @dwork was already on a queue, non-zero otherwise.
 */
int queue_delayed_work(struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	if (delay == 0)
		return queue_work(wq, &dwork->work);

	return queue_delayed_work_on(-1, wq, dwork, delay);
}
EXPORT_SYMBOL_GPL(queue_delayed_work);

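/*
 * Illustrative use only (not part of this file): polling once a second.
 * "my_dwork" and "my_poll_fn" are made-up names; a self-rearming pattern
 * like this must be stopped with cancel_delayed_work_sync().
 *
 *	static void my_poll_fn(struct work_struct *work)
 *	{
 *		...
 *		schedule_delayed_work(&my_dwork, HZ);	// re-arm
 *	}
 *	static DECLARE_DELAYED_WORK(my_dwork, my_poll_fn);
 *
 *	schedule_delayed_work(&my_dwork, HZ);		// kick off
 */
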
/**
 * queue_delayed_work_on - queue work on specific CPU after delay
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @dwork was already on a queue, non-zero otherwise.
 */
int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	int ret = 0;
	struct timer_list *timer = &dwork->timer;
	struct work_struct *work = &dwork->work;

	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
		BUG_ON(timer_pending(timer));
		BUG_ON(!list_empty(&work->entry));

		timer_stats_timer_set_start_info(&dwork->timer);

		/* This stores cwq for the moment, for the timer_fn */
		set_wq_data(work, wq_per_cpu(wq, raw_smp_processor_id()));
		timer->expires = jiffies + delay;
		timer->data = (unsigned long)dwork;
		timer->function = delayed_work_timer_fn;

		if (unlikely(cpu >= 0))
			add_timer_on(timer, cpu);
		else
			add_timer(timer);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_delayed_work_on);

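/*
 * For orientation: a delayed work item goes through two phases.  Phase 1
 * is a pending timer carrying the target cwq in work->data; when the
 * timer fires, delayed_work_timer_fn() runs phase 2 and queues the work
 * on the CPU the timer fired on (cpu == -1 above means "any CPU").
 */
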
static void run_workqueue(struct cpu_workqueue_struct *cwq)
{
	spin_lock_irq(&cwq->lock);
	while (!list_empty(&cwq->worklist)) {
		struct work_struct *work = list_entry(cwq->worklist.next,
						struct work_struct, entry);
		work_func_t f = work->func;
#ifdef CONFIG_LOCKDEP
		/*
		 * It is permissible to free the struct work_struct
		 * from inside the function that is called from it,
		 * and we need to take that into account for lockdep too.
		 * To avoid bogus "held lock freed" warnings as well
		 * as problems when looking into work->lockdep_map,
		 * make a copy and use that here.
		 */
		struct lockdep_map lockdep_map = work->lockdep_map;
#endif
		trace_workqueue_execution(cwq->thread, work);
		debug_work_deactivate(work);
		cwq->current_work = work;
		list_del_init(cwq->worklist.next);
		spin_unlock_irq(&cwq->lock);

		BUG_ON(get_wq_data(work) != cwq);
		work_clear_pending(work);
		lock_map_acquire(&cwq->wq->lockdep_map);
		lock_map_acquire(&lockdep_map);
		f(work);
		lock_map_release(&lockdep_map);
		lock_map_release(&cwq->wq->lockdep_map);

		if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
			printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
					"%s/0x%08x/%d\n",
					current->comm, preempt_count(),
					task_pid_nr(current));
			printk(KERN_ERR "    last function: ");
			print_symbol("%s\n", (unsigned long)f);
			debug_show_held_locks(current);
			dump_stack();
		}

		spin_lock_irq(&cwq->lock);
		cwq->current_work = NULL;
	}
	spin_unlock_irq(&cwq->lock);
}

static int worker_thread(void *__cwq)
{
	struct cpu_workqueue_struct *cwq = __cwq;
	DEFINE_WAIT(wait);

	if (cwq->wq->freezeable)
		set_freezable();

	for (;;) {
		prepare_to_wait(&cwq->more_work, &wait, TASK_INTERRUPTIBLE);
		if (!freezing(current) &&
		    !kthread_should_stop() &&
		    list_empty(&cwq->worklist))
			schedule();
		finish_wait(&cwq->more_work, &wait);

		try_to_freeze();

		if (kthread_should_stop())
			break;

		run_workqueue(cwq);
	}

	return 0;
}

struct wq_barrier {
	struct work_struct	work;
	struct completion	done;
};

static void wq_barrier_func(struct work_struct *work)
{
	struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
	complete(&barr->done);
}

static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
			struct wq_barrier *barr, struct list_head *head)
{
	/*
	 * debugobject calls are safe here even with cwq->lock locked
	 * as we know for sure that this will not trigger any of the
	 * checks and call back into the fixup functions where we
	 * might deadlock.
	 */
	INIT_WORK_ON_STACK(&barr->work, wq_barrier_func);
	__set_bit(WORK_STRUCT_PENDING, work_data_bits(&barr->work));

	init_completion(&barr->done);

	debug_work_activate(&barr->work);
	insert_work(cwq, &barr->work, head);
}

static int flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
{
	int active = 0;
	struct wq_barrier barr;

	WARN_ON(cwq->thread == current);

	spin_lock_irq(&cwq->lock);
	if (!list_empty(&cwq->worklist) || cwq->current_work != NULL) {
		insert_wq_barrier(cwq, &barr, &cwq->worklist);
		active = 1;
	}
	spin_unlock_irq(&cwq->lock);

	if (active) {
		wait_for_completion(&barr.done);
		destroy_work_on_stack(&barr.work);
	}

	return active;
}

/**
 * flush_workqueue - ensure that any scheduled work has run to completion.
 * @wq: workqueue to flush
 *
 * Forces execution of the workqueue and blocks until its completion.
 * This is typically used in driver shutdown handlers.
 *
 * We sleep until all work items which were queued on entry have been
 * handled, but we are not livelocked by new incoming ones.
 *
 * This function used to run the workqueues itself.  Now we just wait for the
 * helper threads to do it.
 */
void flush_workqueue(struct workqueue_struct *wq)
{
	const struct cpumask *cpu_map = wq_cpu_map(wq);
	int cpu;

	might_sleep();
	lock_map_acquire(&wq->lockdep_map);
	lock_map_release(&wq->lockdep_map);
	for_each_cpu(cpu, cpu_map)
		flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
}
EXPORT_SYMBOL_GPL(flush_workqueue);

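/*
 * Illustrative use only (not part of this file): a typical driver
 * shutdown sequence.  "my_wq", "my_dev" and "my_remove" are made-up
 * names.  The flush itself works by queueing a wq_barrier behind the
 * existing items on every cwq and sleeping on its completion.
 *
 *	static void my_remove(struct my_dev *dev)
 *	{
 *		dev->stopping = 1;	// stop new submissions first
 *		flush_workqueue(my_wq);
 *		destroy_workqueue(my_wq);
 *	}
 */
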
/**
 * flush_work - block until a work_struct's callback has terminated
 * @work: the work which is to be flushed
 *
 * Returns false if @work has already terminated.
 *
 * It is expected that, prior to calling flush_work(), the caller has
 * arranged for the work to not be requeued, otherwise it doesn't make
 * sense to use this function.
 */
int flush_work(struct work_struct *work)
{
	struct cpu_workqueue_struct *cwq;
	struct list_head *prev;
	struct wq_barrier barr;

	might_sleep();
	cwq = get_wq_data(work);
	if (!cwq)
		return 0;

	lock_map_acquire(&cwq->wq->lockdep_map);
	lock_map_release(&cwq->wq->lockdep_map);

	prev = NULL;
	spin_lock_irq(&cwq->lock);
	if (!list_empty(&work->entry)) {
		/*
		 * See the comment near try_to_grab_pending()->smp_rmb().
		 * If it was re-queued under us we are not going to wait.
		 */
		smp_rmb();
		if (unlikely(cwq != get_wq_data(work)))
			goto out;
		prev = &work->entry;
	} else {
		if (cwq->current_work != work)
			goto out;
		prev = &cwq->worklist;
	}
	insert_wq_barrier(cwq, &barr, prev->next);
out:
	spin_unlock_irq(&cwq->lock);
	if (!prev)
		return 0;

	wait_for_completion(&barr.done);
	destroy_work_on_stack(&barr.work);
	return 1;
}
EXPORT_SYMBOL_GPL(flush_work);

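/*
 * For orientation: unlike flush_workqueue(), flush_work() inserts its
 * barrier directly after the single item of interest (or after the
 * currently running item), so it waits for just that work and not for
 * everything else queued on the cwq.
 */
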
/*
 * Upon a successful return (>= 0), the caller "owns" the
 * WORK_STRUCT_PENDING bit, so this work can't be re-armed in any way.
 */
static int try_to_grab_pending(struct work_struct *work)
{
	struct cpu_workqueue_struct *cwq;
	int ret = -1;

	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work)))
		return 0;

	/*
	 * The queueing is in progress, or it is already queued. Try to
	 * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
	 */

	cwq = get_wq_data(work);
	if (!cwq)
		return ret;

	spin_lock_irq(&cwq->lock);
	if (!list_empty(&work->entry)) {
		/*
		 * This work is queued, but perhaps we locked the wrong cwq.
		 * In that case we must see the new value after rmb(), see
		 * insert_work()->wmb().
		 */
		smp_rmb();
		if (cwq == get_wq_data(work)) {
			debug_work_deactivate(work);
			list_del_init(&work->entry);
			ret = 1;
		}
	}
	spin_unlock_irq(&cwq->lock);

	return ret;
}

static void wait_on_cpu_work(struct cpu_workqueue_struct *cwq,
				struct work_struct *work)
{
	struct wq_barrier barr;
	int running = 0;

	spin_lock_irq(&cwq->lock);
	if (unlikely(cwq->current_work == work)) {
		insert_wq_barrier(cwq, &barr, cwq->worklist.next);
		running = 1;
	}
	spin_unlock_irq(&cwq->lock);

	if (unlikely(running)) {
		wait_for_completion(&barr.done);
		destroy_work_on_stack(&barr.work);
	}
}

static void wait_on_work(struct work_struct *work)
{
	struct cpu_workqueue_struct *cwq;
	struct workqueue_struct *wq;
	const struct cpumask *cpu_map;
	int cpu;

	might_sleep();

	lock_map_acquire(&work->lockdep_map);
	lock_map_release(&work->lockdep_map);

	cwq = get_wq_data(work);
	if (!cwq)
		return;

	wq = cwq->wq;
	cpu_map = wq_cpu_map(wq);

	for_each_cpu(cpu, cpu_map)
		wait_on_cpu_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
}

static int __cancel_work_timer(struct work_struct *work,
				struct timer_list *timer)
{
	int ret;

	do {
		ret = (timer && likely(del_timer(timer)));
		if (!ret)
			ret = try_to_grab_pending(work);
		wait_on_work(work);
	} while (unlikely(ret < 0));

	clear_wq_data(work);
	return ret;
}

/**
 * cancel_work_sync - block until a work_struct's callback has terminated
 * @work: the work which is to be flushed
 *
 * Returns true if @work was pending.
 *
 * cancel_work_sync() will cancel the work if it is queued. If the work's
 * callback appears to be running, cancel_work_sync() will block until it
 * has completed.
 *
 * It is possible to use this function if the work re-queues itself. It can
 * cancel the work even if it migrates to another workqueue, however in that
 * case it only guarantees that work->func() has completed on the last queued
 * workqueue.
 *
 * cancel_work_sync(&delayed_work->work) should be used only if ->timer is not
 * pending, otherwise it goes into a busy-wait loop until the timer expires.
 *
 * The caller must ensure that the workqueue_struct on which this work was
 * last queued can't be destroyed before this function returns.
 */
int cancel_work_sync(struct work_struct *work)
{
	return __cancel_work_timer(work, NULL);
}
EXPORT_SYMBOL_GPL(cancel_work_sync);

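/*
 * Illustrative use only (not part of this file): cancelling in a
 * teardown path.  "my_work" and "my_remove" are made-up names.
 *
 *	static void my_remove(void)
 *	{
 *		// Safe even if the callback re-queues my_work: after this
 *		// returns, the callback is neither queued nor running.
 *		cancel_work_sync(&my_work);
 *	}
 */
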
/**
 * cancel_delayed_work_sync - reliably kill off a delayed work.
 * @dwork: the delayed work struct
 *
 * Returns true if @dwork was pending.
 *
 * It is possible to use this function if @dwork rearms itself via queue_work()
 * or queue_delayed_work(). See also the comment for cancel_work_sync().
 */
int cancel_delayed_work_sync(struct delayed_work *dwork)
{
	return __cancel_work_timer(&dwork->work, &dwork->timer);
}
EXPORT_SYMBOL(cancel_delayed_work_sync);

static struct workqueue_struct *keventd_wq __read_mostly;

/**
 * schedule_work - put work task in global workqueue
 * @work: job to be done
 *
 * Returns zero if @work was already on the kernel-global workqueue and
 * non-zero otherwise.
 *
 * This puts a job in the kernel-global workqueue if it was not already
 * queued and leaves it in the same position on the kernel-global
 * workqueue otherwise.
 */
int schedule_work(struct work_struct *work)
{
	return queue_work(keventd_wq, work);
}
EXPORT_SYMBOL(schedule_work);

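/*
 * Illustrative use only (not part of this file): deferring non-urgent
 * processing from an interrupt handler.  "my_irq_handler", "my_work",
 * "my_dev" and "ack_hw_irq" are made-up names.
 *
 *	static irqreturn_t my_irq_handler(int irq, void *dev_id)
 *	{
 *		struct my_dev *dev = dev_id;
 *
 *		ack_hw_irq(dev);		// hypothetical helper
 *		schedule_work(&dev->my_work);	// heavy work in process context
 *		return IRQ_HANDLED;
 *	}
 */
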
/**
 * schedule_work_on - put work task on a specific cpu
 * @cpu: cpu to put the work task on
 * @work: job to be done
 *
 * This puts a job on a specific cpu.
 */
int schedule_work_on(int cpu, struct work_struct *work)
{
	return queue_work_on(cpu, keventd_wq, work);
}
EXPORT_SYMBOL(schedule_work_on);

/**
 * schedule_delayed_work - put work task in global workqueue after delay
 * @dwork: job to be done
 * @delay: number of jiffies to wait or 0 for immediate execution
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue.
 */
int schedule_delayed_work(struct delayed_work *dwork,
					unsigned long delay)
{
	return queue_delayed_work(keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work);

/**
 * flush_delayed_work - block until a delayed_work's callback has terminated
 * @dwork: the delayed work which is to be flushed
 *
 * Any timeout is cancelled, and any pending work is run immediately.
 */
void flush_delayed_work(struct delayed_work *dwork)
{
	if (del_timer_sync(&dwork->timer)) {
		struct cpu_workqueue_struct *cwq;
		cwq = wq_per_cpu(get_wq_data(&dwork->work)->wq, get_cpu());
		__queue_work(cwq, &dwork->work);
		put_cpu();
	}
	flush_work(&dwork->work);
}
EXPORT_SYMBOL(flush_delayed_work);

/**
 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
 * @cpu: cpu to use
 * @dwork: job to be done
 * @delay: number of jiffies to wait
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue on the specified CPU.
 */
int schedule_delayed_work_on(int cpu,
			struct delayed_work *dwork, unsigned long delay)
{
	return queue_delayed_work_on(cpu, keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work_on);

/**
 * schedule_on_each_cpu - call a function on each online CPU from keventd
 * @func: the function to call
 *
 * Returns zero on success.
 * Returns a negative errno on failure.
 *
 * schedule_on_each_cpu() is very slow.
 */
int schedule_on_each_cpu(work_func_t func)
{
	int cpu;
	int orig = -1;
	struct work_struct *works;

	works = alloc_percpu(struct work_struct);
	if (!works)
		return -ENOMEM;

	get_online_cpus();

	/*
	 * When running in keventd don't schedule a work item on
	 * itself.  Can just call directly because the work queue is
	 * already bound.  This also is faster.
	 */
	if (current_is_keventd())
		orig = raw_smp_processor_id();

	for_each_online_cpu(cpu) {
		struct work_struct *work = per_cpu_ptr(works, cpu);

		INIT_WORK(work, func);
		if (cpu != orig)
			schedule_work_on(cpu, work);
	}
	if (orig >= 0)
		func(per_cpu_ptr(works, orig));

	for_each_online_cpu(cpu)
		flush_work(per_cpu_ptr(works, cpu));

	put_online_cpus();
	free_percpu(works);
	return 0;
}

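/*
 * Illustrative use only (not part of this file): running a cache-drain
 * style callback everywhere.  "my_drain_fn" is a made-up name; the
 * callback receives the per-cpu work_struct it was queued with.
 *
 *	static void my_drain_fn(struct work_struct *unused)
 *	{
 *		// runs once on every online CPU, in keventd context
 *	}
 *
 *	int err = schedule_on_each_cpu(my_drain_fn);
 */
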
/**
 * flush_scheduled_work - ensure that any scheduled work has run to completion.
 *
 * Forces execution of the kernel-global workqueue and blocks until its
 * completion.
 *
 * Think twice before calling this function!  It's very easy to get into
 * trouble if you don't take great care.  Either of the following situations
 * will lead to deadlock:
 *
 *	One of the work items currently on the workqueue needs to acquire
 *	a lock held by your code or its caller.
 *
 *	Your code is running in the context of a work routine.
 *
 * They will be detected by lockdep when they occur, but the first might not
 * occur very often.  It depends on what work items are on the workqueue and
 * what locks they need, which you have no control over.
 *
 * In most situations flushing the entire workqueue is overkill; you merely
 * need to know that a particular work item isn't queued and isn't running.
 * In such cases you should use cancel_delayed_work_sync() or
 * cancel_work_sync() instead.
 */
void flush_scheduled_work(void)
{
	flush_workqueue(keventd_wq);
}
EXPORT_SYMBOL(flush_scheduled_work);

/**
 * execute_in_process_context - reliably execute the routine in process context
 * @fn:		the function to execute
 * @ew:		guaranteed storage for the execute work structure (must
 *		be available when the work executes)
 *
 * Executes the function immediately if process context is available,
 * otherwise schedules the function for delayed execution.
 *
 * Returns:	0 - function was executed
 *		1 - function was scheduled for execution
 */
int execute_in_process_context(work_func_t fn, struct execute_work *ew)
{
	if (!in_interrupt()) {
		fn(&ew->work);
		return 0;
	}

	INIT_WORK(&ew->work, fn);
	schedule_work(&ew->work);

	return 1;
}
EXPORT_SYMBOL_GPL(execute_in_process_context);

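/*
 * Illustrative use only (not part of this file): a release path that may
 * be entered from either process or interrupt context.  "my_obj" and
 * "my_free_fn" are made-up names; the execute_work storage must outlive
 * the call.
 *
 *	static void my_free_fn(struct work_struct *work)
 *	{
 *		struct my_obj *obj = container_of(work, struct my_obj, ew.work);
 *		kfree(obj);
 *	}
 *
 *	execute_in_process_context(my_free_fn, &obj->ew);
 */
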
int keventd_up(void)
{
	return keventd_wq != NULL;
}

int current_is_keventd(void)
{
	struct cpu_workqueue_struct *cwq;
	int cpu = raw_smp_processor_id(); /* preempt-safe: keventd is per-cpu */
	int ret = 0;

	BUG_ON(!keventd_wq);

	cwq = per_cpu_ptr(keventd_wq->cpu_wq, cpu);
	if (current == cwq->thread)
		ret = 1;

	return ret;
}

static struct cpu_workqueue_struct *
init_cpu_workqueue(struct workqueue_struct *wq, int cpu)
{
	struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);

	cwq->wq = wq;
	spin_lock_init(&cwq->lock);
	INIT_LIST_HEAD(&cwq->worklist);
	init_waitqueue_head(&cwq->more_work);

	return cwq;
}

static int create_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
	struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
	struct workqueue_struct *wq = cwq->wq;
	const char *fmt = is_wq_single_threaded(wq) ? "%s" : "%s/%d";
	struct task_struct *p;

	p = kthread_create(worker_thread, cwq, fmt, wq->name, cpu);
	/*
	 * Nobody can add the work_struct to this cwq,
	 *	if (caller is __create_workqueue)
	 *		nobody should see this wq
	 *	else // caller is CPU_UP_PREPARE
	 *		cpu is not on cpu_online_map
	 * so we can abort safely.
	 */
	if (IS_ERR(p))
		return PTR_ERR(p);
	if (cwq->wq->rt)
		sched_setscheduler_nocheck(p, SCHED_FIFO, &param);
	cwq->thread = p;

	trace_workqueue_creation(cwq->thread, cpu);

	return 0;
}

static void start_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
	struct task_struct *p = cwq->thread;

	if (p != NULL) {
		if (cpu >= 0)
			kthread_bind(p, cpu);
		wake_up_process(p);
	}
}

struct workqueue_struct *__create_workqueue_key(const char *name,
						int singlethread,
						int freezeable,
						int rt,
						struct lock_class_key *key,
						const char *lock_name)
{
	struct workqueue_struct *wq;
	struct cpu_workqueue_struct *cwq;
	int err = 0, cpu;

	wq = kzalloc(sizeof(*wq), GFP_KERNEL);
	if (!wq)
		return NULL;

	wq->cpu_wq = alloc_percpu(struct cpu_workqueue_struct);
	if (!wq->cpu_wq) {
		kfree(wq);
		return NULL;
	}

	wq->name = name;
	lockdep_init_map(&wq->lockdep_map, lock_name, key, 0);
	wq->singlethread = singlethread;
	wq->freezeable = freezeable;
	wq->rt = rt;
	INIT_LIST_HEAD(&wq->list);

	if (singlethread) {
		cwq = init_cpu_workqueue(wq, singlethread_cpu);
		err = create_workqueue_thread(cwq, singlethread_cpu);
		start_workqueue_thread(cwq, -1);
	} else {
		cpu_maps_update_begin();
		/*
		 * We must place this wq on list even if the code below fails.
		 * cpu_down(cpu) can remove cpu from cpu_populated_map before
		 * destroy_workqueue() takes the lock, in that case we leak
		 * cwq[cpu]->thread.
		 */
		spin_lock(&workqueue_lock);
		list_add(&wq->list, &workqueues);
		spin_unlock(&workqueue_lock);
		/*
		 * We must initialize cwqs for each possible cpu even if we
		 * are going to call destroy_workqueue() finally. Otherwise
		 * cpu_up() can hit the uninitialized cwq once we drop the
		 * lock.
		 */
		for_each_possible_cpu(cpu) {
			cwq = init_cpu_workqueue(wq, cpu);
			if (err || !cpu_online(cpu))
				continue;
			err = create_workqueue_thread(cwq, cpu);
			start_workqueue_thread(cwq, cpu);
		}
		cpu_maps_update_done();
	}

	if (err) {
		destroy_workqueue(wq);
		wq = NULL;
	}
	return wq;
}
EXPORT_SYMBOL_GPL(__create_workqueue_key);

static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq)
{
	/*
	 * Our caller is either destroy_workqueue() or CPU_POST_DEAD,
	 * cpu_add_remove_lock protects cwq->thread.
	 */
	if (cwq->thread == NULL)
		return;

	lock_map_acquire(&cwq->wq->lockdep_map);
	lock_map_release(&cwq->wq->lockdep_map);

	flush_cpu_workqueue(cwq);
	/*
	 * If the caller is CPU_POST_DEAD and cwq->worklist was not empty,
	 * a concurrent flush_workqueue() can insert a barrier after us.
	 * However, in that case run_workqueue() won't return and check
	 * kthread_should_stop() until it flushes all work_struct's.
	 * When ->worklist becomes empty it is safe to exit because no
	 * more work_structs can be queued on this cwq: flush_workqueue
	 * checks list_empty(), and a "normal" queue_work() can't use
	 * a dead CPU.
	 */
	trace_workqueue_destruction(cwq->thread);
	kthread_stop(cwq->thread);
	cwq->thread = NULL;
}

/**
 * destroy_workqueue - safely terminate a workqueue
 * @wq: target workqueue
 *
 * Safely destroy a workqueue. All work currently pending will be done first.
 */
void destroy_workqueue(struct workqueue_struct *wq)
{
	const struct cpumask *cpu_map = wq_cpu_map(wq);
	int cpu;

	cpu_maps_update_begin();
	spin_lock(&workqueue_lock);
	list_del(&wq->list);
	spin_unlock(&workqueue_lock);

	for_each_cpu(cpu, cpu_map)
		cleanup_workqueue_thread(per_cpu_ptr(wq->cpu_wq, cpu));
	cpu_maps_update_done();

	free_percpu(wq->cpu_wq);
	kfree(wq);
}
EXPORT_SYMBOL_GPL(destroy_workqueue);

static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
						unsigned long action,
						void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct cpu_workqueue_struct *cwq;
	struct workqueue_struct *wq;
	int err = 0;

	action &= ~CPU_TASKS_FROZEN;

	switch (action) {
	case CPU_UP_PREPARE:
		cpumask_set_cpu(cpu, cpu_populated_map);
	}
undo:
	list_for_each_entry(wq, &workqueues, list) {
		cwq = per_cpu_ptr(wq->cpu_wq, cpu);

		switch (action) {
		case CPU_UP_PREPARE:
			err = create_workqueue_thread(cwq, cpu);
			if (!err)
				break;
			printk(KERN_ERR "workqueue [%s] for %i failed\n",
				wq->name, cpu);
			action = CPU_UP_CANCELED;
			err = -ENOMEM;
			goto undo;

		case CPU_ONLINE:
			start_workqueue_thread(cwq, cpu);
			break;

		case CPU_UP_CANCELED:
			start_workqueue_thread(cwq, -1);
			/* fall through */
		case CPU_POST_DEAD:
			cleanup_workqueue_thread(cwq);
			break;
		}
	}

	switch (action) {
	case CPU_UP_CANCELED:
	case CPU_POST_DEAD:
		cpumask_clear_cpu(cpu, cpu_populated_map);
	}

	return notifier_from_errno(err);
}

#ifdef CONFIG_SMP

struct work_for_cpu {
	struct completion completion;
	long (*fn)(void *);
	void *arg;
	long ret;
};

static int do_work_for_cpu(void *_wfc)
{
	struct work_for_cpu *wfc = _wfc;
	wfc->ret = wfc->fn(wfc->arg);
	complete(&wfc->completion);
	return 0;
}

/**
 * work_on_cpu - run a function in user context on a particular cpu
 * @cpu: the cpu to run on
 * @fn: the function to run
 * @arg: the function arg
 *
 * This will return the value @fn returns.
 * It is up to the caller to ensure that the cpu doesn't go offline.
 * The caller must not hold any locks which would prevent @fn from completing.
 */
long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
{
	struct task_struct *sub_thread;
	struct work_for_cpu wfc = {
		.completion = COMPLETION_INITIALIZER_ONSTACK(wfc.completion),
		.fn = fn,
		.arg = arg,
	};

	sub_thread = kthread_create(do_work_for_cpu, &wfc, "work_for_cpu");
	if (IS_ERR(sub_thread))
		return PTR_ERR(sub_thread);
	kthread_bind(sub_thread, cpu);
	wake_up_process(sub_thread);
	wait_for_completion(&wfc.completion);
	return wfc.ret;
}
EXPORT_SYMBOL_GPL(work_on_cpu);
#endif /* CONFIG_SMP */

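/*
 * Illustrative use only (not part of this file): reading a CPU-local
 * hardware resource from the right CPU.  "my_read_fn" and
 * "read_local_hw_state" are made-up names; the caller pins the CPU with
 * get_online_cpus() first.
 *
 *	static long my_read_fn(void *arg)
 *	{
 *		return read_local_hw_state(arg);	// hypothetical helper
 *	}
 *
 *	get_online_cpus();
 *	ret = work_on_cpu(cpu, my_read_fn, dev);
 *	put_online_cpus();
 */
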
void __init init_workqueues(void)
{
	alloc_cpumask_var(&cpu_populated_map, GFP_KERNEL);

	cpumask_copy(cpu_populated_map, cpu_online_mask);
	singlethread_cpu = cpumask_first(cpu_possible_mask);
	cpu_singlethread_map = cpumask_of(singlethread_cpu);
	hotcpu_notifier(workqueue_cpu_callback, 0);
	keventd_wq = create_workqueue("events");
	BUG_ON(!keventd_wq);
}