xref: /linux-6.15/kernel/workqueue.c (revision 22df02bb)
11da177e4SLinus Torvalds /*
21da177e4SLinus Torvalds  * linux/kernel/workqueue.c
31da177e4SLinus Torvalds  *
41da177e4SLinus Torvalds  * Generic mechanism for defining kernel helper threads for running
51da177e4SLinus Torvalds  * arbitrary tasks in process context.
61da177e4SLinus Torvalds  *
71da177e4SLinus Torvalds  * Started by Ingo Molnar, Copyright (C) 2002
81da177e4SLinus Torvalds  *
91da177e4SLinus Torvalds  * Derived from the taskqueue/keventd code by:
101da177e4SLinus Torvalds  *
111da177e4SLinus Torvalds  *   David Woodhouse <[email protected]>
12e1f8e874SFrancois Cami  *   Andrew Morton
131da177e4SLinus Torvalds  *   Kai Petzke <[email protected]>
141da177e4SLinus Torvalds  *   Theodore Ts'o <[email protected]>
1589ada679SChristoph Lameter  *
16cde53535SChristoph Lameter  * Made to use alloc_percpu by Christoph Lameter.
171da177e4SLinus Torvalds  */
181da177e4SLinus Torvalds 
191da177e4SLinus Torvalds #include <linux/module.h>
201da177e4SLinus Torvalds #include <linux/kernel.h>
211da177e4SLinus Torvalds #include <linux/sched.h>
221da177e4SLinus Torvalds #include <linux/init.h>
231da177e4SLinus Torvalds #include <linux/signal.h>
241da177e4SLinus Torvalds #include <linux/completion.h>
251da177e4SLinus Torvalds #include <linux/workqueue.h>
261da177e4SLinus Torvalds #include <linux/slab.h>
271da177e4SLinus Torvalds #include <linux/cpu.h>
281da177e4SLinus Torvalds #include <linux/notifier.h>
291da177e4SLinus Torvalds #include <linux/kthread.h>
301fa44ecaSJames Bottomley #include <linux/hardirq.h>
3146934023SChristoph Lameter #include <linux/mempolicy.h>
32341a5958SRafael J. Wysocki #include <linux/freezer.h>
33d5abe669SPeter Zijlstra #include <linux/kallsyms.h>
34d5abe669SPeter Zijlstra #include <linux/debug_locks.h>
354e6045f1SJohannes Berg #include <linux/lockdep.h>
36fb39125fSZhaolei #define CREATE_TRACE_POINTS
37fb39125fSZhaolei #include <trace/events/workqueue.h>
381da177e4SLinus Torvalds 
391da177e4SLinus Torvalds /*
404690c4abSTejun Heo  * Structure fields obey one of the following exclusion rules.
414690c4abSTejun Heo  *
424690c4abSTejun Heo  * I: Set during initialization and read-only afterwards.
434690c4abSTejun Heo  *
444690c4abSTejun Heo  * L: cwq->lock protected.  Access with cwq->lock held.
454690c4abSTejun Heo  *
464690c4abSTejun Heo  * W: workqueue_lock protected.
474690c4abSTejun Heo  */
484690c4abSTejun Heo 
494690c4abSTejun Heo /*
50f756d5e2SNathan Lynch  * The per-CPU workqueue (if single-threaded, we always use the first
51f756d5e2SNathan Lynch  * possible CPU).
521da177e4SLinus Torvalds  */
531da177e4SLinus Torvalds struct cpu_workqueue_struct {
541da177e4SLinus Torvalds 
551da177e4SLinus Torvalds 	spinlock_t lock;
561da177e4SLinus Torvalds 
571da177e4SLinus Torvalds 	struct list_head worklist;
581da177e4SLinus Torvalds 	wait_queue_head_t more_work;
593af24433SOleg Nesterov 	struct work_struct *current_work;
601da177e4SLinus Torvalds 
614690c4abSTejun Heo 	struct workqueue_struct *wq;		/* I: the owning workqueue */
6236c8b586SIngo Molnar 	struct task_struct	*thread;
631da177e4SLinus Torvalds } ____cacheline_aligned;
641da177e4SLinus Torvalds 
651da177e4SLinus Torvalds /*
661da177e4SLinus Torvalds  * The externally visible workqueue abstraction is an array of
671da177e4SLinus Torvalds  * per-CPU workqueues:
681da177e4SLinus Torvalds  */
691da177e4SLinus Torvalds struct workqueue_struct {
7097e37d7bSTejun Heo 	unsigned int		flags;		/* I: WQ_* flags */
714690c4abSTejun Heo 	struct cpu_workqueue_struct *cpu_wq;	/* I: cwq's */
724690c4abSTejun Heo 	struct list_head	list;		/* W: list of all workqueues */
734690c4abSTejun Heo 	const char		*name;		/* I: workqueue name */
744e6045f1SJohannes Berg #ifdef CONFIG_LOCKDEP
754e6045f1SJohannes Berg 	struct lockdep_map	lockdep_map;
764e6045f1SJohannes Berg #endif
771da177e4SLinus Torvalds };
781da177e4SLinus Torvalds 
79dc186ad7SThomas Gleixner #ifdef CONFIG_DEBUG_OBJECTS_WORK
80dc186ad7SThomas Gleixner 
81dc186ad7SThomas Gleixner static struct debug_obj_descr work_debug_descr;
82dc186ad7SThomas Gleixner 
83dc186ad7SThomas Gleixner /*
84dc186ad7SThomas Gleixner  * fixup_init is called when:
85dc186ad7SThomas Gleixner  * - an active object is initialized
86dc186ad7SThomas Gleixner  */
87dc186ad7SThomas Gleixner static int work_fixup_init(void *addr, enum debug_obj_state state)
88dc186ad7SThomas Gleixner {
89dc186ad7SThomas Gleixner 	struct work_struct *work = addr;
90dc186ad7SThomas Gleixner 
91dc186ad7SThomas Gleixner 	switch (state) {
92dc186ad7SThomas Gleixner 	case ODEBUG_STATE_ACTIVE:
93dc186ad7SThomas Gleixner 		cancel_work_sync(work);
94dc186ad7SThomas Gleixner 		debug_object_init(work, &work_debug_descr);
95dc186ad7SThomas Gleixner 		return 1;
96dc186ad7SThomas Gleixner 	default:
97dc186ad7SThomas Gleixner 		return 0;
98dc186ad7SThomas Gleixner 	}
99dc186ad7SThomas Gleixner }
100dc186ad7SThomas Gleixner 
101dc186ad7SThomas Gleixner /*
102dc186ad7SThomas Gleixner  * fixup_activate is called when:
103dc186ad7SThomas Gleixner  * - an active object is activated
104dc186ad7SThomas Gleixner  * - an unknown object is activated (might be a statically initialized object)
105dc186ad7SThomas Gleixner  */
106dc186ad7SThomas Gleixner static int work_fixup_activate(void *addr, enum debug_obj_state state)
107dc186ad7SThomas Gleixner {
108dc186ad7SThomas Gleixner 	struct work_struct *work = addr;
109dc186ad7SThomas Gleixner 
110dc186ad7SThomas Gleixner 	switch (state) {
111dc186ad7SThomas Gleixner 
112dc186ad7SThomas Gleixner 	case ODEBUG_STATE_NOTAVAILABLE:
113dc186ad7SThomas Gleixner 		/*
114dc186ad7SThomas Gleixner 		 * This is not really a fixup. The work struct was
115dc186ad7SThomas Gleixner 		 * statically initialized. We just make sure that it
116dc186ad7SThomas Gleixner 		 * is tracked in the object tracker.
117dc186ad7SThomas Gleixner 		 */
118*22df02bbSTejun Heo 		if (test_bit(WORK_STRUCT_STATIC_BIT, work_data_bits(work))) {
119dc186ad7SThomas Gleixner 			debug_object_init(work, &work_debug_descr);
120dc186ad7SThomas Gleixner 			debug_object_activate(work, &work_debug_descr);
121dc186ad7SThomas Gleixner 			return 0;
122dc186ad7SThomas Gleixner 		}
123dc186ad7SThomas Gleixner 		WARN_ON_ONCE(1);
124dc186ad7SThomas Gleixner 		return 0;
125dc186ad7SThomas Gleixner 
126dc186ad7SThomas Gleixner 	case ODEBUG_STATE_ACTIVE:
127dc186ad7SThomas Gleixner 		WARN_ON(1);
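		/* fall through */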
128dc186ad7SThomas Gleixner 
129dc186ad7SThomas Gleixner 	default:
130dc186ad7SThomas Gleixner 		return 0;
131dc186ad7SThomas Gleixner 	}
132dc186ad7SThomas Gleixner }
133dc186ad7SThomas Gleixner 
134dc186ad7SThomas Gleixner /*
135dc186ad7SThomas Gleixner  * fixup_free is called when:
136dc186ad7SThomas Gleixner  * - an active object is freed
137dc186ad7SThomas Gleixner  */
138dc186ad7SThomas Gleixner static int work_fixup_free(void *addr, enum debug_obj_state state)
139dc186ad7SThomas Gleixner {
140dc186ad7SThomas Gleixner 	struct work_struct *work = addr;
141dc186ad7SThomas Gleixner 
142dc186ad7SThomas Gleixner 	switch (state) {
143dc186ad7SThomas Gleixner 	case ODEBUG_STATE_ACTIVE:
144dc186ad7SThomas Gleixner 		cancel_work_sync(work);
145dc186ad7SThomas Gleixner 		debug_object_free(work, &work_debug_descr);
146dc186ad7SThomas Gleixner 		return 1;
147dc186ad7SThomas Gleixner 	default:
148dc186ad7SThomas Gleixner 		return 0;
149dc186ad7SThomas Gleixner 	}
150dc186ad7SThomas Gleixner }
151dc186ad7SThomas Gleixner 
152dc186ad7SThomas Gleixner static struct debug_obj_descr work_debug_descr = {
153dc186ad7SThomas Gleixner 	.name		= "work_struct",
154dc186ad7SThomas Gleixner 	.fixup_init	= work_fixup_init,
155dc186ad7SThomas Gleixner 	.fixup_activate	= work_fixup_activate,
156dc186ad7SThomas Gleixner 	.fixup_free	= work_fixup_free,
157dc186ad7SThomas Gleixner };
158dc186ad7SThomas Gleixner 
159dc186ad7SThomas Gleixner static inline void debug_work_activate(struct work_struct *work)
160dc186ad7SThomas Gleixner {
161dc186ad7SThomas Gleixner 	debug_object_activate(work, &work_debug_descr);
162dc186ad7SThomas Gleixner }
163dc186ad7SThomas Gleixner 
164dc186ad7SThomas Gleixner static inline void debug_work_deactivate(struct work_struct *work)
165dc186ad7SThomas Gleixner {
166dc186ad7SThomas Gleixner 	debug_object_deactivate(work, &work_debug_descr);
167dc186ad7SThomas Gleixner }
168dc186ad7SThomas Gleixner 
169dc186ad7SThomas Gleixner void __init_work(struct work_struct *work, int onstack)
170dc186ad7SThomas Gleixner {
171dc186ad7SThomas Gleixner 	if (onstack)
172dc186ad7SThomas Gleixner 		debug_object_init_on_stack(work, &work_debug_descr);
173dc186ad7SThomas Gleixner 	else
174dc186ad7SThomas Gleixner 		debug_object_init(work, &work_debug_descr);
175dc186ad7SThomas Gleixner }
176dc186ad7SThomas Gleixner EXPORT_SYMBOL_GPL(__init_work);
177dc186ad7SThomas Gleixner 
178dc186ad7SThomas Gleixner void destroy_work_on_stack(struct work_struct *work)
179dc186ad7SThomas Gleixner {
180dc186ad7SThomas Gleixner 	debug_object_free(work, &work_debug_descr);
181dc186ad7SThomas Gleixner }
182dc186ad7SThomas Gleixner EXPORT_SYMBOL_GPL(destroy_work_on_stack);
183dc186ad7SThomas Gleixner 
184dc186ad7SThomas Gleixner #else
185dc186ad7SThomas Gleixner static inline void debug_work_activate(struct work_struct *work) { }
186dc186ad7SThomas Gleixner static inline void debug_work_deactivate(struct work_struct *work) { }
187dc186ad7SThomas Gleixner #endif
188dc186ad7SThomas Gleixner 
18995402b38SGautham R Shenoy /* Serializes the accesses to the list of workqueues. */
19095402b38SGautham R Shenoy static DEFINE_SPINLOCK(workqueue_lock);
1911da177e4SLinus Torvalds static LIST_HEAD(workqueues);
1921da177e4SLinus Torvalds 
1933af24433SOleg Nesterov static int singlethread_cpu __read_mostly;
194e7577c50SRusty Russell static const struct cpumask *cpu_singlethread_map __read_mostly;
19514441960SOleg Nesterov /*
19614441960SOleg Nesterov  * _cpu_down() first removes CPU from cpu_online_map, then CPU_DEAD
19714441960SOleg Nesterov  * flushes cwq->worklist. This means that flush_workqueue/wait_on_work
19814441960SOleg Nesterov  * which come in between can't use for_each_online_cpu(). We could
19914441960SOleg Nesterov  * use cpu_possible_map instead; the cpumask below is more documentation
20014441960SOleg Nesterov  * than an optimization.
20114441960SOleg Nesterov  */
202e7577c50SRusty Russell static cpumask_var_t cpu_populated_map __read_mostly;
203f756d5e2SNathan Lynch 
2041da177e4SLinus Torvalds /* If it's single-threaded, it isn't in the list of workqueues. */
20597e37d7bSTejun Heo static inline bool is_wq_single_threaded(struct workqueue_struct *wq)
2061da177e4SLinus Torvalds {
20797e37d7bSTejun Heo 	return wq->flags & WQ_SINGLE_THREAD;
2081da177e4SLinus Torvalds }
2091da177e4SLinus Torvalds 
210e7577c50SRusty Russell static const struct cpumask *wq_cpu_map(struct workqueue_struct *wq)
211b1f4ec17SOleg Nesterov {
2126cc88bc4SDavid Howells 	return is_wq_single_threaded(wq)
213e7577c50SRusty Russell 		? cpu_singlethread_map : cpu_populated_map;
214b1f4ec17SOleg Nesterov }
215b1f4ec17SOleg Nesterov 
2164690c4abSTejun Heo static struct cpu_workqueue_struct *get_cwq(unsigned int cpu,
2174690c4abSTejun Heo 					    struct workqueue_struct *wq)
218a848e3b6SOleg Nesterov {
2196cc88bc4SDavid Howells 	if (unlikely(is_wq_single_threaded(wq)))
220a848e3b6SOleg Nesterov 		cpu = singlethread_cpu;
221a848e3b6SOleg Nesterov 	return per_cpu_ptr(wq->cpu_wq, cpu);
222a848e3b6SOleg Nesterov }
223a848e3b6SOleg Nesterov 
2244594bf15SDavid Howells /*
2254594bf15SDavid Howells  * Set the workqueue on which a work item is to be run
2264594bf15SDavid Howells  * - Must *only* be called if the pending flag is set
2274594bf15SDavid Howells  */
228ed7c0feeSOleg Nesterov static inline void set_wq_data(struct work_struct *work,
2294690c4abSTejun Heo 			       struct cpu_workqueue_struct *cwq,
2304690c4abSTejun Heo 			       unsigned long extra_flags)
231365970a1SDavid Howells {
2324594bf15SDavid Howells 	BUG_ON(!work_pending(work));
2334594bf15SDavid Howells 
2344690c4abSTejun Heo 	atomic_long_set(&work->data, (unsigned long)cwq | work_static(work) |
235*22df02bbSTejun Heo 			WORK_STRUCT_PENDING | extra_flags);
236365970a1SDavid Howells }
237365970a1SDavid Howells 
2384d707b9fSOleg Nesterov /*
2394d707b9fSOleg Nesterov  * Clear WORK_STRUCT_PENDING and the workqueue on which it was queued.
2404d707b9fSOleg Nesterov  */
2414d707b9fSOleg Nesterov static inline void clear_wq_data(struct work_struct *work)
2424d707b9fSOleg Nesterov {
2434690c4abSTejun Heo 	atomic_long_set(&work->data, work_static(work));
2444d707b9fSOleg Nesterov }
2454d707b9fSOleg Nesterov 
246ed7c0feeSOleg Nesterov static inline
247ed7c0feeSOleg Nesterov struct cpu_workqueue_struct *get_wq_data(struct work_struct *work)
248365970a1SDavid Howells {
249a08727baSLinus Torvalds 	return (void *) (atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK);
250365970a1SDavid Howells }
251365970a1SDavid Howells 
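/*
 * An illustrative sketch of how work->data is packed by the helpers
 * above (the authoritative bit definitions live in <linux/workqueue.h>;
 * the layout below is a simplified summary, not a new definition):
 *
 *	bit 0		WORK_STRUCT_PENDING
 *	bit 1		WORK_STRUCT_STATIC (CONFIG_DEBUG_OBJECTS_WORK only)
 *	high bits	cwq pointer (cwqs are aligned, so the low bits of
 *			the pointer are known to be zero)
 *
 * WORK_STRUCT_WQ_DATA_MASK masks off the flag bits, which is why
 * get_wq_data() can recover the cwq pointer with a single AND.
 */
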
2524690c4abSTejun Heo /**
2534690c4abSTejun Heo  * insert_work - insert a work into cwq
2544690c4abSTejun Heo  * @cwq: cwq @work belongs to
2554690c4abSTejun Heo  * @work: work to insert
2564690c4abSTejun Heo  * @head: insertion point
2574690c4abSTejun Heo  * @extra_flags: extra WORK_STRUCT_* flags to set
2584690c4abSTejun Heo  *
2594690c4abSTejun Heo  * Insert @work into @cwq after @head.
2604690c4abSTejun Heo  *
2614690c4abSTejun Heo  * CONTEXT:
2624690c4abSTejun Heo  * spin_lock_irq(cwq->lock).
2634690c4abSTejun Heo  */
264b89deed3SOleg Nesterov static void insert_work(struct cpu_workqueue_struct *cwq,
2654690c4abSTejun Heo 			struct work_struct *work, struct list_head *head,
2664690c4abSTejun Heo 			unsigned int extra_flags)
267b89deed3SOleg Nesterov {
268e1d8aa9fSFrederic Weisbecker 	trace_workqueue_insertion(cwq->thread, work);
269e1d8aa9fSFrederic Weisbecker 
2704690c4abSTejun Heo 	/* we own @work, set data and link */
2714690c4abSTejun Heo 	set_wq_data(work, cwq, extra_flags);
2724690c4abSTejun Heo 
2736e84d644SOleg Nesterov 	/*
2746e84d644SOleg Nesterov 	 * Ensure that we get the right work->data if we see the
2756e84d644SOleg Nesterov 	 * result of list_add() below, see try_to_grab_pending().
2766e84d644SOleg Nesterov 	 */
2776e84d644SOleg Nesterov 	smp_wmb();
2784690c4abSTejun Heo 
2791a4d9b0aSOleg Nesterov 	list_add_tail(&work->entry, head);
280b89deed3SOleg Nesterov 	wake_up(&cwq->more_work);
281b89deed3SOleg Nesterov }
282b89deed3SOleg Nesterov 
2834690c4abSTejun Heo static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
2841da177e4SLinus Torvalds 			 struct work_struct *work)
2851da177e4SLinus Torvalds {
2864690c4abSTejun Heo 	struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
2871da177e4SLinus Torvalds 	unsigned long flags;
2881da177e4SLinus Torvalds 
289dc186ad7SThomas Gleixner 	debug_work_activate(work);
2901da177e4SLinus Torvalds 	spin_lock_irqsave(&cwq->lock, flags);
2914690c4abSTejun Heo 	BUG_ON(!list_empty(&work->entry));
2924690c4abSTejun Heo 	insert_work(cwq, work, &cwq->worklist, 0);
2931da177e4SLinus Torvalds 	spin_unlock_irqrestore(&cwq->lock, flags);
2941da177e4SLinus Torvalds }
2951da177e4SLinus Torvalds 
2960fcb78c2SRolf Eike Beer /**
2970fcb78c2SRolf Eike Beer  * queue_work - queue work on a workqueue
2980fcb78c2SRolf Eike Beer  * @wq: workqueue to use
2990fcb78c2SRolf Eike Beer  * @work: work to queue
3000fcb78c2SRolf Eike Beer  *
301057647fcSAlan Stern  * Returns 0 if @work was already on a queue, non-zero otherwise.
3021da177e4SLinus Torvalds  *
30300dfcaf7SOleg Nesterov  * We queue the work to the CPU on which it was submitted, but if the CPU dies
30400dfcaf7SOleg Nesterov  * it can be processed by another CPU.
3051da177e4SLinus Torvalds  */
3067ad5b3a5SHarvey Harrison int queue_work(struct workqueue_struct *wq, struct work_struct *work)
3071da177e4SLinus Torvalds {
308ef1ca236SOleg Nesterov 	int ret;
3091da177e4SLinus Torvalds 
310ef1ca236SOleg Nesterov 	ret = queue_work_on(get_cpu(), wq, work);
311a848e3b6SOleg Nesterov 	put_cpu();
312ef1ca236SOleg Nesterov 
3131da177e4SLinus Torvalds 	return ret;
3141da177e4SLinus Torvalds }
315ae90dd5dSDave Jones EXPORT_SYMBOL_GPL(queue_work);
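
/*
 * A minimal usage sketch for queue_work(); illustrative only, all
 * example_* identifiers are hypothetical and not part of this file:
 *
 *	static void example_fn(struct work_struct *work)
 *	{
 *		pr_info("example work executed\n");
 *	}
 *	static DECLARE_WORK(example_work, example_fn);
 *	static struct workqueue_struct *example_wq;
 *
 *	example_wq = create_workqueue("example");
 *	if (example_wq)
 *		queue_work(example_wq, &example_work);
 */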
3161da177e4SLinus Torvalds 
317c1a220e7SZhang Rui /**
318c1a220e7SZhang Rui  * queue_work_on - queue work on specific cpu
319c1a220e7SZhang Rui  * @cpu: CPU number to execute work on
320c1a220e7SZhang Rui  * @wq: workqueue to use
321c1a220e7SZhang Rui  * @work: work to queue
322c1a220e7SZhang Rui  *
323c1a220e7SZhang Rui  * Returns 0 if @work was already on a queue, non-zero otherwise.
324c1a220e7SZhang Rui  *
325c1a220e7SZhang Rui  * We queue the work to a specific CPU; the caller must ensure it
326c1a220e7SZhang Rui  * can't go away.
327c1a220e7SZhang Rui  */
328c1a220e7SZhang Rui int
329c1a220e7SZhang Rui queue_work_on(int cpu, struct workqueue_struct *wq, struct work_struct *work)
330c1a220e7SZhang Rui {
331c1a220e7SZhang Rui 	int ret = 0;
332c1a220e7SZhang Rui 
333*22df02bbSTejun Heo 	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
3344690c4abSTejun Heo 		__queue_work(cpu, wq, work);
335c1a220e7SZhang Rui 		ret = 1;
336c1a220e7SZhang Rui 	}
337c1a220e7SZhang Rui 	return ret;
338c1a220e7SZhang Rui }
339c1a220e7SZhang Rui EXPORT_SYMBOL_GPL(queue_work_on);
340c1a220e7SZhang Rui 
3416d141c3fSLi Zefan static void delayed_work_timer_fn(unsigned long __data)
3421da177e4SLinus Torvalds {
34352bad64dSDavid Howells 	struct delayed_work *dwork = (struct delayed_work *)__data;
344ed7c0feeSOleg Nesterov 	struct cpu_workqueue_struct *cwq = get_wq_data(&dwork->work);
3451da177e4SLinus Torvalds 
3464690c4abSTejun Heo 	__queue_work(smp_processor_id(), cwq->wq, &dwork->work);
3471da177e4SLinus Torvalds }
3481da177e4SLinus Torvalds 
3490fcb78c2SRolf Eike Beer /**
3500fcb78c2SRolf Eike Beer  * queue_delayed_work - queue work on a workqueue after delay
3510fcb78c2SRolf Eike Beer  * @wq: workqueue to use
352af9997e4SRandy Dunlap  * @dwork: delayable work to queue
3530fcb78c2SRolf Eike Beer  * @delay: number of jiffies to wait before queueing
3540fcb78c2SRolf Eike Beer  *
355057647fcSAlan Stern  * Returns 0 if @dwork was already on a queue, non-zero otherwise.
3560fcb78c2SRolf Eike Beer  */
3577ad5b3a5SHarvey Harrison int queue_delayed_work(struct workqueue_struct *wq,
35852bad64dSDavid Howells 			struct delayed_work *dwork, unsigned long delay)
3591da177e4SLinus Torvalds {
36052bad64dSDavid Howells 	if (delay == 0)
36163bc0362SOleg Nesterov 		return queue_work(wq, &dwork->work);
3621da177e4SLinus Torvalds 
36363bc0362SOleg Nesterov 	return queue_delayed_work_on(-1, wq, dwork, delay);
3641da177e4SLinus Torvalds }
365ae90dd5dSDave Jones EXPORT_SYMBOL_GPL(queue_delayed_work);
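
/*
 * A minimal usage sketch for queue_delayed_work(); illustrative only,
 * example_* names are hypothetical:
 *
 *	static void example_timeout_fn(struct work_struct *work)
 *	{
 *		pr_info("ran roughly two seconds after queueing\n");
 *	}
 *	static DECLARE_DELAYED_WORK(example_dwork, example_timeout_fn);
 *
 *	queue_delayed_work(example_wq, &example_dwork, 2 * HZ);
 */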
3661da177e4SLinus Torvalds 
3670fcb78c2SRolf Eike Beer /**
3680fcb78c2SRolf Eike Beer  * queue_delayed_work_on - queue work on specific CPU after delay
3690fcb78c2SRolf Eike Beer  * @cpu: CPU number to execute work on
3700fcb78c2SRolf Eike Beer  * @wq: workqueue to use
371af9997e4SRandy Dunlap  * @dwork: work to queue
3720fcb78c2SRolf Eike Beer  * @delay: number of jiffies to wait before queueing
3730fcb78c2SRolf Eike Beer  *
374057647fcSAlan Stern  * Returns 0 if @dwork was already on a queue, non-zero otherwise.
3750fcb78c2SRolf Eike Beer  */
3767a6bc1cdSVenkatesh Pallipadi int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
37752bad64dSDavid Howells 			struct delayed_work *dwork, unsigned long delay)
3787a6bc1cdSVenkatesh Pallipadi {
3797a6bc1cdSVenkatesh Pallipadi 	int ret = 0;
38052bad64dSDavid Howells 	struct timer_list *timer = &dwork->timer;
38152bad64dSDavid Howells 	struct work_struct *work = &dwork->work;
3827a6bc1cdSVenkatesh Pallipadi 
383*22df02bbSTejun Heo 	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
3847a6bc1cdSVenkatesh Pallipadi 		BUG_ON(timer_pending(timer));
3857a6bc1cdSVenkatesh Pallipadi 		BUG_ON(!list_empty(&work->entry));
3867a6bc1cdSVenkatesh Pallipadi 
3878a3e77ccSAndrew Liu 		timer_stats_timer_set_start_info(&dwork->timer);
3888a3e77ccSAndrew Liu 
389ed7c0feeSOleg Nesterov 		/* This stores cwq for the moment, for the timer_fn */
3904690c4abSTejun Heo 		set_wq_data(work, get_cwq(raw_smp_processor_id(), wq), 0);
3917a6bc1cdSVenkatesh Pallipadi 		timer->expires = jiffies + delay;
39252bad64dSDavid Howells 		timer->data = (unsigned long)dwork;
3937a6bc1cdSVenkatesh Pallipadi 		timer->function = delayed_work_timer_fn;
39463bc0362SOleg Nesterov 
39563bc0362SOleg Nesterov 		if (unlikely(cpu >= 0))
3967a6bc1cdSVenkatesh Pallipadi 			add_timer_on(timer, cpu);
39763bc0362SOleg Nesterov 		else
39863bc0362SOleg Nesterov 			add_timer(timer);
3997a6bc1cdSVenkatesh Pallipadi 		ret = 1;
4007a6bc1cdSVenkatesh Pallipadi 	}
4017a6bc1cdSVenkatesh Pallipadi 	return ret;
4027a6bc1cdSVenkatesh Pallipadi }
403ae90dd5dSDave Jones EXPORT_SYMBOL_GPL(queue_delayed_work_on);
4041da177e4SLinus Torvalds 
405858119e1SArjan van de Ven static void run_workqueue(struct cpu_workqueue_struct *cwq)
4061da177e4SLinus Torvalds {
407f293ea92SOleg Nesterov 	spin_lock_irq(&cwq->lock);
4081da177e4SLinus Torvalds 	while (!list_empty(&cwq->worklist)) {
4091da177e4SLinus Torvalds 		struct work_struct *work = list_entry(cwq->worklist.next,
4101da177e4SLinus Torvalds 						struct work_struct, entry);
4116bb49e59SDavid Howells 		work_func_t f = work->func;
4124e6045f1SJohannes Berg #ifdef CONFIG_LOCKDEP
4134e6045f1SJohannes Berg 		/*
4144e6045f1SJohannes Berg 		 * It is permissible to free the struct work_struct
4154e6045f1SJohannes Berg 		 * from inside the function that is called from it;
4164e6045f1SJohannes Berg 		 * we need to take that into account for lockdep too.
4174e6045f1SJohannes Berg 		 * To avoid bogus "held lock freed" warnings as well
4184e6045f1SJohannes Berg 		 * as problems when looking into work->lockdep_map,
4194e6045f1SJohannes Berg 		 * make a copy and use that here.
4204e6045f1SJohannes Berg 		 */
4214e6045f1SJohannes Berg 		struct lockdep_map lockdep_map = work->lockdep_map;
4224e6045f1SJohannes Berg #endif
423e1d8aa9fSFrederic Weisbecker 		trace_workqueue_execution(cwq->thread, work);
424dc186ad7SThomas Gleixner 		debug_work_deactivate(work);
425b89deed3SOleg Nesterov 		cwq->current_work = work;
4261da177e4SLinus Torvalds 		list_del_init(cwq->worklist.next);
427f293ea92SOleg Nesterov 		spin_unlock_irq(&cwq->lock);
4281da177e4SLinus Torvalds 
429365970a1SDavid Howells 		BUG_ON(get_wq_data(work) != cwq);
43023b2e599SOleg Nesterov 		work_clear_pending(work);
4313295f0efSIngo Molnar 		lock_map_acquire(&cwq->wq->lockdep_map);
4323295f0efSIngo Molnar 		lock_map_acquire(&lockdep_map);
43365f27f38SDavid Howells 		f(work);
4343295f0efSIngo Molnar 		lock_map_release(&lockdep_map);
4353295f0efSIngo Molnar 		lock_map_release(&cwq->wq->lockdep_map);
4361da177e4SLinus Torvalds 
437d5abe669SPeter Zijlstra 		if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
438d5abe669SPeter Zijlstra 			printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
439d5abe669SPeter Zijlstra 					"%s/0x%08x/%d\n",
440d5abe669SPeter Zijlstra 					current->comm, preempt_count(),
441ba25f9dcSPavel Emelyanov 					task_pid_nr(current));
442d5abe669SPeter Zijlstra 			printk(KERN_ERR "    last function: ");
443d5abe669SPeter Zijlstra 			print_symbol("%s\n", (unsigned long)f);
444d5abe669SPeter Zijlstra 			debug_show_held_locks(current);
445d5abe669SPeter Zijlstra 			dump_stack();
446d5abe669SPeter Zijlstra 		}
447d5abe669SPeter Zijlstra 
448f293ea92SOleg Nesterov 		spin_lock_irq(&cwq->lock);
449b89deed3SOleg Nesterov 		cwq->current_work = NULL;
4501da177e4SLinus Torvalds 	}
451f293ea92SOleg Nesterov 	spin_unlock_irq(&cwq->lock);
4521da177e4SLinus Torvalds }
4531da177e4SLinus Torvalds 
4544690c4abSTejun Heo /**
4554690c4abSTejun Heo  * worker_thread - the worker thread function
4564690c4abSTejun Heo  * @__cwq: cwq to serve
4574690c4abSTejun Heo  *
4584690c4abSTejun Heo  * The cwq worker thread function.
4594690c4abSTejun Heo  */
4601da177e4SLinus Torvalds static int worker_thread(void *__cwq)
4611da177e4SLinus Torvalds {
4621da177e4SLinus Torvalds 	struct cpu_workqueue_struct *cwq = __cwq;
4633af24433SOleg Nesterov 	DEFINE_WAIT(wait);
4641da177e4SLinus Torvalds 
46597e37d7bSTejun Heo 	if (cwq->wq->flags & WQ_FREEZEABLE)
46683144186SRafael J. Wysocki 		set_freezable();
4671da177e4SLinus Torvalds 
4683af24433SOleg Nesterov 	for (;;) {
4693af24433SOleg Nesterov 		prepare_to_wait(&cwq->more_work, &wait, TASK_INTERRUPTIBLE);
47014441960SOleg Nesterov 		if (!freezing(current) &&
47114441960SOleg Nesterov 		    !kthread_should_stop() &&
47214441960SOleg Nesterov 		    list_empty(&cwq->worklist))
4731da177e4SLinus Torvalds 			schedule();
4743af24433SOleg Nesterov 		finish_wait(&cwq->more_work, &wait);
4751da177e4SLinus Torvalds 
47685f4186aSOleg Nesterov 		try_to_freeze();
47785f4186aSOleg Nesterov 
47814441960SOleg Nesterov 		if (kthread_should_stop())
4793af24433SOleg Nesterov 			break;
4803af24433SOleg Nesterov 
4811da177e4SLinus Torvalds 		run_workqueue(cwq);
4821da177e4SLinus Torvalds 	}
4833af24433SOleg Nesterov 
4841da177e4SLinus Torvalds 	return 0;
4851da177e4SLinus Torvalds }
4861da177e4SLinus Torvalds 
487fc2e4d70SOleg Nesterov struct wq_barrier {
488fc2e4d70SOleg Nesterov 	struct work_struct	work;
489fc2e4d70SOleg Nesterov 	struct completion	done;
490fc2e4d70SOleg Nesterov };
491fc2e4d70SOleg Nesterov 
492fc2e4d70SOleg Nesterov static void wq_barrier_func(struct work_struct *work)
493fc2e4d70SOleg Nesterov {
494fc2e4d70SOleg Nesterov 	struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
495fc2e4d70SOleg Nesterov 	complete(&barr->done);
496fc2e4d70SOleg Nesterov }
497fc2e4d70SOleg Nesterov 
4984690c4abSTejun Heo /**
4994690c4abSTejun Heo  * insert_wq_barrier - insert a barrier work
5004690c4abSTejun Heo  * @cwq: cwq to insert barrier into
5014690c4abSTejun Heo  * @barr: wq_barrier to insert
5024690c4abSTejun Heo  * @head: insertion point
5034690c4abSTejun Heo  *
5044690c4abSTejun Heo  * Insert barrier @barr into @cwq before @head.
5054690c4abSTejun Heo  *
5064690c4abSTejun Heo  * CONTEXT:
5074690c4abSTejun Heo  * spin_lock_irq(cwq->lock).
5084690c4abSTejun Heo  */
50983c22520SOleg Nesterov static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
5101a4d9b0aSOleg Nesterov 			struct wq_barrier *barr, struct list_head *head)
511fc2e4d70SOleg Nesterov {
512dc186ad7SThomas Gleixner 	/*
513dc186ad7SThomas Gleixner 	 * debugobject calls are safe here even with cwq->lock locked
514dc186ad7SThomas Gleixner 	 * as we know for sure that this will not trigger any of the
515dc186ad7SThomas Gleixner 	 * checks and call back into the fixup functions where we
516dc186ad7SThomas Gleixner 	 * might deadlock.
517dc186ad7SThomas Gleixner 	 */
518dc186ad7SThomas Gleixner 	INIT_WORK_ON_STACK(&barr->work, wq_barrier_func);
519*22df02bbSTejun Heo 	__set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work));
520fc2e4d70SOleg Nesterov 	init_completion(&barr->done);
52183c22520SOleg Nesterov 
522dc186ad7SThomas Gleixner 	debug_work_activate(&barr->work);
5234690c4abSTejun Heo 	insert_work(cwq, &barr->work, head, 0);
524fc2e4d70SOleg Nesterov }
525fc2e4d70SOleg Nesterov 
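/*
 * The barrier is what makes flushing work: a dummy work item whose only
 * job is to complete a completion is linked into the worklist, and the
 * flusher sleeps on that completion.  A sketch of the flow, mirroring
 * flush_cpu_workqueue() below:
 *
 *	struct wq_barrier barr;
 *
 *	spin_lock_irq(&cwq->lock);
 *	insert_wq_barrier(cwq, &barr, &cwq->worklist);
 *	spin_unlock_irq(&cwq->lock);
 *
 *	wait_for_completion(&barr.done);
 *	destroy_work_on_stack(&barr.work);
 */
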
52614441960SOleg Nesterov static int flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
5271da177e4SLinus Torvalds {
5282355b70fSLai Jiangshan 	int active = 0;
529fc2e4d70SOleg Nesterov 	struct wq_barrier barr;
5301da177e4SLinus Torvalds 
5312355b70fSLai Jiangshan 	WARN_ON(cwq->thread == current);
5322355b70fSLai Jiangshan 
53383c22520SOleg Nesterov 	spin_lock_irq(&cwq->lock);
53483c22520SOleg Nesterov 	if (!list_empty(&cwq->worklist) || cwq->current_work != NULL) {
5351a4d9b0aSOleg Nesterov 		insert_wq_barrier(cwq, &barr, &cwq->worklist);
53683c22520SOleg Nesterov 		active = 1;
53783c22520SOleg Nesterov 	}
53883c22520SOleg Nesterov 	spin_unlock_irq(&cwq->lock);
5391da177e4SLinus Torvalds 
540dc186ad7SThomas Gleixner 	if (active) {
541fc2e4d70SOleg Nesterov 		wait_for_completion(&barr.done);
542dc186ad7SThomas Gleixner 		destroy_work_on_stack(&barr.work);
543dc186ad7SThomas Gleixner 	}
54414441960SOleg Nesterov 
54514441960SOleg Nesterov 	return active;
54683c22520SOleg Nesterov }
5471da177e4SLinus Torvalds 
5480fcb78c2SRolf Eike Beer /**
5491da177e4SLinus Torvalds  * flush_workqueue - ensure that any scheduled work has run to completion.
5500fcb78c2SRolf Eike Beer  * @wq: workqueue to flush
5511da177e4SLinus Torvalds  *
5521da177e4SLinus Torvalds  * Forces execution of the workqueue and blocks until its completion.
5531da177e4SLinus Torvalds  * This is typically used in driver shutdown handlers.
5541da177e4SLinus Torvalds  *
555fc2e4d70SOleg Nesterov  * We sleep until all work items which were queued on entry have been
556fc2e4d70SOleg Nesterov  * handled, but we are not livelocked by new incoming ones.
5571da177e4SLinus Torvalds  */
5587ad5b3a5SHarvey Harrison void flush_workqueue(struct workqueue_struct *wq)
5591da177e4SLinus Torvalds {
560e7577c50SRusty Russell 	const struct cpumask *cpu_map = wq_cpu_map(wq);
561cce1a165SOleg Nesterov 	int cpu;
562b1f4ec17SOleg Nesterov 
563f293ea92SOleg Nesterov 	might_sleep();
5643295f0efSIngo Molnar 	lock_map_acquire(&wq->lockdep_map);
5653295f0efSIngo Molnar 	lock_map_release(&wq->lockdep_map);
566aa85ea5bSRusty Russell 	for_each_cpu(cpu, cpu_map)
56789ada679SChristoph Lameter 		flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
5681da177e4SLinus Torvalds }
569ae90dd5dSDave Jones EXPORT_SYMBOL_GPL(flush_workqueue);
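
/*
 * A typical shutdown-path sketch; illustrative only, example_* names are
 * hypothetical.  Callers must stop queueing new work first:
 *
 *	static void example_shutdown(void)
 *	{
 *		flush_workqueue(example_wq);
 *		destroy_workqueue(example_wq);
 *	}
 *
 * Note that destroy_workqueue() flushes remaining work itself, so the
 * explicit flush matters mainly when the queue is kept alive afterwards.
 */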
5701da177e4SLinus Torvalds 
571db700897SOleg Nesterov /**
572db700897SOleg Nesterov  * flush_work - block until a work_struct's callback has terminated
573db700897SOleg Nesterov  * @work: the work which is to be flushed
574db700897SOleg Nesterov  *
575a67da70dSOleg Nesterov  * Returns false if @work has already terminated.
576a67da70dSOleg Nesterov  *
577db700897SOleg Nesterov  * It is expected that, prior to calling flush_work(), the caller has
578db700897SOleg Nesterov  * arranged for the work to not be requeued; otherwise it doesn't make
579db700897SOleg Nesterov  * sense to use this function.
580db700897SOleg Nesterov  */
581db700897SOleg Nesterov int flush_work(struct work_struct *work)
582db700897SOleg Nesterov {
583db700897SOleg Nesterov 	struct cpu_workqueue_struct *cwq;
584db700897SOleg Nesterov 	struct list_head *prev;
585db700897SOleg Nesterov 	struct wq_barrier barr;
586db700897SOleg Nesterov 
587db700897SOleg Nesterov 	might_sleep();
588db700897SOleg Nesterov 	cwq = get_wq_data(work);
589db700897SOleg Nesterov 	if (!cwq)
590db700897SOleg Nesterov 		return 0;
591db700897SOleg Nesterov 
5923295f0efSIngo Molnar 	lock_map_acquire(&cwq->wq->lockdep_map);
5933295f0efSIngo Molnar 	lock_map_release(&cwq->wq->lockdep_map);
594a67da70dSOleg Nesterov 
595db700897SOleg Nesterov 	spin_lock_irq(&cwq->lock);
596db700897SOleg Nesterov 	if (!list_empty(&work->entry)) {
597db700897SOleg Nesterov 		/*
598db700897SOleg Nesterov 		 * See the comment near try_to_grab_pending()->smp_rmb().
599db700897SOleg Nesterov 		 * If it was re-queued under us we are not going to wait.
600db700897SOleg Nesterov 		 */
601db700897SOleg Nesterov 		smp_rmb();
602db700897SOleg Nesterov 		if (unlikely(cwq != get_wq_data(work)))
6034690c4abSTejun Heo 			goto already_gone;
604db700897SOleg Nesterov 		prev = &work->entry;
605db700897SOleg Nesterov 	} else {
606db700897SOleg Nesterov 		if (cwq->current_work != work)
6074690c4abSTejun Heo 			goto already_gone;
608db700897SOleg Nesterov 		prev = &cwq->worklist;
609db700897SOleg Nesterov 	}
610db700897SOleg Nesterov 	insert_wq_barrier(cwq, &barr, prev->next);
611db700897SOleg Nesterov 
6124690c4abSTejun Heo 	spin_unlock_irq(&cwq->lock);
613db700897SOleg Nesterov 	wait_for_completion(&barr.done);
614dc186ad7SThomas Gleixner 	destroy_work_on_stack(&barr.work);
615db700897SOleg Nesterov 	return 1;
6164690c4abSTejun Heo already_gone:
6174690c4abSTejun Heo 	spin_unlock_irq(&cwq->lock);
6184690c4abSTejun Heo 	return 0;
619db700897SOleg Nesterov }
620db700897SOleg Nesterov EXPORT_SYMBOL_GPL(flush_work);
621db700897SOleg Nesterov 
6226e84d644SOleg Nesterov /*
6231f1f642eSOleg Nesterov  * Upon a successful return (>= 0), the caller "owns" the WORK_STRUCT_PENDING bit,
6246e84d644SOleg Nesterov  * so this work can't be re-armed in any way.
6256e84d644SOleg Nesterov  */
6266e84d644SOleg Nesterov static int try_to_grab_pending(struct work_struct *work)
6276e84d644SOleg Nesterov {
6286e84d644SOleg Nesterov 	struct cpu_workqueue_struct *cwq;
6291f1f642eSOleg Nesterov 	int ret = -1;
6306e84d644SOleg Nesterov 
631*22df02bbSTejun Heo 	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)))
6321f1f642eSOleg Nesterov 		return 0;
6336e84d644SOleg Nesterov 
6346e84d644SOleg Nesterov 	/*
6356e84d644SOleg Nesterov 	 * The queueing is in progress, or it is already queued. Try to
6366e84d644SOleg Nesterov 	 * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
6376e84d644SOleg Nesterov 	 */
6386e84d644SOleg Nesterov 
6396e84d644SOleg Nesterov 	cwq = get_wq_data(work);
6406e84d644SOleg Nesterov 	if (!cwq)
6416e84d644SOleg Nesterov 		return ret;
6426e84d644SOleg Nesterov 
6436e84d644SOleg Nesterov 	spin_lock_irq(&cwq->lock);
6446e84d644SOleg Nesterov 	if (!list_empty(&work->entry)) {
6456e84d644SOleg Nesterov 		/*
6466e84d644SOleg Nesterov 		 * This work is queued, but perhaps we locked the wrong cwq.
6476e84d644SOleg Nesterov 		 * In that case we must see the new value after rmb(), see
6486e84d644SOleg Nesterov 		 * insert_work()->wmb().
6496e84d644SOleg Nesterov 		 */
6506e84d644SOleg Nesterov 		smp_rmb();
6516e84d644SOleg Nesterov 		if (cwq == get_wq_data(work)) {
652dc186ad7SThomas Gleixner 			debug_work_deactivate(work);
6536e84d644SOleg Nesterov 			list_del_init(&work->entry);
6546e84d644SOleg Nesterov 			ret = 1;
6556e84d644SOleg Nesterov 		}
6566e84d644SOleg Nesterov 	}
6576e84d644SOleg Nesterov 	spin_unlock_irq(&cwq->lock);
6586e84d644SOleg Nesterov 
6596e84d644SOleg Nesterov 	return ret;
6606e84d644SOleg Nesterov }
6616e84d644SOleg Nesterov 
6626e84d644SOleg Nesterov static void wait_on_cpu_work(struct cpu_workqueue_struct *cwq,
663b89deed3SOleg Nesterov 				struct work_struct *work)
664b89deed3SOleg Nesterov {
665b89deed3SOleg Nesterov 	struct wq_barrier barr;
666b89deed3SOleg Nesterov 	int running = 0;
667b89deed3SOleg Nesterov 
668b89deed3SOleg Nesterov 	spin_lock_irq(&cwq->lock);
669b89deed3SOleg Nesterov 	if (unlikely(cwq->current_work == work)) {
6701a4d9b0aSOleg Nesterov 		insert_wq_barrier(cwq, &barr, cwq->worklist.next);
671b89deed3SOleg Nesterov 		running = 1;
672b89deed3SOleg Nesterov 	}
673b89deed3SOleg Nesterov 	spin_unlock_irq(&cwq->lock);
674b89deed3SOleg Nesterov 
675dc186ad7SThomas Gleixner 	if (unlikely(running)) {
676b89deed3SOleg Nesterov 		wait_for_completion(&barr.done);
677dc186ad7SThomas Gleixner 		destroy_work_on_stack(&barr.work);
678dc186ad7SThomas Gleixner 	}
679b89deed3SOleg Nesterov }
680b89deed3SOleg Nesterov 
6816e84d644SOleg Nesterov static void wait_on_work(struct work_struct *work)
682b89deed3SOleg Nesterov {
683b89deed3SOleg Nesterov 	struct cpu_workqueue_struct *cwq;
68428e53bddSOleg Nesterov 	struct workqueue_struct *wq;
685e7577c50SRusty Russell 	const struct cpumask *cpu_map;
686b1f4ec17SOleg Nesterov 	int cpu;
687b89deed3SOleg Nesterov 
688f293ea92SOleg Nesterov 	might_sleep();
689f293ea92SOleg Nesterov 
6903295f0efSIngo Molnar 	lock_map_acquire(&work->lockdep_map);
6913295f0efSIngo Molnar 	lock_map_release(&work->lockdep_map);
6924e6045f1SJohannes Berg 
693b89deed3SOleg Nesterov 	cwq = get_wq_data(work);
694b89deed3SOleg Nesterov 	if (!cwq)
6953af24433SOleg Nesterov 		return;
696b89deed3SOleg Nesterov 
69728e53bddSOleg Nesterov 	wq = cwq->wq;
69828e53bddSOleg Nesterov 	cpu_map = wq_cpu_map(wq);
69928e53bddSOleg Nesterov 
700aa85ea5bSRusty Russell 	for_each_cpu(cpu, cpu_map)
7014690c4abSTejun Heo 		wait_on_cpu_work(get_cwq(cpu, wq), work);
7026e84d644SOleg Nesterov }
7036e84d644SOleg Nesterov 
7041f1f642eSOleg Nesterov static int __cancel_work_timer(struct work_struct *work,
7051f1f642eSOleg Nesterov 				struct timer_list* timer)
7061f1f642eSOleg Nesterov {
7071f1f642eSOleg Nesterov 	int ret;
7081f1f642eSOleg Nesterov 
7091f1f642eSOleg Nesterov 	do {
7101f1f642eSOleg Nesterov 		ret = (timer && likely(del_timer(timer)));
7111f1f642eSOleg Nesterov 		if (!ret)
7121f1f642eSOleg Nesterov 			ret = try_to_grab_pending(work);
7131f1f642eSOleg Nesterov 		wait_on_work(work);
7141f1f642eSOleg Nesterov 	} while (unlikely(ret < 0));
7151f1f642eSOleg Nesterov 
7164d707b9fSOleg Nesterov 	clear_wq_data(work);
7171f1f642eSOleg Nesterov 	return ret;
7181f1f642eSOleg Nesterov }
7191f1f642eSOleg Nesterov 
7206e84d644SOleg Nesterov /**
7216e84d644SOleg Nesterov  * cancel_work_sync - block until a work_struct's callback has terminated
7226e84d644SOleg Nesterov  * @work: the work which is to be flushed
7236e84d644SOleg Nesterov  *
7241f1f642eSOleg Nesterov  * Returns true if @work was pending.
7251f1f642eSOleg Nesterov  *
7266e84d644SOleg Nesterov  * cancel_work_sync() will cancel the work if it is queued. If the work's
7276e84d644SOleg Nesterov  * callback appears to be running, cancel_work_sync() will block until it
7286e84d644SOleg Nesterov  * has completed.
7296e84d644SOleg Nesterov  *
7306e84d644SOleg Nesterov  * It is possible to use this function if the work re-queues itself. It can
7316e84d644SOleg Nesterov  * cancel the work even if it migrates to another workqueue, however in that
7326e84d644SOleg Nesterov  * case it only guarantees that work->func() has completed on the last queued
7336e84d644SOleg Nesterov  * workqueue.
7346e84d644SOleg Nesterov  *
7356e84d644SOleg Nesterov  * cancel_work_sync(&delayed_work->work) should be used only if ->timer is not
7356e84d644SOleg Nesterov  * pending; otherwise it goes into a busy-wait loop until the timer expires.
7376e84d644SOleg Nesterov  *
7386e84d644SOleg Nesterov  * The caller must ensure that workqueue_struct on which this work was last
7396e84d644SOleg Nesterov  * queued can't be destroyed before this function returns.
7406e84d644SOleg Nesterov  */
7411f1f642eSOleg Nesterov int cancel_work_sync(struct work_struct *work)
7426e84d644SOleg Nesterov {
7431f1f642eSOleg Nesterov 	return __cancel_work_timer(work, NULL);
744b89deed3SOleg Nesterov }
74528e53bddSOleg Nesterov EXPORT_SYMBOL_GPL(cancel_work_sync);
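
/*
 * A common module-exit sketch; illustrative only, example_* names are
 * hypothetical.  The point is that after cancel_work_sync() returns, the
 * handler is neither queued nor running, so its data may be freed:
 *
 *	static void example_exit(void)
 *	{
 *		cancel_work_sync(&example_work);
 *		kfree(example_data);
 *	}
 */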
746b89deed3SOleg Nesterov 
7476e84d644SOleg Nesterov /**
748f5a421a4SOleg Nesterov  * cancel_delayed_work_sync - reliably kill off a delayed work.
7496e84d644SOleg Nesterov  * @dwork: the delayed work struct
7506e84d644SOleg Nesterov  *
7511f1f642eSOleg Nesterov  * Returns true if @dwork was pending.
7521f1f642eSOleg Nesterov  *
7536e84d644SOleg Nesterov  * It is possible to use this function if @dwork rearms itself via queue_work()
7546e84d644SOleg Nesterov  * or queue_delayed_work(). See also the comment for cancel_work_sync().
7556e84d644SOleg Nesterov  */
7561f1f642eSOleg Nesterov int cancel_delayed_work_sync(struct delayed_work *dwork)
7576e84d644SOleg Nesterov {
7581f1f642eSOleg Nesterov 	return __cancel_work_timer(&dwork->work, &dwork->timer);
7596e84d644SOleg Nesterov }
760f5a421a4SOleg Nesterov EXPORT_SYMBOL(cancel_delayed_work_sync);
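
/*
 * A sketch of the self-rearming pattern this reliably kills; illustrative
 * only, example_* names (including example_poll_hardware()) are
 * hypothetical:
 *
 *	static void example_poll_fn(struct work_struct *work);
 *	static DECLARE_DELAYED_WORK(example_poll, example_poll_fn);
 *
 *	static void example_poll_fn(struct work_struct *work)
 *	{
 *		example_poll_hardware();
 *		schedule_delayed_work(&example_poll, HZ);
 *	}
 *
 * On teardown, a single cancel_delayed_work_sync(&example_poll) stops
 * both the pending timer and any in-flight rearm.
 */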
7611da177e4SLinus Torvalds 
7626e84d644SOleg Nesterov static struct workqueue_struct *keventd_wq __read_mostly;
7631da177e4SLinus Torvalds 
7640fcb78c2SRolf Eike Beer /**
7650fcb78c2SRolf Eike Beer  * schedule_work - put work task in global workqueue
7660fcb78c2SRolf Eike Beer  * @work: job to be done
7670fcb78c2SRolf Eike Beer  *
7685b0f437dSBart Van Assche  * Returns zero if @work was already on the kernel-global workqueue and
7695b0f437dSBart Van Assche  * non-zero otherwise.
7705b0f437dSBart Van Assche  *
7715b0f437dSBart Van Assche  * This puts a job in the kernel-global workqueue if it was not already
7725b0f437dSBart Van Assche  * queued and leaves it in the same position on the kernel-global
7735b0f437dSBart Van Assche  * workqueue otherwise.
7740fcb78c2SRolf Eike Beer  */
7757ad5b3a5SHarvey Harrison int schedule_work(struct work_struct *work)
7761da177e4SLinus Torvalds {
7771da177e4SLinus Torvalds 	return queue_work(keventd_wq, work);
7781da177e4SLinus Torvalds }
779ae90dd5dSDave Jones EXPORT_SYMBOL(schedule_work);
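
/*
 * The canonical use is deferring work from atomic context; illustrative
 * only, example_* names are hypothetical and <linux/interrupt.h> is
 * assumed for the irq types:
 *
 *	static irqreturn_t example_irq(int irq, void *dev_id)
 *	{
 *		schedule_work(&example_work);
 *		return IRQ_HANDLED;
 *	}
 *
 * The handler then runs later in keventd's process context, where it is
 * allowed to sleep.
 */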
7801da177e4SLinus Torvalds 
781c1a220e7SZhang Rui /**
782c1a220e7SZhang Rui  * schedule_work_on - put work task on a specific cpu
783c1a220e7SZhang Rui  * @cpu: cpu to put the work task on
784c1a220e7SZhang Rui  * @work: job to be done
785c1a220e7SZhang Rui  *
786c1a220e7SZhang Rui  * This puts a job on a specific cpu.
787c1a220e7SZhang Rui  */
788c1a220e7SZhang Rui int schedule_work_on(int cpu, struct work_struct *work)
789c1a220e7SZhang Rui {
790c1a220e7SZhang Rui 	return queue_work_on(cpu, keventd_wq, work);
791c1a220e7SZhang Rui }
792c1a220e7SZhang Rui EXPORT_SYMBOL(schedule_work_on);
793c1a220e7SZhang Rui 
7940fcb78c2SRolf Eike Beer /**
7950fcb78c2SRolf Eike Beer  * schedule_delayed_work - put work task in global workqueue after delay
79652bad64dSDavid Howells  * @dwork: job to be done
79752bad64dSDavid Howells  * @delay: number of jiffies to wait or 0 for immediate execution
7980fcb78c2SRolf Eike Beer  *
7990fcb78c2SRolf Eike Beer  * After waiting for a given time this puts a job in the kernel-global
8000fcb78c2SRolf Eike Beer  * workqueue.
8010fcb78c2SRolf Eike Beer  */
8027ad5b3a5SHarvey Harrison int schedule_delayed_work(struct delayed_work *dwork,
80382f67cd9SIngo Molnar 					unsigned long delay)
8041da177e4SLinus Torvalds {
80552bad64dSDavid Howells 	return queue_delayed_work(keventd_wq, dwork, delay);
8061da177e4SLinus Torvalds }
807ae90dd5dSDave Jones EXPORT_SYMBOL(schedule_delayed_work);
8081da177e4SLinus Torvalds 
8090fcb78c2SRolf Eike Beer /**
8108c53e463SLinus Torvalds  * flush_delayed_work - block until a delayed_work's callback has terminated
8118c53e463SLinus Torvalds  * @dwork: the delayed work which is to be flushed
8128c53e463SLinus Torvalds  *
8138c53e463SLinus Torvalds  * Any timeout is cancelled, and any pending work is run immediately.
8148c53e463SLinus Torvalds  */
8158c53e463SLinus Torvalds void flush_delayed_work(struct delayed_work *dwork)
8168c53e463SLinus Torvalds {
8178c53e463SLinus Torvalds 	if (del_timer_sync(&dwork->timer)) {
8184690c4abSTejun Heo 		__queue_work(get_cpu(), get_wq_data(&dwork->work)->wq,
8194690c4abSTejun Heo 			     &dwork->work);
8208c53e463SLinus Torvalds 		put_cpu();
8218c53e463SLinus Torvalds 	}
8228c53e463SLinus Torvalds 	flush_work(&dwork->work);
8238c53e463SLinus Torvalds }
8248c53e463SLinus Torvalds EXPORT_SYMBOL(flush_delayed_work);
8258c53e463SLinus Torvalds 
8268c53e463SLinus Torvalds /**
8270fcb78c2SRolf Eike Beer  * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
8280fcb78c2SRolf Eike Beer  * @cpu: cpu to use
82952bad64dSDavid Howells  * @dwork: job to be done
8300fcb78c2SRolf Eike Beer  * @delay: number of jiffies to wait
8310fcb78c2SRolf Eike Beer  *
8320fcb78c2SRolf Eike Beer  * After waiting for a given time this puts a job in the kernel-global
8330fcb78c2SRolf Eike Beer  * workqueue on the specified CPU.
8340fcb78c2SRolf Eike Beer  */
8351da177e4SLinus Torvalds int schedule_delayed_work_on(int cpu,
83652bad64dSDavid Howells 			struct delayed_work *dwork, unsigned long delay)
8371da177e4SLinus Torvalds {
83852bad64dSDavid Howells 	return queue_delayed_work_on(cpu, keventd_wq, dwork, delay);
8391da177e4SLinus Torvalds }
840ae90dd5dSDave Jones EXPORT_SYMBOL(schedule_delayed_work_on);
8411da177e4SLinus Torvalds 
842b6136773SAndrew Morton /**
843b6136773SAndrew Morton  * schedule_on_each_cpu - call a function on each online CPU from keventd
844b6136773SAndrew Morton  * @func: the function to call
845b6136773SAndrew Morton  *
846b6136773SAndrew Morton  * Returns zero on success.
847b6136773SAndrew Morton  * Returns a negative errno on failure.
848b6136773SAndrew Morton  *
849b6136773SAndrew Morton  * schedule_on_each_cpu() is very slow.
850b6136773SAndrew Morton  */
85165f27f38SDavid Howells int schedule_on_each_cpu(work_func_t func)
85215316ba8SChristoph Lameter {
85315316ba8SChristoph Lameter 	int cpu;
85465a64464SAndi Kleen 	int orig = -1;
855b6136773SAndrew Morton 	struct work_struct *works;
85615316ba8SChristoph Lameter 
857b6136773SAndrew Morton 	works = alloc_percpu(struct work_struct);
858b6136773SAndrew Morton 	if (!works)
85915316ba8SChristoph Lameter 		return -ENOMEM;
860b6136773SAndrew Morton 
86195402b38SGautham R Shenoy 	get_online_cpus();
86293981800STejun Heo 
86393981800STejun Heo 	 * When running in keventd, don't schedule a work item on
86493981800STejun Heo 	 * itself.  We can just call the function directly because the
86593981800STejun Heo 	 * work queue is already bound; this is also faster.
86693981800STejun Heo 	 * already bound.  This also is faster.
86793981800STejun Heo 	 */
86893981800STejun Heo 	if (current_is_keventd())
86993981800STejun Heo 		orig = raw_smp_processor_id();
87093981800STejun Heo 
87115316ba8SChristoph Lameter 	for_each_online_cpu(cpu) {
8729bfb1839SIngo Molnar 		struct work_struct *work = per_cpu_ptr(works, cpu);
8739bfb1839SIngo Molnar 
8749bfb1839SIngo Molnar 		INIT_WORK(work, func);
87593981800STejun Heo 		if (cpu != orig)
8768de6d308SOleg Nesterov 			schedule_work_on(cpu, work);
87715316ba8SChristoph Lameter 	}
87893981800STejun Heo 	if (orig >= 0)
87993981800STejun Heo 		func(per_cpu_ptr(works, orig));
88093981800STejun Heo 
88193981800STejun Heo 	for_each_online_cpu(cpu)
8828616a89aSOleg Nesterov 		flush_work(per_cpu_ptr(works, cpu));
88393981800STejun Heo 
88495402b38SGautham R Shenoy 	put_online_cpus();
885b6136773SAndrew Morton 	free_percpu(works);
88615316ba8SChristoph Lameter 	return 0;
88715316ba8SChristoph Lameter }
88815316ba8SChristoph Lameter 
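/*
 * A usage sketch; illustrative only, example_* names are hypothetical.
 * Run a per-CPU cache drain on every online CPU and wait for all of
 * them:
 *
 *	static void example_drain(struct work_struct *unused)
 *	{
 *		example_drain_local_caches();
 *	}
 *
 *	if (schedule_on_each_cpu(example_drain))
 *		printk(KERN_WARNING "example drain failed\n");
 */
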
889eef6a7d5SAlan Stern /**
890eef6a7d5SAlan Stern  * flush_scheduled_work - ensure that any scheduled work has run to completion.
891eef6a7d5SAlan Stern  *
892eef6a7d5SAlan Stern  * Forces execution of the kernel-global workqueue and blocks until its
893eef6a7d5SAlan Stern  * completion.
894eef6a7d5SAlan Stern  *
895eef6a7d5SAlan Stern  * Think twice before calling this function!  It's very easy to get into
896eef6a7d5SAlan Stern  * trouble if you don't take great care.  Either of the following situations
897eef6a7d5SAlan Stern  * will lead to deadlock:
898eef6a7d5SAlan Stern  *
899eef6a7d5SAlan Stern  *	One of the work items currently on the workqueue needs to acquire
900eef6a7d5SAlan Stern  *	a lock held by your code or its caller.
901eef6a7d5SAlan Stern  *
902eef6a7d5SAlan Stern  *	Your code is running in the context of a work routine.
903eef6a7d5SAlan Stern  *
904eef6a7d5SAlan Stern  * They will be detected by lockdep when they occur, but the first might not
905eef6a7d5SAlan Stern  * occur very often.  It depends on what work items are on the workqueue and
906eef6a7d5SAlan Stern  * what locks they need, which you have no control over.
907eef6a7d5SAlan Stern  *
908eef6a7d5SAlan Stern  * In most situations flushing the entire workqueue is overkill; you merely
909eef6a7d5SAlan Stern  * need to know that a particular work item isn't queued and isn't running.
910eef6a7d5SAlan Stern  * In such cases you should use cancel_delayed_work_sync() or
911eef6a7d5SAlan Stern  * cancel_work_sync() instead.
912eef6a7d5SAlan Stern  */
9131da177e4SLinus Torvalds void flush_scheduled_work(void)
9141da177e4SLinus Torvalds {
9151da177e4SLinus Torvalds 	flush_workqueue(keventd_wq);
9161da177e4SLinus Torvalds }
917ae90dd5dSDave Jones EXPORT_SYMBOL(flush_scheduled_work);
9181da177e4SLinus Torvalds 
9191da177e4SLinus Torvalds /**
9201fa44ecaSJames Bottomley  * execute_in_process_context - reliably execute the routine with user context
9211fa44ecaSJames Bottomley  * @fn:		the function to execute
9221fa44ecaSJames Bottomley  * @ew:		guaranteed storage for the execute work structure (must
9231fa44ecaSJames Bottomley  *		be available when the work executes)
9241fa44ecaSJames Bottomley  *
9251fa44ecaSJames Bottomley  * Executes the function immediately if process context is available,
9261fa44ecaSJames Bottomley  * otherwise schedules the function for delayed execution.
9271fa44ecaSJames Bottomley  *
9281fa44ecaSJames Bottomley  * Returns:	0 - function was executed
9291fa44ecaSJames Bottomley  *		1 - function was scheduled for execution
9301fa44ecaSJames Bottomley  */
93165f27f38SDavid Howells int execute_in_process_context(work_func_t fn, struct execute_work *ew)
9321fa44ecaSJames Bottomley {
9331fa44ecaSJames Bottomley 	if (!in_interrupt()) {
93465f27f38SDavid Howells 		fn(&ew->work);
9351fa44ecaSJames Bottomley 		return 0;
9361fa44ecaSJames Bottomley 	}
9371fa44ecaSJames Bottomley 
93865f27f38SDavid Howells 	INIT_WORK(&ew->work, fn);
9391fa44ecaSJames Bottomley 	schedule_work(&ew->work);
9401fa44ecaSJames Bottomley 
9411fa44ecaSJames Bottomley 	return 1;
9421fa44ecaSJames Bottomley }
9431fa44ecaSJames Bottomley EXPORT_SYMBOL_GPL(execute_in_process_context);
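
/*
 * A minimal sketch; illustrative only, the example_dev structure is
 * hypothetical.  @ew typically lives inside an object that outlives the
 * call, so the handler can recover it with container_of():
 *
 *	struct example_dev {
 *		struct execute_work ew;
 *	};
 *
 *	static void example_release(struct work_struct *work)
 *	{
 *		struct example_dev *dev =
 *			container_of(work, struct example_dev, ew.work);
 *		kfree(dev);
 *	}
 *
 *	execute_in_process_context(example_release, &dev->ew);
 */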
9441fa44ecaSJames Bottomley 
9451da177e4SLinus Torvalds int keventd_up(void)
9461da177e4SLinus Torvalds {
9471da177e4SLinus Torvalds 	return keventd_wq != NULL;
9481da177e4SLinus Torvalds }
9491da177e4SLinus Torvalds 
9501da177e4SLinus Torvalds int current_is_keventd(void)
9511da177e4SLinus Torvalds {
9521da177e4SLinus Torvalds 	struct cpu_workqueue_struct *cwq;
953d243769dSHugh Dickins 	int cpu = raw_smp_processor_id(); /* preempt-safe: keventd is per-cpu */
9541da177e4SLinus Torvalds 	int ret = 0;
9551da177e4SLinus Torvalds 
9561da177e4SLinus Torvalds 	BUG_ON(!keventd_wq);
9571da177e4SLinus Torvalds 
95889ada679SChristoph Lameter 	cwq = per_cpu_ptr(keventd_wq->cpu_wq, cpu);
9591da177e4SLinus Torvalds 	if (current == cwq->thread)
9601da177e4SLinus Torvalds 		ret = 1;
9611da177e4SLinus Torvalds 
9621da177e4SLinus Torvalds 	return ret;
9631da177e4SLinus Torvalds 
9641da177e4SLinus Torvalds }
9651da177e4SLinus Torvalds 
9663af24433SOleg Nesterov static struct cpu_workqueue_struct *
9673af24433SOleg Nesterov init_cpu_workqueue(struct workqueue_struct *wq, int cpu)
9681da177e4SLinus Torvalds {
96989ada679SChristoph Lameter 	struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);
9703af24433SOleg Nesterov 
9713af24433SOleg Nesterov 	cwq->wq = wq;
9723af24433SOleg Nesterov 	spin_lock_init(&cwq->lock);
9733af24433SOleg Nesterov 	INIT_LIST_HEAD(&cwq->worklist);
9743af24433SOleg Nesterov 	init_waitqueue_head(&cwq->more_work);
9753af24433SOleg Nesterov 
9763af24433SOleg Nesterov 	return cwq;
9773af24433SOleg Nesterov }
9783af24433SOleg Nesterov 
9793af24433SOleg Nesterov static int create_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
9803af24433SOleg Nesterov {
9813af24433SOleg Nesterov 	struct workqueue_struct *wq = cwq->wq;
9826cc88bc4SDavid Howells 	const char *fmt = is_wq_single_threaded(wq) ? "%s" : "%s/%d";
9833af24433SOleg Nesterov 	struct task_struct *p;
9843af24433SOleg Nesterov 
9853af24433SOleg Nesterov 	p = kthread_create(worker_thread, cwq, fmt, wq->name, cpu);
9863af24433SOleg Nesterov 	/*
9873af24433SOleg Nesterov 	 * Nobody can add the work_struct to this cwq,
9883af24433SOleg Nesterov 	 *	if (caller is __create_workqueue)
9893af24433SOleg Nesterov 	 *		nobody should see this wq
9903af24433SOleg Nesterov 	 *	else // caller is CPU_UP_PREPARE
9913af24433SOleg Nesterov 	 *		cpu is not on cpu_online_map
9923af24433SOleg Nesterov 	 * so we can abort safely.
9933af24433SOleg Nesterov 	 */
9943af24433SOleg Nesterov 	if (IS_ERR(p))
9953af24433SOleg Nesterov 		return PTR_ERR(p);
9963af24433SOleg Nesterov 	cwq->thread = p;
9973af24433SOleg Nesterov 
998e1d8aa9fSFrederic Weisbecker 	trace_workqueue_creation(cwq->thread, cpu);
999e1d8aa9fSFrederic Weisbecker 
10003af24433SOleg Nesterov 	return 0;
10013af24433SOleg Nesterov }
10023af24433SOleg Nesterov 
100306ba38a9SOleg Nesterov static void start_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
100406ba38a9SOleg Nesterov {
100506ba38a9SOleg Nesterov 	struct task_struct *p = cwq->thread;
100606ba38a9SOleg Nesterov 
100706ba38a9SOleg Nesterov 	if (p != NULL) {
100806ba38a9SOleg Nesterov 		if (cpu >= 0)
100906ba38a9SOleg Nesterov 			kthread_bind(p, cpu);
101006ba38a9SOleg Nesterov 		wake_up_process(p);
101106ba38a9SOleg Nesterov 	}
101206ba38a9SOleg Nesterov }
101306ba38a9SOleg Nesterov 
10144e6045f1SJohannes Berg struct workqueue_struct *__create_workqueue_key(const char *name,
101597e37d7bSTejun Heo 						unsigned int flags,
1016eb13ba87SJohannes Berg 						struct lock_class_key *key,
1017eb13ba87SJohannes Berg 						const char *lock_name)
10183af24433SOleg Nesterov {
10193af24433SOleg Nesterov 	struct workqueue_struct *wq;
10203af24433SOleg Nesterov 	struct cpu_workqueue_struct *cwq;
10213af24433SOleg Nesterov 	int err = 0, cpu;
10223af24433SOleg Nesterov 
10233af24433SOleg Nesterov 	wq = kzalloc(sizeof(*wq), GFP_KERNEL);
10243af24433SOleg Nesterov 	if (!wq)
10254690c4abSTejun Heo 		goto err;
10263af24433SOleg Nesterov 
10273af24433SOleg Nesterov 	wq->cpu_wq = alloc_percpu(struct cpu_workqueue_struct);
10284690c4abSTejun Heo 	if (!wq->cpu_wq)
10294690c4abSTejun Heo 		goto err;
10303af24433SOleg Nesterov 
103197e37d7bSTejun Heo 	wq->flags = flags;
10323af24433SOleg Nesterov 	wq->name = name;
1033eb13ba87SJohannes Berg 	lockdep_init_map(&wq->lockdep_map, lock_name, key, 0);
1034cce1a165SOleg Nesterov 	INIT_LIST_HEAD(&wq->list);
10353af24433SOleg Nesterov 
103697e37d7bSTejun Heo 	if (flags & WQ_SINGLE_THREAD) {
10373af24433SOleg Nesterov 		cwq = init_cpu_workqueue(wq, singlethread_cpu);
10383af24433SOleg Nesterov 		err = create_workqueue_thread(cwq, singlethread_cpu);
103906ba38a9SOleg Nesterov 		start_workqueue_thread(cwq, -1);
10403af24433SOleg Nesterov 	} else {
10413da1c84cSOleg Nesterov 		cpu_maps_update_begin();
10426af8bf3dSOleg Nesterov 		/*
10436af8bf3dSOleg Nesterov 		 * We must place this wq on the list even if the code below fails.
10446af8bf3dSOleg Nesterov 		 * cpu_down(cpu) can remove cpu from cpu_populated_map before
10456af8bf3dSOleg Nesterov 		 * destroy_workqueue() takes the lock, in that case we leak
10466af8bf3dSOleg Nesterov 		 * cwq[cpu]->thread.
10476af8bf3dSOleg Nesterov 		 */
104895402b38SGautham R Shenoy 		spin_lock(&workqueue_lock);
10493af24433SOleg Nesterov 		list_add(&wq->list, &workqueues);
105095402b38SGautham R Shenoy 		spin_unlock(&workqueue_lock);
10516af8bf3dSOleg Nesterov 		/*
10526af8bf3dSOleg Nesterov 		 * We must initialize cwqs for each possible cpu even if we
10536af8bf3dSOleg Nesterov 		 * are going to call destroy_workqueue() finally. Otherwise
10546af8bf3dSOleg Nesterov 		 * cpu_up() can hit the uninitialized cwq once we drop the
10556af8bf3dSOleg Nesterov 		 * lock.
10566af8bf3dSOleg Nesterov 		 */
10573af24433SOleg Nesterov 		for_each_possible_cpu(cpu) {
10583af24433SOleg Nesterov 			cwq = init_cpu_workqueue(wq, cpu);
10593af24433SOleg Nesterov 			if (err || !cpu_online(cpu))
10603af24433SOleg Nesterov 				continue;
10613af24433SOleg Nesterov 			err = create_workqueue_thread(cwq, cpu);
106206ba38a9SOleg Nesterov 			start_workqueue_thread(cwq, cpu);
10633af24433SOleg Nesterov 		}
10643da1c84cSOleg Nesterov 		cpu_maps_update_done();
10653af24433SOleg Nesterov 	}
10663af24433SOleg Nesterov 
10673af24433SOleg Nesterov 	if (err) {
10683af24433SOleg Nesterov 		destroy_workqueue(wq);
10693af24433SOleg Nesterov 		wq = NULL;
10703af24433SOleg Nesterov 	}
10713af24433SOleg Nesterov 	return wq;
10724690c4abSTejun Heo err:
10734690c4abSTejun Heo 	if (wq) {
10744690c4abSTejun Heo 		free_percpu(wq->cpu_wq);
10754690c4abSTejun Heo 		kfree(wq);
10764690c4abSTejun Heo 	}
10774690c4abSTejun Heo 	return NULL;
10783af24433SOleg Nesterov }
10794e6045f1SJohannes Berg EXPORT_SYMBOL_GPL(__create_workqueue_key);
10803af24433SOleg Nesterov 
10811e35eaa2SOleg Nesterov static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq)
10823af24433SOleg Nesterov {
10833af24433SOleg Nesterov 	/*
10843da1c84cSOleg Nesterov 	 * Our caller is either destroy_workqueue() or CPU_POST_DEAD,
10853da1c84cSOleg Nesterov 	 * cpu_add_remove_lock protects cwq->thread.
10863af24433SOleg Nesterov 	 */
108714441960SOleg Nesterov 	if (cwq->thread == NULL)
108814441960SOleg Nesterov 		return;
108914441960SOleg Nesterov 
10903295f0efSIngo Molnar 	lock_map_acquire(&cwq->wq->lockdep_map);
10913295f0efSIngo Molnar 	lock_map_release(&cwq->wq->lockdep_map);
10924e6045f1SJohannes Berg 
109313c22168SOleg Nesterov 	flush_cpu_workqueue(cwq);
109414441960SOleg Nesterov 	/*
10953da1c84cSOleg Nesterov 	 * If the caller is CPU_POST_DEAD and cwq->worklist was not empty,
109613c22168SOleg Nesterov 	 * a concurrent flush_workqueue() can insert a barrier after us.
109713c22168SOleg Nesterov 	 * However, in that case run_workqueue() won't return and check
109813c22168SOleg Nesterov 	 * kthread_should_stop() until it flushes all work_struct's.
109914441960SOleg Nesterov 	 * When ->worklist becomes empty it is safe to exit because no
110014441960SOleg Nesterov 	 * more work_structs can be queued on this cwq: flush_workqueue
110114441960SOleg Nesterov 	 * checks list_empty(), and a "normal" queue_work() can't use
110214441960SOleg Nesterov 	 * a dead CPU.
110314441960SOleg Nesterov 	 */
1104e1d8aa9fSFrederic Weisbecker 	trace_workqueue_destruction(cwq->thread);
110514441960SOleg Nesterov 	kthread_stop(cwq->thread);
110614441960SOleg Nesterov 	cwq->thread = NULL;
11071da177e4SLinus Torvalds }
11081da177e4SLinus Torvalds 
11093af24433SOleg Nesterov /**
11103af24433SOleg Nesterov  * destroy_workqueue - safely terminate a workqueue
11113af24433SOleg Nesterov  * @wq: target workqueue
11123af24433SOleg Nesterov  *
11133af24433SOleg Nesterov  * Safely destroy a workqueue. All work currently pending will be done first.
11143af24433SOleg Nesterov  */
11153af24433SOleg Nesterov void destroy_workqueue(struct workqueue_struct *wq)
11163af24433SOleg Nesterov {
1117e7577c50SRusty Russell 	const struct cpumask *cpu_map = wq_cpu_map(wq);
11183af24433SOleg Nesterov 	int cpu;
11193af24433SOleg Nesterov 
11203da1c84cSOleg Nesterov 	cpu_maps_update_begin();
112195402b38SGautham R Shenoy 	spin_lock(&workqueue_lock);
11223af24433SOleg Nesterov 	list_del(&wq->list);
112395402b38SGautham R Shenoy 	spin_unlock(&workqueue_lock);
11243af24433SOleg Nesterov 
1125aa85ea5bSRusty Russell 	for_each_cpu(cpu, cpu_map)
11261e35eaa2SOleg Nesterov 		cleanup_workqueue_thread(per_cpu_ptr(wq->cpu_wq, cpu));
11273da1c84cSOleg Nesterov 	cpu_maps_update_done();
11283af24433SOleg Nesterov 
11293af24433SOleg Nesterov 	free_percpu(wq->cpu_wq);
11303af24433SOleg Nesterov 	kfree(wq);
11313af24433SOleg Nesterov }
11323af24433SOleg Nesterov EXPORT_SYMBOL_GPL(destroy_workqueue);
11333af24433SOleg Nesterov 
11349c7b216dSChandra Seetharaman static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
11351da177e4SLinus Torvalds 						unsigned long action,
11361da177e4SLinus Torvalds 						void *hcpu)
11371da177e4SLinus Torvalds {
11383af24433SOleg Nesterov 	unsigned int cpu = (unsigned long)hcpu;
11393af24433SOleg Nesterov 	struct cpu_workqueue_struct *cwq;
11401da177e4SLinus Torvalds 	struct workqueue_struct *wq;
114180b5184cSAkinobu Mita 	int err = 0;
11421da177e4SLinus Torvalds 
11438bb78442SRafael J. Wysocki 	action &= ~CPU_TASKS_FROZEN;
11448bb78442SRafael J. Wysocki 
11451da177e4SLinus Torvalds 	switch (action) {
11463af24433SOleg Nesterov 	case CPU_UP_PREPARE:
1147e7577c50SRusty Russell 		cpumask_set_cpu(cpu, cpu_populated_map);
11483af24433SOleg Nesterov 	}
11498448502cSOleg Nesterov undo:
11501da177e4SLinus Torvalds 	list_for_each_entry(wq, &workqueues, list) {
11513af24433SOleg Nesterov 		cwq = per_cpu_ptr(wq->cpu_wq, cpu);
11523af24433SOleg Nesterov 
11533af24433SOleg Nesterov 		switch (action) {
11543af24433SOleg Nesterov 		case CPU_UP_PREPARE:
115580b5184cSAkinobu Mita 			err = create_workqueue_thread(cwq, cpu);
115680b5184cSAkinobu Mita 			if (!err)
11571da177e4SLinus Torvalds 				break;
115895402b38SGautham R Shenoy 			printk(KERN_ERR "workqueue [%s] for %i failed\n",
115995402b38SGautham R Shenoy 				wq->name, cpu);
11608448502cSOleg Nesterov 			action = CPU_UP_CANCELED;
116180b5184cSAkinobu Mita 			err = -ENOMEM;
11628448502cSOleg Nesterov 			goto undo;
11631da177e4SLinus Torvalds 
11641da177e4SLinus Torvalds 		case CPU_ONLINE:
116506ba38a9SOleg Nesterov 			start_workqueue_thread(cwq, cpu);
11661da177e4SLinus Torvalds 			break;
11671da177e4SLinus Torvalds 
11681da177e4SLinus Torvalds 		case CPU_UP_CANCELED:
116906ba38a9SOleg Nesterov 			start_workqueue_thread(cwq, -1);
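			/* fall through */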
11703da1c84cSOleg Nesterov 		case CPU_POST_DEAD:
11711e35eaa2SOleg Nesterov 			cleanup_workqueue_thread(cwq);
11721da177e4SLinus Torvalds 			break;
11731da177e4SLinus Torvalds 		}
11743af24433SOleg Nesterov 	}
11751da177e4SLinus Torvalds 
117600dfcaf7SOleg Nesterov 	switch (action) {
117700dfcaf7SOleg Nesterov 	case CPU_UP_CANCELED:
11783da1c84cSOleg Nesterov 	case CPU_POST_DEAD:
1179e7577c50SRusty Russell 		cpumask_clear_cpu(cpu, cpu_populated_map);
118000dfcaf7SOleg Nesterov 	}
118100dfcaf7SOleg Nesterov 
118280b5184cSAkinobu Mita 	return notifier_from_errno(err);
11831da177e4SLinus Torvalds }
11841da177e4SLinus Torvalds 
11852d3854a3SRusty Russell #ifdef CONFIG_SMP
11868ccad40dSRusty Russell 
11872d3854a3SRusty Russell struct work_for_cpu {
11886b44003eSAndrew Morton 	struct completion completion;
11892d3854a3SRusty Russell 	long (*fn)(void *);
11902d3854a3SRusty Russell 	void *arg;
11912d3854a3SRusty Russell 	long ret;
11922d3854a3SRusty Russell };
11932d3854a3SRusty Russell 
11946b44003eSAndrew Morton static int do_work_for_cpu(void *_wfc)
11952d3854a3SRusty Russell {
11966b44003eSAndrew Morton 	struct work_for_cpu *wfc = _wfc;
11972d3854a3SRusty Russell 	wfc->ret = wfc->fn(wfc->arg);
11986b44003eSAndrew Morton 	complete(&wfc->completion);
11996b44003eSAndrew Morton 	return 0;
12002d3854a3SRusty Russell }
12012d3854a3SRusty Russell 
12022d3854a3SRusty Russell /**
12032d3854a3SRusty Russell  * work_on_cpu - run a function in user context on a particular cpu
12042d3854a3SRusty Russell  * @cpu: the cpu to run on
12052d3854a3SRusty Russell  * @fn: the function to run
12062d3854a3SRusty Russell  * @arg: the function arg
12072d3854a3SRusty Russell  *
120831ad9081SRusty Russell  * This will return the value @fn returns.
120931ad9081SRusty Russell  * It is up to the caller to ensure that the cpu doesn't go offline.
12106b44003eSAndrew Morton  * The caller must not hold any locks which would prevent @fn from completing.
12112d3854a3SRusty Russell  */
12122d3854a3SRusty Russell long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
12132d3854a3SRusty Russell {
12146b44003eSAndrew Morton 	struct task_struct *sub_thread;
12156b44003eSAndrew Morton 	struct work_for_cpu wfc = {
12166b44003eSAndrew Morton 		.completion = COMPLETION_INITIALIZER_ONSTACK(wfc.completion),
12176b44003eSAndrew Morton 		.fn = fn,
12186b44003eSAndrew Morton 		.arg = arg,
12196b44003eSAndrew Morton 	};
12202d3854a3SRusty Russell 
12216b44003eSAndrew Morton 	sub_thread = kthread_create(do_work_for_cpu, &wfc, "work_for_cpu");
12226b44003eSAndrew Morton 	if (IS_ERR(sub_thread))
12236b44003eSAndrew Morton 		return PTR_ERR(sub_thread);
12246b44003eSAndrew Morton 	kthread_bind(sub_thread, cpu);
12256b44003eSAndrew Morton 	wake_up_process(sub_thread);
12266b44003eSAndrew Morton 	wait_for_completion(&wfc.completion);
12272d3854a3SRusty Russell 	return wfc.ret;
12282d3854a3SRusty Russell }
12292d3854a3SRusty Russell EXPORT_SYMBOL_GPL(work_on_cpu);
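
/*
 * A minimal usage sketch; illustrative only, example_rdmsr() is
 * hypothetical.  The caller is responsible for keeping @cpu online,
 * e.g. via get_online_cpus():
 *
 *	static long example_read_msr(void *arg)
 *	{
 *		return example_rdmsr((unsigned long)arg);
 *	}
 *
 *	ret = work_on_cpu(cpu, example_read_msr, (void *)msr);
 */
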
12302d3854a3SRusty Russell #endif /* CONFIG_SMP */
12312d3854a3SRusty Russell 
1232c12920d1SOleg Nesterov void __init init_workqueues(void)
12331da177e4SLinus Torvalds {
1234e7577c50SRusty Russell 	alloc_cpumask_var(&cpu_populated_map, GFP_KERNEL);
1235e7577c50SRusty Russell 
1236e7577c50SRusty Russell 	cpumask_copy(cpu_populated_map, cpu_online_mask);
1237e7577c50SRusty Russell 	singlethread_cpu = cpumask_first(cpu_possible_mask);
1238e7577c50SRusty Russell 	cpu_singlethread_map = cpumask_of(singlethread_cpu);
12391da177e4SLinus Torvalds 	hotcpu_notifier(workqueue_cpu_callback, 0);
12401da177e4SLinus Torvalds 	keventd_wq = create_workqueue("events");
12411da177e4SLinus Torvalds 	BUG_ON(!keventd_wq);
12421da177e4SLinus Torvalds }
1243