/*
 * linux/kernel/workqueue.c
 *
 * Generic mechanism for defining kernel helper threads for running
 * arbitrary tasks in process context.
 *
 * Started by Ingo Molnar, Copyright (C) 2002
 *
 * Derived from the taskqueue/keventd code by:
 *
 *   David Woodhouse <[email protected]>
 *   Andrew Morton <[email protected]>
 *   Kai Petzke <[email protected]>
 *   Theodore Ts'o <[email protected]>
 *
 * Made to use alloc_percpu by Christoph Lameter <[email protected]>.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/mempolicy.h>
#include <linux/freezer.h>
#include <linux/kallsyms.h>
#include <linux/debug_locks.h>

/*
 * The per-CPU workqueue (if the workqueue is single-threaded, we always
 * use the first possible CPU).
 */
struct cpu_workqueue_struct {

	spinlock_t lock;

	struct list_head worklist;
	wait_queue_head_t more_work;
	struct work_struct *current_work;

	struct workqueue_struct *wq;
	struct task_struct *thread;
	int should_stop;

	int run_depth;		/* Detect run_workqueue() recursion depth */
} ____cacheline_aligned;

/*
 * The externally visible workqueue abstraction is an array of
 * per-CPU workqueues:
 */
struct workqueue_struct {
	struct cpu_workqueue_struct *cpu_wq;
	struct list_head list;
	const char *name;
	int singlethread;
	int freezeable;		/* Freeze threads during suspend */
};

/* All the workqueues on the system, so that CPU hotplug can add/remove
   worker threads to each one as CPUs come and go. */
static DEFINE_MUTEX(workqueue_mutex);
static LIST_HEAD(workqueues);

static int singlethread_cpu __read_mostly;
static cpumask_t cpu_singlethread_map __read_mostly;
/* optimization, we could use cpu_possible_map */
static cpumask_t cpu_populated_map __read_mostly;

/* If it's single threaded, it isn't in the list of workqueues. */
static inline int is_single_threaded(struct workqueue_struct *wq)
{
	return wq->singlethread;
}

static const cpumask_t *wq_cpu_map(struct workqueue_struct *wq)
{
	return is_single_threaded(wq)
		? &cpu_singlethread_map : &cpu_populated_map;
}

/*
 * Set the workqueue on which a work item is to be run
 * - Must *only* be called if the pending flag is set
 */
static inline void set_wq_data(struct work_struct *work,
				struct cpu_workqueue_struct *cwq)
{
	unsigned long new;

	BUG_ON(!work_pending(work));

	new = (unsigned long) cwq | (1UL << WORK_STRUCT_PENDING);
	new |= WORK_STRUCT_FLAG_MASK & *work_data_bits(work);
	atomic_long_set(&work->data, new);
}

static inline
struct cpu_workqueue_struct *get_wq_data(struct work_struct *work)
{
	return (void *) (atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK);
}

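/*
 * Editor's note (illustrative, not from the original source): work->data
 * packs the owning cwq pointer and the flag bits into one atomic_long_t.
 * Because cpu_workqueue_struct is cacheline-aligned, the low bits of its
 * address are zero and can carry the flags, roughly:
 *
 *	bit 0				WORK_STRUCT_PENDING
 *	bit 1				WORK_STRUCT_NOAUTOREL
 *	remaining high bits		cwq pointer (WORK_STRUCT_WQ_DATA_MASK)
 *
 * The authoritative bit assignments live in <linux/workqueue.h>; this only
 * sketches the packing idea behind set_wq_data()/get_wq_data().
 */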
static void insert_work(struct cpu_workqueue_struct *cwq,
				struct work_struct *work, int tail)
{
	set_wq_data(work, cwq);
	if (tail)
		list_add_tail(&work->entry, &cwq->worklist);
	else
		list_add(&work->entry, &cwq->worklist);
	wake_up(&cwq->more_work);
}

/* Preempt must be disabled. */
static void __queue_work(struct cpu_workqueue_struct *cwq,
			 struct work_struct *work)
{
	unsigned long flags;

	spin_lock_irqsave(&cwq->lock, flags);
	insert_work(cwq, work, 1);
	spin_unlock_irqrestore(&cwq->lock, flags);
}

/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to the CPU on which it was submitted, but there is no
 * guarantee that it will be processed by that CPU.
 */
int fastcall queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
	int ret = 0, cpu = get_cpu();

	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
		if (unlikely(is_single_threaded(wq)))
			cpu = singlethread_cpu;
		BUG_ON(!list_empty(&work->entry));
		__queue_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
		ret = 1;
	}
	put_cpu();
	return ret;
}
EXPORT_SYMBOL_GPL(queue_work);

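/*
 * Illustrative usage sketch (editor's addition; my_wq, my_work and
 * my_work_fn are hypothetical names, not part of this file):
 *
 *	static void my_work_fn(struct work_struct *work);
 *	static DECLARE_WORK(my_work, my_work_fn);
 *	static struct workqueue_struct *my_wq;
 *
 *	my_wq = create_workqueue("my_wq");
 *	queue_work(my_wq, &my_work);
 *
 * The first call returns 1; a second queue_work() before my_work_fn() has
 * started returns 0, because the PENDING bit is still set.
 */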
void delayed_work_timer_fn(unsigned long __data)
{
	struct delayed_work *dwork = (struct delayed_work *)__data;
	struct cpu_workqueue_struct *cwq = get_wq_data(&dwork->work);
	struct workqueue_struct *wq = cwq->wq;
	int cpu = smp_processor_id();

	if (unlikely(is_single_threaded(wq)))
		cpu = singlethread_cpu;

	__queue_work(per_cpu_ptr(wq->cpu_wq, cpu), &dwork->work);
}

/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @dwork was already on a queue, non-zero otherwise.
 */
int fastcall queue_delayed_work(struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	int ret = 0;
	struct timer_list *timer = &dwork->timer;
	struct work_struct *work = &dwork->work;

	timer_stats_timer_set_start_info(timer);
	if (delay == 0)
		return queue_work(wq, work);

	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
		BUG_ON(timer_pending(timer));
		BUG_ON(!list_empty(&work->entry));

		/* This stores cwq for the moment, for the timer_fn */
		set_wq_data(work,
			per_cpu_ptr(wq->cpu_wq, raw_smp_processor_id()));
		timer->expires = jiffies + delay;
		timer->data = (unsigned long)dwork;
		timer->function = delayed_work_timer_fn;
		add_timer(timer);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_delayed_work);

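/*
 * Illustrative usage sketch (editor's addition; poll_fn and poll_work are
 * hypothetical).  The delayed_work embeds a timer whose expiry handler,
 * delayed_work_timer_fn() above, queues the underlying work_struct:
 *
 *	static void poll_fn(struct work_struct *work);
 *	static DECLARE_DELAYED_WORK(poll_work, poll_fn);
 *
 *	queue_delayed_work(my_wq, &poll_work, msecs_to_jiffies(100));
 *
 * With delay == 0 this degenerates into a plain queue_work().
 */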
/**
 * queue_delayed_work_on - queue work on specific CPU after delay
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @dwork was already on a queue, non-zero otherwise.
 */
int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	int ret = 0;
	struct timer_list *timer = &dwork->timer;
	struct work_struct *work = &dwork->work;

	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
		BUG_ON(timer_pending(timer));
		BUG_ON(!list_empty(&work->entry));

		/* This stores cwq for the moment, for the timer_fn */
		set_wq_data(work,
			per_cpu_ptr(wq->cpu_wq, raw_smp_processor_id()));
		timer->expires = jiffies + delay;
		timer->data = (unsigned long)dwork;
		timer->function = delayed_work_timer_fn;
		add_timer_on(timer, cpu);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_delayed_work_on);

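/*
 * Illustrative sketch (editor's addition): the only difference from
 * queue_delayed_work() is that the timer, and hence the work, is pinned
 * to an explicit CPU via add_timer_on().  CPU 2 and the HZ delay below
 * are arbitrary example values:
 *
 *	queue_delayed_work_on(2, my_wq, &poll_work, HZ);
 */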
static void run_workqueue(struct cpu_workqueue_struct *cwq)
{
	spin_lock_irq(&cwq->lock);
	cwq->run_depth++;
	if (cwq->run_depth > 3) {
		/* morton gets to eat his hat */
		printk("%s: recursion depth exceeded: %d\n",
			__FUNCTION__, cwq->run_depth);
		dump_stack();
	}
	while (!list_empty(&cwq->worklist)) {
		struct work_struct *work = list_entry(cwq->worklist.next,
						struct work_struct, entry);
		work_func_t f = work->func;

		cwq->current_work = work;
		list_del_init(cwq->worklist.next);
		spin_unlock_irq(&cwq->lock);

		BUG_ON(get_wq_data(work) != cwq);
		if (!test_bit(WORK_STRUCT_NOAUTOREL, work_data_bits(work)))
			work_release(work);
		f(work);

		if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
			printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
					"%s/0x%08x/%d\n",
					current->comm, preempt_count(),
					current->pid);
			printk(KERN_ERR "    last function: ");
			print_symbol("%s\n", (unsigned long)f);
			debug_show_held_locks(current);
			dump_stack();
		}

		spin_lock_irq(&cwq->lock);
		cwq->current_work = NULL;
	}
	cwq->run_depth--;
	spin_unlock_irq(&cwq->lock);
}

/*
 * NOTE: the caller must not touch *cwq if this func returns true
 */
static int cwq_should_stop(struct cpu_workqueue_struct *cwq)
{
	int should_stop = cwq->should_stop;

	if (unlikely(should_stop)) {
		spin_lock_irq(&cwq->lock);
		should_stop = cwq->should_stop && list_empty(&cwq->worklist);
		if (should_stop)
			cwq->thread = NULL;
		spin_unlock_irq(&cwq->lock);
	}

	return should_stop;
}

static int worker_thread(void *__cwq)
{
	struct cpu_workqueue_struct *cwq = __cwq;
	DEFINE_WAIT(wait);
	struct k_sigaction sa;
	sigset_t blocked;

	if (!cwq->wq->freezeable)
		current->flags |= PF_NOFREEZE;

	set_user_nice(current, -5);

	/* Block and flush all signals */
	sigfillset(&blocked);
	sigprocmask(SIG_BLOCK, &blocked, NULL);
	flush_signals(current);

	/*
	 * We inherited MPOL_INTERLEAVE from the booting kernel.
	 * Set MPOL_DEFAULT to ensure node-local allocations.
	 */
	numa_default_policy();

	/* SIG_IGN makes children autoreap: see do_notify_parent(). */
	sa.sa.sa_handler = SIG_IGN;
	sa.sa.sa_flags = 0;
	siginitset(&sa.sa.sa_mask, sigmask(SIGCHLD));
	do_sigaction(SIGCHLD, &sa, (struct k_sigaction *)0);

	for (;;) {
		if (cwq->wq->freezeable)
			try_to_freeze();

		prepare_to_wait(&cwq->more_work, &wait, TASK_INTERRUPTIBLE);
		if (!cwq->should_stop && list_empty(&cwq->worklist))
			schedule();
		finish_wait(&cwq->more_work, &wait);

		if (cwq_should_stop(cwq))
			break;

		run_workqueue(cwq);
	}

	return 0;
}

struct wq_barrier {
	struct work_struct	work;
	struct completion	done;
};

static void wq_barrier_func(struct work_struct *work)
{
	struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
	complete(&barr->done);
}

static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
					struct wq_barrier *barr, int tail)
{
	INIT_WORK(&barr->work, wq_barrier_func);
	__set_bit(WORK_STRUCT_PENDING, work_data_bits(&barr->work));

	init_completion(&barr->done);

	insert_work(cwq, &barr->work, tail);
}

static void flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
{
	if (cwq->thread == current) {
		/*
		 * Probably keventd trying to flush its own queue. So simply run
		 * it by hand rather than deadlocking.
		 */
		run_workqueue(cwq);
	} else {
		struct wq_barrier barr;
		int active = 0;

		spin_lock_irq(&cwq->lock);
		if (!list_empty(&cwq->worklist) || cwq->current_work != NULL) {
			insert_wq_barrier(cwq, &barr, 1);
			active = 1;
		}
		spin_unlock_irq(&cwq->lock);

		if (active)
			wait_for_completion(&barr.done);
	}
}

/**
 * flush_workqueue - ensure that any scheduled work has run to completion.
 * @wq: workqueue to flush
 *
 * Forces execution of the workqueue and blocks until its completion.
 * This is typically used in driver shutdown handlers.
 *
 * We sleep until all work items which were queued on entry have been
 * handled, but we are not livelocked by new incoming ones.
 *
 * This function used to run the workqueues itself.  Now we just wait for the
 * helper threads to do it.
 */
void fastcall flush_workqueue(struct workqueue_struct *wq)
{
	const cpumask_t *cpu_map = wq_cpu_map(wq);
	int cpu;

	might_sleep();
	for_each_cpu_mask(cpu, *cpu_map)
		flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
}
EXPORT_SYMBOL_GPL(flush_workqueue);

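/*
 * Illustrative shutdown pattern (editor's addition; my_wq and my_timer are
 * hypothetical).  The caller must first stop everything that could requeue
 * work, since the flush only covers items queued before entry:
 *
 *	del_timer_sync(&my_timer);
 *	flush_workqueue(my_wq);
 *	destroy_workqueue(my_wq);
 */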
static void wait_on_work(struct cpu_workqueue_struct *cwq,
				struct work_struct *work)
{
	struct wq_barrier barr;
	int running = 0;

	spin_lock_irq(&cwq->lock);
	if (unlikely(cwq->current_work == work)) {
		insert_wq_barrier(cwq, &barr, 0);
		running = 1;
	}
	spin_unlock_irq(&cwq->lock);

	if (unlikely(running))
		wait_for_completion(&barr.done);
}

/**
 * flush_work - block until a work_struct's callback has terminated
 * @wq: the workqueue on which the work is queued
 * @work: the work which is to be flushed
 *
 * flush_work() will attempt to cancel the work if it is queued.  If the work's
 * callback appears to be running, flush_work() will block until it has
 * completed.
 *
 * flush_work() is designed to be used when the caller is tearing down data
 * structures which the callback function operates upon.  It is expected that,
 * prior to calling flush_work(), the caller has arranged for the work to not
 * be requeued.
 */
void flush_work(struct workqueue_struct *wq, struct work_struct *work)
{
	const cpumask_t *cpu_map = wq_cpu_map(wq);
	struct cpu_workqueue_struct *cwq;
	int cpu;

	might_sleep();

	cwq = get_wq_data(work);
	/* Was it ever queued ? */
	if (!cwq)
		return;

	/*
	 * This work can't be re-queued, no need to re-check that
	 * get_wq_data() is still the same when we take cwq->lock.
	 */
	spin_lock_irq(&cwq->lock);
	list_del_init(&work->entry);
	work_release(work);
	spin_unlock_irq(&cwq->lock);

	for_each_cpu_mask(cpu, *cpu_map)
		wait_on_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
}
EXPORT_SYMBOL_GPL(flush_work);

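/*
 * Illustrative teardown sketch (editor's addition; dev and its fields are
 * hypothetical).  First make sure the handler cannot requeue itself, then
 * flush before freeing the data it operates on:
 *
 *	dev->shutting_down = 1;
 *	flush_work(my_wq, &dev->work);
 *	kfree(dev);
 *
 * Unlike flush_workqueue(), this waits only for the one work item, on
 * every CPU where it may be pending or running.
 */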

static struct workqueue_struct *keventd_wq;

/**
 * schedule_work - put work task in global workqueue
 * @work: job to be done
 *
 * This puts a job in the kernel-global workqueue.
 */
int fastcall schedule_work(struct work_struct *work)
{
	return queue_work(keventd_wq, work);
}
EXPORT_SYMBOL(schedule_work);

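/*
 * Illustrative sketch (editor's addition): schedule_work() is simply
 * queue_work() on the shared "events" workqueue:
 *
 *	static DECLARE_WORK(my_work, my_work_fn);
 *	schedule_work(&my_work);
 *
 * Handlers queued here share the per-CPU keventd thread with every other
 * schedule_work() user, so they should not block for long.
 */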
/**
 * schedule_delayed_work - put work task in global workqueue after delay
 * @dwork: job to be done
 * @delay: number of jiffies to wait or 0 for immediate execution
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue.
 */
int fastcall schedule_delayed_work(struct delayed_work *dwork,
					unsigned long delay)
{
	timer_stats_timer_set_start_info(&dwork->timer);
	return queue_delayed_work(keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work);

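/*
 * Illustrative sketch (editor's addition; stats_fn and stats_work are
 * hypothetical):
 *
 *	static DECLARE_DELAYED_WORK(stats_work, stats_fn);
 *	schedule_delayed_work(&stats_work, 5 * HZ);
 *
 * This defers stats_fn() to keventd roughly five seconds from now.
 */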
/**
 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
 * @cpu: cpu to use
 * @dwork: job to be done
 * @delay: number of jiffies to wait
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue on the specified CPU.
 */
int schedule_delayed_work_on(int cpu,
			struct delayed_work *dwork, unsigned long delay)
{
	return queue_delayed_work_on(cpu, keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work_on);

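/*
 * Illustrative sketch (editor's addition): like schedule_delayed_work()
 * but pinned to one CPU, e.g. a handler rearming itself locally:
 *
 *	schedule_delayed_work_on(smp_processor_id(), &stats_work, HZ);
 *
 * Using smp_processor_id() here assumes the caller is already pinned or
 * has preemption disabled.
 */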
/**
 * schedule_on_each_cpu - call a function on each online CPU from keventd
 * @func: the function to call
 *
 * Returns zero on success.
 * Returns -ve errno on failure.
 *
 * Appears to be racy against CPU hotplug.
 *
 * schedule_on_each_cpu() is very slow.
 */
int schedule_on_each_cpu(work_func_t func)
{
	int cpu;
	struct work_struct *works;

	works = alloc_percpu(struct work_struct);
	if (!works)
		return -ENOMEM;

	preempt_disable();		/* CPU hotplug */
	for_each_online_cpu(cpu) {
		struct work_struct *work = per_cpu_ptr(works, cpu);

		INIT_WORK(work, func);
		set_bit(WORK_STRUCT_PENDING, work_data_bits(work));
		__queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu), work);
	}
	preempt_enable();
	flush_workqueue(keventd_wq);
	free_percpu(works);
	return 0;
}

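/*
 * Illustrative sketch (editor's addition; drain_cpu_caches is
 * hypothetical):
 *
 *	static void drain_cpu_caches(struct work_struct *unused);
 *	...
 *	int err = schedule_on_each_cpu(drain_cpu_caches);
 *
 * The function runs once on every online CPU, from that CPU's keventd
 * thread, and the call returns only after all instances have completed
 * (via the flush_workqueue() above).
 */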
void flush_scheduled_work(void)
{
	flush_workqueue(keventd_wq);
}
EXPORT_SYMBOL(flush_scheduled_work);

void flush_work_keventd(struct work_struct *work)
{
	flush_work(keventd_wq, work);
}
EXPORT_SYMBOL(flush_work_keventd);

/**
 * cancel_rearming_delayed_workqueue - kill off a delayed work whose handler rearms the delayed work.
 * @wq:   the controlling workqueue structure
 * @dwork: the delayed work struct
 *
 * Note that the work callback function may still be running on return from
 * cancel_delayed_work(). Run flush_workqueue() or flush_work() to wait on it.
 */
void cancel_rearming_delayed_workqueue(struct workqueue_struct *wq,
				       struct delayed_work *dwork)
{
	/* Was it ever queued ? */
	if (!get_wq_data(&dwork->work))
		return;

	while (!cancel_delayed_work(dwork))
		flush_workqueue(wq);
}
EXPORT_SYMBOL(cancel_rearming_delayed_workqueue);

/**
 * cancel_rearming_delayed_work - kill off a delayed keventd work whose handler rearms the delayed work.
 * @dwork: the delayed work struct
 */
void cancel_rearming_delayed_work(struct delayed_work *dwork)
{
	cancel_rearming_delayed_workqueue(keventd_wq, dwork);
}
EXPORT_SYMBOL(cancel_rearming_delayed_work);

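/*
 * Illustrative rearming pattern that these helpers exist to stop
 * (editor's addition; poll_fn is hypothetical):
 *
 *	static void poll_fn(struct work_struct *work)
 *	{
 *		struct delayed_work *dwork =
 *			container_of(work, struct delayed_work, work);
 *
 *		schedule_delayed_work(dwork, HZ);
 *	}
 *
 * A bare cancel_delayed_work() can lose the race against the handler
 * rearming itself, hence the cancel/flush loop above.
 */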
/**
 * execute_in_process_context - reliably execute the routine with user context
 * @fn:		the function to execute
 * @ew:		guaranteed storage for the execute work structure (must
 *		be available when the work executes)
 *
 * Executes the function immediately if process context is available,
 * otherwise schedules the function for delayed execution.
 *
 * Returns:	0 - function was executed
 *		1 - function was scheduled for execution
 */
int execute_in_process_context(work_func_t fn, struct execute_work *ew)
{
	if (!in_interrupt()) {
		fn(&ew->work);
		return 0;
	}

	INIT_WORK(&ew->work, fn);
	schedule_work(&ew->work);

	return 1;
}
EXPORT_SYMBOL_GPL(execute_in_process_context);

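/*
 * Illustrative sketch (editor's addition; my_release and my_obj are
 * hypothetical).  Useful in release paths that may be entered from either
 * process or interrupt context; the execute_work storage must outlive the
 * deferred call:
 *
 *	struct my_obj {
 *		struct execute_work ew;
 *	};
 *
 *	ret = execute_in_process_context(my_release, &obj->ew);
 *
 * In process context my_release() runs synchronously and 0 is returned;
 * from an interrupt it is deferred to keventd and 1 is returned.
 */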
int keventd_up(void)
{
	return keventd_wq != NULL;
}

int current_is_keventd(void)
{
	struct cpu_workqueue_struct *cwq;
	int cpu = smp_processor_id();	/* preempt-safe: keventd is per-cpu */
	int ret = 0;

	BUG_ON(!keventd_wq);

	cwq = per_cpu_ptr(keventd_wq->cpu_wq, cpu);
	if (current == cwq->thread)
		ret = 1;

	return ret;
}

static struct cpu_workqueue_struct *
init_cpu_workqueue(struct workqueue_struct *wq, int cpu)
{
	struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);

	cwq->wq = wq;
	spin_lock_init(&cwq->lock);
	INIT_LIST_HEAD(&cwq->worklist);
	init_waitqueue_head(&cwq->more_work);

	return cwq;
}

static int create_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
	struct workqueue_struct *wq = cwq->wq;
	const char *fmt = is_single_threaded(wq) ? "%s" : "%s/%d";
	struct task_struct *p;

	p = kthread_create(worker_thread, cwq, fmt, wq->name, cpu);
	/*
	 * Nobody can add the work_struct to this cwq,
	 *	if (caller is __create_workqueue)
	 *		nobody should see this wq
	 *	else // caller is CPU_UP_PREPARE
	 *		cpu is not on cpu_online_map
	 * so we can abort safely.
	 */
	if (IS_ERR(p))
		return PTR_ERR(p);

	cwq->thread = p;
	cwq->should_stop = 0;

	return 0;
}

static void start_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
	struct task_struct *p = cwq->thread;

	if (p != NULL) {
		if (cpu >= 0)
			kthread_bind(p, cpu);
		wake_up_process(p);
	}
}

struct workqueue_struct *__create_workqueue(const char *name,
					    int singlethread, int freezeable)
{
	struct workqueue_struct *wq;
	struct cpu_workqueue_struct *cwq;
	int err = 0, cpu;

	wq = kzalloc(sizeof(*wq), GFP_KERNEL);
	if (!wq)
		return NULL;

	wq->cpu_wq = alloc_percpu(struct cpu_workqueue_struct);
	if (!wq->cpu_wq) {
		kfree(wq);
		return NULL;
	}

	wq->name = name;
	wq->singlethread = singlethread;
	wq->freezeable = freezeable;
	INIT_LIST_HEAD(&wq->list);

	if (singlethread) {
		cwq = init_cpu_workqueue(wq, singlethread_cpu);
		err = create_workqueue_thread(cwq, singlethread_cpu);
		start_workqueue_thread(cwq, -1);
	} else {
		mutex_lock(&workqueue_mutex);
		list_add(&wq->list, &workqueues);

		for_each_possible_cpu(cpu) {
			cwq = init_cpu_workqueue(wq, cpu);
			if (err || !cpu_online(cpu))
				continue;
			err = create_workqueue_thread(cwq, cpu);
			start_workqueue_thread(cwq, cpu);
		}
		mutex_unlock(&workqueue_mutex);
	}

	if (err) {
		destroy_workqueue(wq);
		wq = NULL;
	}
	return wq;
}
EXPORT_SYMBOL_GPL(__create_workqueue);

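/*
 * Illustrative sketch (editor's addition): callers normally reach this
 * through the wrapper macros in <linux/workqueue.h> rather than calling
 * __create_workqueue() directly, e.g.:
 *
 *	wq = create_workqueue("my_wq");
 *	wq = create_singlethread_workqueue("my_wq");
 *
 * (A freezeable variant also exists; see the header for the exact set of
 * wrappers.)  Each returns NULL on failure and is paired with
 * destroy_workqueue().
 */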
static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
	struct wq_barrier barr;
	int alive = 0;

	spin_lock_irq(&cwq->lock);
	if (cwq->thread != NULL) {
		insert_wq_barrier(cwq, &barr, 1);
		cwq->should_stop = 1;
		alive = 1;
	}
	spin_unlock_irq(&cwq->lock);

	if (alive) {
		wait_for_completion(&barr.done);

		while (unlikely(cwq->thread != NULL))
			cpu_relax();
		/*
		 * Wait until cwq->thread unlocks cwq->lock,
		 * it won't touch *cwq after that.
		 */
		smp_rmb();
		spin_unlock_wait(&cwq->lock);
	}
}

/**
 * destroy_workqueue - safely terminate a workqueue
 * @wq: target workqueue
 *
 * Safely destroy a workqueue. All work currently pending will be done first.
 */
void destroy_workqueue(struct workqueue_struct *wq)
{
	const cpumask_t *cpu_map = wq_cpu_map(wq);
	struct cpu_workqueue_struct *cwq;
	int cpu;

	mutex_lock(&workqueue_mutex);
	list_del(&wq->list);
	mutex_unlock(&workqueue_mutex);

	for_each_cpu_mask(cpu, *cpu_map) {
		cwq = per_cpu_ptr(wq->cpu_wq, cpu);
		cleanup_workqueue_thread(cwq, cpu);
	}

	free_percpu(wq->cpu_wq);
	kfree(wq);
}
EXPORT_SYMBOL_GPL(destroy_workqueue);

static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
						unsigned long action,
						void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct cpu_workqueue_struct *cwq;
	struct workqueue_struct *wq;

	switch (action) {
	case CPU_LOCK_ACQUIRE:
		mutex_lock(&workqueue_mutex);
		return NOTIFY_OK;

	case CPU_LOCK_RELEASE:
		mutex_unlock(&workqueue_mutex);
		return NOTIFY_OK;

	case CPU_UP_PREPARE:
		cpu_set(cpu, cpu_populated_map);
	}

	list_for_each_entry(wq, &workqueues, list) {
		cwq = per_cpu_ptr(wq->cpu_wq, cpu);

		switch (action) {
		case CPU_UP_PREPARE:
			if (!create_workqueue_thread(cwq, cpu))
				break;
			printk(KERN_ERR "workqueue for %i failed\n", cpu);
			return NOTIFY_BAD;

		case CPU_ONLINE:
			start_workqueue_thread(cwq, cpu);
			break;

		case CPU_UP_CANCELED:
			start_workqueue_thread(cwq, -1);
			/* fall through */
		case CPU_DEAD:
			cleanup_workqueue_thread(cwq, cpu);
			break;
		}
	}

	return NOTIFY_OK;
}

void __init init_workqueues(void)
{
	cpu_populated_map = cpu_online_map;
	singlethread_cpu = first_cpu(cpu_possible_map);
	cpu_singlethread_map = cpumask_of_cpu(singlethread_cpu);
	hotcpu_notifier(workqueue_cpu_callback, 0);
	keventd_wq = create_workqueue("events");
	BUG_ON(!keventd_wq);
}