/*
 * linux/kernel/workqueue.c
 *
 * Generic mechanism for defining kernel helper threads for running
 * arbitrary tasks in process context.
 *
 * Started by Ingo Molnar, Copyright (C) 2002
 *
 * Derived from the taskqueue/keventd code by:
 *
 *   David Woodhouse <[email protected]>
 *   Andrew Morton <[email protected]>
 *   Kai Petzke <[email protected]>
 *   Theodore Ts'o <[email protected]>
 *
 * Made to use alloc_percpu by Christoph Lameter.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/mempolicy.h>
#include <linux/freezer.h>
#include <linux/kallsyms.h>
#include <linux/debug_locks.h>
#include <linux/lockdep.h>

/*
 * The per-CPU workqueue (if single thread, we always use the first
 * possible cpu).
 */
struct cpu_workqueue_struct {

	spinlock_t lock;

	struct list_head worklist;
	wait_queue_head_t more_work;
	struct work_struct *current_work;

	struct workqueue_struct *wq;
	struct task_struct *thread;

	int run_depth;		/* Detect run_workqueue() recursion depth */
} ____cacheline_aligned;

/*
 * The externally visible workqueue abstraction is an array of
 * per-CPU workqueues:
 */
struct workqueue_struct {
	struct cpu_workqueue_struct *cpu_wq;
	struct list_head list;
	const char *name;
	int singlethread;
	int freezeable;		/* Freeze threads during suspend */
#ifdef CONFIG_LOCKDEP
	struct lockdep_map lockdep_map;
#endif
};

/* Serializes the accesses to the list of workqueues. */
static DEFINE_SPINLOCK(workqueue_lock);
static LIST_HEAD(workqueues);

static int singlethread_cpu __read_mostly;
static cpumask_t cpu_singlethread_map __read_mostly;
/*
 * _cpu_down() first removes CPU from cpu_online_map, then CPU_DEAD
 * flushes cwq->worklist. This means that flush_workqueue/wait_on_work
 * which comes in between can't use for_each_online_cpu(). We could
 * use cpu_possible_map instead; the cpumask below is more documentation
 * than optimization.
 */
static cpumask_t cpu_populated_map __read_mostly;

/* If it's single threaded, it isn't in the list of workqueues. */
static inline int is_single_threaded(struct workqueue_struct *wq)
{
	return wq->singlethread;
}

static const cpumask_t *wq_cpu_map(struct workqueue_struct *wq)
{
	return is_single_threaded(wq)
		? &cpu_singlethread_map : &cpu_populated_map;
}

static
struct cpu_workqueue_struct *wq_per_cpu(struct workqueue_struct *wq, int cpu)
{
	if (unlikely(is_single_threaded(wq)))
		cpu = singlethread_cpu;
	return per_cpu_ptr(wq->cpu_wq, cpu);
}

/*
 * Set the workqueue on which a work item is to be run
 * - Must *only* be called if the pending flag is set
 */
static inline void set_wq_data(struct work_struct *work,
				struct cpu_workqueue_struct *cwq)
{
	unsigned long new;

	BUG_ON(!work_pending(work));

	new = (unsigned long) cwq | (1UL << WORK_STRUCT_PENDING);
	new |= WORK_STRUCT_FLAG_MASK & *work_data_bits(work);
	atomic_long_set(&work->data, new);
}

static inline
struct cpu_workqueue_struct *get_wq_data(struct work_struct *work)
{
	return (void *) (atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK);
}

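/*
 * work->data packs the cwq pointer and the flag bits into one word:
 * struct cpu_workqueue_struct is ____cacheline_aligned, so the low
 * WORK_STRUCT_FLAG_MASK bits of its address are zero and are free to
 * carry the flags.  In short:
 *
 *	data = (unsigned long)cwq | flags;			- set_wq_data()
 *	cwq  = (void *)(data & WORK_STRUCT_WQ_DATA_MASK);	- get_wq_data()
 */
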
static void insert_work(struct cpu_workqueue_struct *cwq,
				struct work_struct *work, int tail)
{
	set_wq_data(work, cwq);
	/*
	 * Ensure that we get the right work->data if we see the
	 * result of list_add() below, see try_to_grab_pending().
	 */
	smp_wmb();
	if (tail)
		list_add_tail(&work->entry, &cwq->worklist);
	else
		list_add(&work->entry, &cwq->worklist);
	wake_up(&cwq->more_work);
}

static void __queue_work(struct cpu_workqueue_struct *cwq,
			 struct work_struct *work)
{
	unsigned long flags;

	spin_lock_irqsave(&cwq->lock, flags);
	insert_work(cwq, work, 1);
	spin_unlock_irqrestore(&cwq->lock, flags);
}

/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to the CPU on which it was submitted, but if the CPU dies
 * it can be processed by another CPU.
 */
int queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
	int ret = 0;

	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
		BUG_ON(!list_empty(&work->entry));
		__queue_work(wq_per_cpu(wq, get_cpu()), work);
		put_cpu();
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_work);

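/*
 * Typical usage (an illustrative sketch; "my_work_fn", "my_work" and
 * "my_wq" are hypothetical names, not part of this file):
 *
 *	static void my_work_fn(struct work_struct *work)
 *	{
 *		printk(KERN_INFO "running in process context\n");
 *	}
 *	static DECLARE_WORK(my_work, my_work_fn);
 *	static struct workqueue_struct *my_wq;
 *
 *	queue_work(my_wq, &my_work);	- returns 1: newly queued
 *	queue_work(my_wq, &my_work);	- may return 0: still pending
 */
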
/**
 * queue_work_on - queue work on specific cpu
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to a specific CPU; the caller must ensure that
 * this CPU can't go away.
 */
int
queue_work_on(int cpu, struct workqueue_struct *wq, struct work_struct *work)
{
	int ret = 0;

	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
		BUG_ON(!list_empty(&work->entry));
		__queue_work(wq_per_cpu(wq, cpu), work);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_work_on);

static void delayed_work_timer_fn(unsigned long __data)
{
	struct delayed_work *dwork = (struct delayed_work *)__data;
	struct cpu_workqueue_struct *cwq = get_wq_data(&dwork->work);
	struct workqueue_struct *wq = cwq->wq;

	__queue_work(wq_per_cpu(wq, smp_processor_id()), &dwork->work);
}

/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @dwork was already on a queue, non-zero otherwise.
 */
int queue_delayed_work(struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	if (delay == 0)
		return queue_work(wq, &dwork->work);

	return queue_delayed_work_on(-1, wq, dwork, delay);
}
EXPORT_SYMBOL_GPL(queue_delayed_work);

/**
 * queue_delayed_work_on - queue work on specific CPU after delay
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @dwork was already on a queue, non-zero otherwise.
 */
int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	int ret = 0;
	struct timer_list *timer = &dwork->timer;
	struct work_struct *work = &dwork->work;

	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
		BUG_ON(timer_pending(timer));
		BUG_ON(!list_empty(&work->entry));

		timer_stats_timer_set_start_info(&dwork->timer);

		/* This stores cwq for the moment, for the timer_fn */
		set_wq_data(work, wq_per_cpu(wq, raw_smp_processor_id()));
		timer->expires = jiffies + delay;
		timer->data = (unsigned long)dwork;
		timer->function = delayed_work_timer_fn;

		if (unlikely(cpu >= 0))
			add_timer_on(timer, cpu);
		else
			add_timer(timer);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_delayed_work_on);

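/*
 * Typical usage (an illustrative sketch; "my_timeout_fn", "my_dwork"
 * and "my_wq" are hypothetical names):
 *
 *	static void my_timeout_fn(struct work_struct *work);
 *	static DECLARE_DELAYED_WORK(my_dwork, my_timeout_fn);
 *
 *	queue_delayed_work(my_wq, &my_dwork, HZ);	- run in ~1 second
 *	queue_delayed_work_on(1, my_wq, &my_dwork, HZ);	- same, on CPU 1
 */
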
static void run_workqueue(struct cpu_workqueue_struct *cwq)
{
	spin_lock_irq(&cwq->lock);
	cwq->run_depth++;
	if (cwq->run_depth > 3) {
		/* morton gets to eat his hat */
		printk("%s: recursion depth exceeded: %d\n",
			__func__, cwq->run_depth);
		dump_stack();
	}
	while (!list_empty(&cwq->worklist)) {
		struct work_struct *work = list_entry(cwq->worklist.next,
						struct work_struct, entry);
		work_func_t f = work->func;
#ifdef CONFIG_LOCKDEP
		/*
		 * It is permissible to free the struct work_struct from
		 * inside the function that is called from it; we need to
		 * take that into account for lockdep too.  To avoid bogus
		 * "held lock freed" warnings as well as problems when
		 * looking into work->lockdep_map, make a copy and use
		 * that here.
		 */
		struct lockdep_map lockdep_map = work->lockdep_map;
#endif

		cwq->current_work = work;
		list_del_init(cwq->worklist.next);
		spin_unlock_irq(&cwq->lock);

		BUG_ON(get_wq_data(work) != cwq);
		work_clear_pending(work);
		lock_acquire(&cwq->wq->lockdep_map, 0, 0, 0, 2, _THIS_IP_);
		lock_acquire(&lockdep_map, 0, 0, 0, 2, _THIS_IP_);
		f(work);
		lock_release(&lockdep_map, 1, _THIS_IP_);
		lock_release(&cwq->wq->lockdep_map, 1, _THIS_IP_);

		if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
			printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
					"%s/0x%08x/%d\n",
					current->comm, preempt_count(),
					task_pid_nr(current));
			printk(KERN_ERR "    last function: ");
			print_symbol("%s\n", (unsigned long)f);
			debug_show_held_locks(current);
			dump_stack();
		}

		spin_lock_irq(&cwq->lock);
		cwq->current_work = NULL;
	}
	cwq->run_depth--;
	spin_unlock_irq(&cwq->lock);
}

static int worker_thread(void *__cwq)
{
	struct cpu_workqueue_struct *cwq = __cwq;
	DEFINE_WAIT(wait);

	if (cwq->wq->freezeable)
		set_freezable();

	set_user_nice(current, -5);

	for (;;) {
		prepare_to_wait(&cwq->more_work, &wait, TASK_INTERRUPTIBLE);
		if (!freezing(current) &&
		    !kthread_should_stop() &&
		    list_empty(&cwq->worklist))
			schedule();
		finish_wait(&cwq->more_work, &wait);

		try_to_freeze();

		if (kthread_should_stop())
			break;

		run_workqueue(cwq);
	}

	return 0;
}

struct wq_barrier {
	struct work_struct	work;
	struct completion	done;
};

static void wq_barrier_func(struct work_struct *work)
{
	struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
	complete(&barr->done);
}

static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
					struct wq_barrier *barr, int tail)
{
	INIT_WORK(&barr->work, wq_barrier_func);
	__set_bit(WORK_STRUCT_PENDING, work_data_bits(&barr->work));

	init_completion(&barr->done);

	insert_work(cwq, &barr->work, tail);
}

static int flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
{
	int active;

	if (cwq->thread == current) {
		/*
		 * Probably keventd trying to flush its own queue. So simply run
		 * it by hand rather than deadlocking.
		 */
		run_workqueue(cwq);
		active = 1;
	} else {
		struct wq_barrier barr;

		active = 0;
		spin_lock_irq(&cwq->lock);
		if (!list_empty(&cwq->worklist) || cwq->current_work != NULL) {
			insert_wq_barrier(cwq, &barr, 1);
			active = 1;
		}
		spin_unlock_irq(&cwq->lock);

		if (active)
			wait_for_completion(&barr.done);
	}

	return active;
}

/**
 * flush_workqueue - ensure that any scheduled work has run to completion.
 * @wq: workqueue to flush
 *
 * Forces execution of the workqueue and blocks until its completion.
 * This is typically used in driver shutdown handlers.
 *
 * We sleep until all work items which were queued on entry have been
 * handled, but we are not livelocked by new incoming ones.
 *
 * This function used to run the workqueues itself.  Now we just wait for the
 * helper threads to do it.
 */
void flush_workqueue(struct workqueue_struct *wq)
{
	const cpumask_t *cpu_map = wq_cpu_map(wq);
	int cpu;

	might_sleep();
	lock_acquire(&wq->lockdep_map, 0, 0, 0, 2, _THIS_IP_);
	lock_release(&wq->lockdep_map, 1, _THIS_IP_);
	for_each_cpu_mask_nr(cpu, *cpu_map)
		flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
}
EXPORT_SYMBOL_GPL(flush_workqueue);

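/*
 * Typical shutdown-path usage (an illustrative sketch; "my_wq" is a
 * hypothetical workqueue created elsewhere):
 *
 *	flush_workqueue(my_wq);		- wait for queued work to finish
 *	destroy_workqueue(my_wq);	- destroy_workqueue() also flushes
 */
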
/*
 * Upon a successful return (>= 0), the caller "owns" the
 * WORK_STRUCT_PENDING bit, so this work can't be re-armed in any way.
 */
static int try_to_grab_pending(struct work_struct *work)
{
	struct cpu_workqueue_struct *cwq;
	int ret = -1;

	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work)))
		return 0;

	/*
	 * The queueing is in progress, or it is already queued. Try to
	 * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
	 */

	cwq = get_wq_data(work);
	if (!cwq)
		return ret;

	spin_lock_irq(&cwq->lock);
	if (!list_empty(&work->entry)) {
		/*
		 * This work is queued, but perhaps we locked the wrong cwq.
		 * In that case we must see the new value after rmb(), see
		 * insert_work()->wmb().
		 */
		smp_rmb();
		if (cwq == get_wq_data(work)) {
			list_del_init(&work->entry);
			ret = 1;
		}
	}
	spin_unlock_irq(&cwq->lock);

	return ret;
}

static void wait_on_cpu_work(struct cpu_workqueue_struct *cwq,
				struct work_struct *work)
{
	struct wq_barrier barr;
	int running = 0;

	spin_lock_irq(&cwq->lock);
	if (unlikely(cwq->current_work == work)) {
		insert_wq_barrier(cwq, &barr, 0);
		running = 1;
	}
	spin_unlock_irq(&cwq->lock);

	if (unlikely(running))
		wait_for_completion(&barr.done);
}

static void wait_on_work(struct work_struct *work)
{
	struct cpu_workqueue_struct *cwq;
	struct workqueue_struct *wq;
	const cpumask_t *cpu_map;
	int cpu;

	might_sleep();

	lock_acquire(&work->lockdep_map, 0, 0, 0, 2, _THIS_IP_);
	lock_release(&work->lockdep_map, 1, _THIS_IP_);

	cwq = get_wq_data(work);
	if (!cwq)
		return;

	wq = cwq->wq;
	cpu_map = wq_cpu_map(wq);

	for_each_cpu_mask_nr(cpu, *cpu_map)
		wait_on_cpu_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
}

static int __cancel_work_timer(struct work_struct *work,
				struct timer_list* timer)
{
	int ret;

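	/*
	 * try_to_grab_pending() returns -1 when it cannot take @work off
	 * the worklist (e.g. the queueing is still in progress), so keep
	 * retrying; wait_on_work() lets a currently running callback
	 * finish before the next attempt.
	 */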
	do {
		ret = (timer && likely(del_timer(timer)));
		if (!ret)
			ret = try_to_grab_pending(work);
		wait_on_work(work);
	} while (unlikely(ret < 0));

	work_clear_pending(work);
	return ret;
}

/**
 * cancel_work_sync - block until a work_struct's callback has terminated
 * @work: the work which is to be flushed
 *
 * Returns true if @work was pending.
 *
 * cancel_work_sync() will cancel the work if it is queued. If the work's
 * callback appears to be running, cancel_work_sync() will block until it
 * has completed.
 *
 * It is possible to use this function if the work re-queues itself. It can
 * cancel the work even if it migrates to another workqueue, however in that
 * case it only guarantees that work->func() has completed on the last queued
 * workqueue.
 *
 * cancel_work_sync(&delayed_work->work) should be used only if ->timer is not
 * pending, otherwise it goes into a busy-wait loop until the timer expires.
 *
 * The caller must ensure that the workqueue_struct on which this work was
 * last queued can't be destroyed before this function returns.
 */
int cancel_work_sync(struct work_struct *work)
{
	return __cancel_work_timer(work, NULL);
}
EXPORT_SYMBOL_GPL(cancel_work_sync);

/**
 * cancel_delayed_work_sync - reliably kill off a delayed work.
 * @dwork: the delayed work struct
 *
 * Returns true if @dwork was pending.
 *
 * It is possible to use this function if @dwork rearms itself via queue_work()
 * or queue_delayed_work(). See also the comment for cancel_work_sync().
 */
int cancel_delayed_work_sync(struct delayed_work *dwork)
{
	return __cancel_work_timer(&dwork->work, &dwork->timer);
}
EXPORT_SYMBOL(cancel_delayed_work_sync);

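/*
 * Typical teardown usage (an illustrative sketch; "my_dwork" is a
 * hypothetical delayed work):
 *
 *	cancel_delayed_work_sync(&my_dwork);
 *
 * On return the timer is off, the work is off the queue, and the
 * callback is guaranteed not to be running.
 */
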
static struct workqueue_struct *keventd_wq __read_mostly;

/**
 * schedule_work - put work task in global workqueue
 * @work: job to be done
 *
 * This puts a job in the kernel-global workqueue.
 */
int schedule_work(struct work_struct *work)
{
	return queue_work(keventd_wq, work);
}
EXPORT_SYMBOL(schedule_work);

/**
 * schedule_work_on - put work task on a specific cpu
 * @cpu: cpu to put the work task on
 * @work: job to be done
 *
 * This puts a job on a specific cpu.
 */
int schedule_work_on(int cpu, struct work_struct *work)
{
	return queue_work_on(cpu, keventd_wq, work);
}
EXPORT_SYMBOL(schedule_work_on);

/**
 * schedule_delayed_work - put work task in global workqueue after delay
 * @dwork: job to be done
 * @delay: number of jiffies to wait or 0 for immediate execution
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue.
 */
int schedule_delayed_work(struct delayed_work *dwork,
					unsigned long delay)
{
	return queue_delayed_work(keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work);

/**
 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
 * @cpu: cpu to use
 * @dwork: job to be done
 * @delay: number of jiffies to wait
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue on the specified CPU.
 */
int schedule_delayed_work_on(int cpu,
			struct delayed_work *dwork, unsigned long delay)
{
	return queue_delayed_work_on(cpu, keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work_on);

/**
 * schedule_on_each_cpu - call a function on each online CPU from keventd
 * @func: the function to call
 *
 * Returns zero on success.
 * Returns -ve errno on failure.
 *
 * schedule_on_each_cpu() is very slow.
 */
int schedule_on_each_cpu(work_func_t func)
{
	int cpu;
	struct work_struct *works;

	works = alloc_percpu(struct work_struct);
	if (!works)
		return -ENOMEM;

	get_online_cpus();
	for_each_online_cpu(cpu) {
		struct work_struct *work = per_cpu_ptr(works, cpu);

		INIT_WORK(work, func);
		set_bit(WORK_STRUCT_PENDING, work_data_bits(work));
		__queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu), work);
	}
	flush_workqueue(keventd_wq);
	put_online_cpus();
	free_percpu(works);
	return 0;
}

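/*
 * Example (an illustrative sketch; "drain_local_caches" is a
 * hypothetical work callback):
 *
 *	static void drain_local_caches(struct work_struct *unused)
 *	{
 *		...	- runs once on every online CPU, in keventd
 *	}
 *
 *	err = schedule_on_each_cpu(drain_local_caches);
 *
 * The call returns only after the callback has finished on all CPUs.
 */
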
void flush_scheduled_work(void)
{
	flush_workqueue(keventd_wq);
}
EXPORT_SYMBOL(flush_scheduled_work);

/**
 * execute_in_process_context - reliably execute the routine with user context
 * @fn:		the function to execute
 * @ew:		guaranteed storage for the execute work structure (must
 *		be available when the work executes)
 *
 * Executes the function immediately if process context is available,
 * otherwise schedules the function for delayed execution.
 *
 * Returns:	0 - function was executed
 *		1 - function was scheduled for execution
 */
int execute_in_process_context(work_func_t fn, struct execute_work *ew)
{
	if (!in_interrupt()) {
		fn(&ew->work);
		return 0;
	}

	INIT_WORK(&ew->work, fn);
	schedule_work(&ew->work);

	return 1;
}
EXPORT_SYMBOL_GPL(execute_in_process_context);

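/*
 * Example (an illustrative sketch; "my_release" and "my_ew" are
 * hypothetical; @ew must outlive the call, so it cannot live on the
 * caller's stack when the work is deferred):
 *
 *	static struct execute_work my_ew;
 *
 *	static void my_release(struct work_struct *work)
 *	{
 *		...	- always runs in process context
 *	}
 *
 *	execute_in_process_context(my_release, &my_ew);	- from any context
 */
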
int keventd_up(void)
{
	return keventd_wq != NULL;
}

int current_is_keventd(void)
{
	struct cpu_workqueue_struct *cwq;
	int cpu = raw_smp_processor_id(); /* preempt-safe: keventd is per-cpu */
	int ret = 0;

	BUG_ON(!keventd_wq);

	cwq = per_cpu_ptr(keventd_wq->cpu_wq, cpu);
	if (current == cwq->thread)
		ret = 1;

	return ret;
}

static struct cpu_workqueue_struct *
init_cpu_workqueue(struct workqueue_struct *wq, int cpu)
{
	struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);

	cwq->wq = wq;
	spin_lock_init(&cwq->lock);
	INIT_LIST_HEAD(&cwq->worklist);
	init_waitqueue_head(&cwq->more_work);

	return cwq;
}

static int create_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
	struct workqueue_struct *wq = cwq->wq;
	const char *fmt = is_single_threaded(wq) ? "%s" : "%s/%d";
	struct task_struct *p;

	p = kthread_create(worker_thread, cwq, fmt, wq->name, cpu);
	/*
	 * Nobody can add the work_struct to this cwq,
	 *	if (caller is __create_workqueue)
	 *		nobody should see this wq
	 *	else // caller is CPU_UP_PREPARE
	 *		cpu is not on cpu_online_map
	 * so we can abort safely.
	 */
	if (IS_ERR(p))
		return PTR_ERR(p);

	cwq->thread = p;

	return 0;
}

static void start_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
	struct task_struct *p = cwq->thread;

	if (p != NULL) {
		if (cpu >= 0)
			kthread_bind(p, cpu);
		wake_up_process(p);
	}
}

struct workqueue_struct *__create_workqueue_key(const char *name,
						int singlethread,
						int freezeable,
						struct lock_class_key *key,
						const char *lock_name)
{
	struct workqueue_struct *wq;
	struct cpu_workqueue_struct *cwq;
	int err = 0, cpu;

	wq = kzalloc(sizeof(*wq), GFP_KERNEL);
	if (!wq)
		return NULL;

	wq->cpu_wq = alloc_percpu(struct cpu_workqueue_struct);
	if (!wq->cpu_wq) {
		kfree(wq);
		return NULL;
	}

	wq->name = name;
	lockdep_init_map(&wq->lockdep_map, lock_name, key, 0);
	wq->singlethread = singlethread;
	wq->freezeable = freezeable;
	INIT_LIST_HEAD(&wq->list);

	if (singlethread) {
		cwq = init_cpu_workqueue(wq, singlethread_cpu);
		err = create_workqueue_thread(cwq, singlethread_cpu);
		start_workqueue_thread(cwq, -1);
	} else {
		get_online_cpus();
		spin_lock(&workqueue_lock);
		list_add(&wq->list, &workqueues);
		spin_unlock(&workqueue_lock);

		for_each_possible_cpu(cpu) {
			cwq = init_cpu_workqueue(wq, cpu);
			if (err || !cpu_online(cpu))
				continue;
			err = create_workqueue_thread(cwq, cpu);
			start_workqueue_thread(cwq, cpu);
		}
		put_online_cpus();
	}

	if (err) {
		destroy_workqueue(wq);
		wq = NULL;
	}
	return wq;
}
EXPORT_SYMBOL_GPL(__create_workqueue_key);

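/*
 * Callers normally reach this through the wrapper macros in
 * <linux/workqueue.h> rather than calling it directly, e.g.:
 *
 *	wq = create_workqueue("foo");			- one thread per CPU
 *	wq = create_singlethread_workqueue("bar");	- one thread total
 *	wq = create_freezeable_workqueue("baz");	- freezes during suspend
 */
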
static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq)
{
	/*
	 * Our caller is either destroy_workqueue() or CPU_DEAD;
	 * get_online_cpus() protects cwq->thread.
	 */
	if (cwq->thread == NULL)
		return;

	lock_acquire(&cwq->wq->lockdep_map, 0, 0, 0, 2, _THIS_IP_);
	lock_release(&cwq->wq->lockdep_map, 1, _THIS_IP_);

	flush_cpu_workqueue(cwq);
	/*
	 * If the caller is CPU_DEAD and cwq->worklist was not empty,
	 * a concurrent flush_workqueue() can insert a barrier after us.
	 * However, in that case run_workqueue() won't return and check
	 * kthread_should_stop() until it flushes all work_struct's.
	 * When ->worklist becomes empty it is safe to exit because no
	 * more work_structs can be queued on this cwq: flush_workqueue
	 * checks list_empty(), and a "normal" queue_work() can't use
	 * a dead CPU.
	 */
	kthread_stop(cwq->thread);
	cwq->thread = NULL;
}

/**
 * destroy_workqueue - safely terminate a workqueue
 * @wq: target workqueue
 *
 * Safely destroy a workqueue. All work currently pending will be done first.
 */
void destroy_workqueue(struct workqueue_struct *wq)
{
	const cpumask_t *cpu_map = wq_cpu_map(wq);
	int cpu;

	get_online_cpus();
	spin_lock(&workqueue_lock);
	list_del(&wq->list);
	spin_unlock(&workqueue_lock);

	for_each_cpu_mask_nr(cpu, *cpu_map)
		cleanup_workqueue_thread(per_cpu_ptr(wq->cpu_wq, cpu));
	put_online_cpus();

	free_percpu(wq->cpu_wq);
	kfree(wq);
}
EXPORT_SYMBOL_GPL(destroy_workqueue);

static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
						unsigned long action,
						void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct cpu_workqueue_struct *cwq;
	struct workqueue_struct *wq;

	action &= ~CPU_TASKS_FROZEN;

	switch (action) {
	case CPU_UP_PREPARE:
		cpu_set(cpu, cpu_populated_map);
	}

	list_for_each_entry(wq, &workqueues, list) {
		cwq = per_cpu_ptr(wq->cpu_wq, cpu);

		switch (action) {
		case CPU_UP_PREPARE:
			if (!create_workqueue_thread(cwq, cpu))
				break;
			printk(KERN_ERR "workqueue [%s] for %i failed\n",
				wq->name, cpu);
			return NOTIFY_BAD;

		case CPU_ONLINE:
			start_workqueue_thread(cwq, cpu);
			break;

		case CPU_UP_CANCELED:
			start_workqueue_thread(cwq, -1);
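			/* fall through: the thread must be stopped either way */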
		case CPU_DEAD:
			cleanup_workqueue_thread(cwq);
			break;
		}
	}

	switch (action) {
	case CPU_UP_CANCELED:
	case CPU_DEAD:
		cpu_clear(cpu, cpu_populated_map);
	}

	return NOTIFY_OK;
}

void __init init_workqueues(void)
{
	cpu_populated_map = cpu_online_map;
	singlethread_cpu = first_cpu(cpu_possible_map);
	cpu_singlethread_map = cpumask_of_cpu(singlethread_cpu);
	hotcpu_notifier(workqueue_cpu_callback, 0);
	keventd_wq = create_workqueue("events");
	BUG_ON(!keventd_wq);
}