/*
 * linux/kernel/workqueue.c
 *
 * Generic mechanism for defining kernel helper threads for running
 * arbitrary tasks in process context.
 *
 * Started by Ingo Molnar, Copyright (C) 2002
 *
 * Derived from the taskqueue/keventd code by:
 *
 *   David Woodhouse
 *   Andrew Morton
 *   Kai Petzke
 *   Theodore Ts'o
 *
 * Made to use alloc_percpu by Christoph Lameter.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/mempolicy.h>
#include <linux/freezer.h>
#include <linux/kallsyms.h>
#include <linux/debug_locks.h>

/*
 * The per-CPU workqueue (if single thread, we always use the first
 * possible cpu).
 */
struct cpu_workqueue_struct {

	spinlock_t lock;

	struct list_head worklist;
	wait_queue_head_t more_work;
	struct work_struct *current_work;

	struct workqueue_struct *wq;
	struct task_struct *thread;

	int run_depth;		/* Detect run_workqueue() recursion depth */
} ____cacheline_aligned;

/*
 * The externally visible workqueue abstraction is an array of
 * per-CPU workqueues:
 */
struct workqueue_struct {
	struct cpu_workqueue_struct *cpu_wq;
	struct list_head list;
	const char *name;
	int singlethread;
	int freezeable;		/* Freeze threads during suspend */
};

/* All the per-cpu workqueues on the system, for hotplug cpu to add/remove
   threads to each one as cpus come/go. */
static DEFINE_MUTEX(workqueue_mutex);
static LIST_HEAD(workqueues);

static int singlethread_cpu __read_mostly;
static cpumask_t cpu_singlethread_map __read_mostly;
/*
 * _cpu_down() first removes CPU from cpu_online_map, then CPU_DEAD
 * flushes cwq->worklist. This means that flush_workqueue/wait_on_work
 * which comes in between can't use for_each_online_cpu(). We could
 * use cpu_possible_map; the cpumask below is more documentation
 * than optimization.
 */
static cpumask_t cpu_populated_map __read_mostly;

/* If it's single threaded, it isn't in the list of workqueues. */
static inline int is_single_threaded(struct workqueue_struct *wq)
{
	return wq->singlethread;
}

static const cpumask_t *wq_cpu_map(struct workqueue_struct *wq)
{
	return is_single_threaded(wq)
		? &cpu_singlethread_map : &cpu_populated_map;
}

static
struct cpu_workqueue_struct *wq_per_cpu(struct workqueue_struct *wq, int cpu)
{
	if (unlikely(is_single_threaded(wq)))
		cpu = singlethread_cpu;
	return per_cpu_ptr(wq->cpu_wq, cpu);
}

/*
 * Set the workqueue on which a work item is to be run
 * - Must *only* be called if the pending flag is set
 */
static inline void set_wq_data(struct work_struct *work,
				struct cpu_workqueue_struct *cwq)
{
	unsigned long new;

	BUG_ON(!work_pending(work));

	new = (unsigned long) cwq | (1UL << WORK_STRUCT_PENDING);
	new |= WORK_STRUCT_FLAG_MASK & *work_data_bits(work);
	atomic_long_set(&work->data, new);
}

static inline
struct cpu_workqueue_struct *get_wq_data(struct work_struct *work)
{
	return (void *) (atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK);
}

static void insert_work(struct cpu_workqueue_struct *cwq,
				struct work_struct *work, int tail)
{
	set_wq_data(work, cwq);
	/*
	 * Ensure that we get the right work->data if we see the
	 * result of list_add() below, see try_to_grab_pending().
	 */
	smp_wmb();
	if (tail)
		list_add_tail(&work->entry, &cwq->worklist);
	else
		list_add(&work->entry, &cwq->worklist);
	wake_up(&cwq->more_work);
}

/* Preempt must be disabled. */
static void __queue_work(struct cpu_workqueue_struct *cwq,
			 struct work_struct *work)
{
	unsigned long flags;

	spin_lock_irqsave(&cwq->lock, flags);
	insert_work(cwq, work, 1);
	spin_unlock_irqrestore(&cwq->lock, flags);
}

/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to the CPU on which it was submitted, but there is no
 * guarantee that it will be processed by that CPU.
 */
int fastcall queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
	int ret = 0;

	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
		BUG_ON(!list_empty(&work->entry));
		__queue_work(wq_per_cpu(wq, get_cpu()), work);
		put_cpu();
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_work);
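
/*
 * Illustrative usage sketch (not part of this file; "my_wq" and
 * "my_work_fn" are hypothetical names): a caller declares a work item
 * bound to a handler and queues it on a workqueue of its own.
 *
 *	static void my_work_fn(struct work_struct *work)
 *	{
 *		...	(runs in process context, may sleep)
 *	}
 *	static DECLARE_WORK(my_work, my_work_fn);
 *
 *	queue_work(my_wq, &my_work);
 */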

void delayed_work_timer_fn(unsigned long __data)
{
	struct delayed_work *dwork = (struct delayed_work *)__data;
	struct cpu_workqueue_struct *cwq = get_wq_data(&dwork->work);
	struct workqueue_struct *wq = cwq->wq;

	__queue_work(wq_per_cpu(wq, smp_processor_id()), &dwork->work);
}

/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @dwork was already on a queue, non-zero otherwise.
 */
int fastcall queue_delayed_work(struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	timer_stats_timer_set_start_info(&dwork->timer);
	if (delay == 0)
		return queue_work(wq, &dwork->work);

	return queue_delayed_work_on(-1, wq, dwork, delay);
}
EXPORT_SYMBOL_GPL(queue_delayed_work);
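
/*
 * Illustrative sketch (hypothetical names): delayed work is declared with
 * DECLARE_DELAYED_WORK() and queued with a jiffies delay, e.g. one second:
 *
 *	static void my_poll_fn(struct work_struct *work);
 *	static DECLARE_DELAYED_WORK(my_poll, my_poll_fn);
 *
 *	queue_delayed_work(my_wq, &my_poll, HZ);
 */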

/**
 * queue_delayed_work_on - queue work on specific CPU after delay
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @dwork was already on a queue, non-zero otherwise.
 */
int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	int ret = 0;
	struct timer_list *timer = &dwork->timer;
	struct work_struct *work = &dwork->work;

	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
		BUG_ON(timer_pending(timer));
		BUG_ON(!list_empty(&work->entry));

		/* This stores cwq for the moment, for the timer_fn */
		set_wq_data(work, wq_per_cpu(wq, raw_smp_processor_id()));
		timer->expires = jiffies + delay;
		timer->data = (unsigned long)dwork;
		timer->function = delayed_work_timer_fn;

		if (unlikely(cpu >= 0))
			add_timer_on(timer, cpu);
		else
			add_timer(timer);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_delayed_work_on);
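
/*
 * Sketch (hypothetical names): the same call with an explicit @cpu pins
 * the timer, and hence the eventual execution, to that CPU.
 *
 *	queue_delayed_work_on(cpu, my_wq, &my_poll, HZ);
 */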

static void run_workqueue(struct cpu_workqueue_struct *cwq)
{
	spin_lock_irq(&cwq->lock);
	cwq->run_depth++;
	if (cwq->run_depth > 3) {
		/* morton gets to eat his hat */
		printk("%s: recursion depth exceeded: %d\n",
			__FUNCTION__, cwq->run_depth);
		dump_stack();
	}
	while (!list_empty(&cwq->worklist)) {
		struct work_struct *work = list_entry(cwq->worklist.next,
						struct work_struct, entry);
		work_func_t f = work->func;

		cwq->current_work = work;
		list_del_init(cwq->worklist.next);
		spin_unlock_irq(&cwq->lock);

		BUG_ON(get_wq_data(work) != cwq);
		work_clear_pending(work);
		f(work);

		if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
			printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
					"%s/0x%08x/%d\n",
					current->comm, preempt_count(),
					current->pid);
			printk(KERN_ERR "    last function: ");
			print_symbol("%s\n", (unsigned long)f);
			debug_show_held_locks(current);
			dump_stack();
		}

		spin_lock_irq(&cwq->lock);
		cwq->current_work = NULL;
	}
	cwq->run_depth--;
	spin_unlock_irq(&cwq->lock);
}

static int worker_thread(void *__cwq)
{
	struct cpu_workqueue_struct *cwq = __cwq;
	DEFINE_WAIT(wait);

	if (cwq->wq->freezeable)
		set_freezable();

	set_user_nice(current, -5);

	for (;;) {
		prepare_to_wait(&cwq->more_work, &wait, TASK_INTERRUPTIBLE);
		if (!freezing(current) &&
		    !kthread_should_stop() &&
		    list_empty(&cwq->worklist))
			schedule();
		finish_wait(&cwq->more_work, &wait);

		try_to_freeze();

		if (kthread_should_stop())
			break;

		run_workqueue(cwq);
	}

	return 0;
}

/*
 * A wq_barrier is a dummy work item whose callback just completes ->done.
 * Callers insert one into a cwq's worklist and sleep on ->done, which
 * guarantees that every work item ahead of the barrier has been processed
 * by the time the caller wakes up.
 */
struct wq_barrier {
	struct work_struct	work;
	struct completion	done;
};

static void wq_barrier_func(struct work_struct *work)
{
	struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
	complete(&barr->done);
}

static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
					struct wq_barrier *barr, int tail)
{
	INIT_WORK(&barr->work, wq_barrier_func);
	__set_bit(WORK_STRUCT_PENDING, work_data_bits(&barr->work));

	init_completion(&barr->done);

	insert_work(cwq, &barr->work, tail);
}
static int flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
{
	int active;

	if (cwq->thread == current) {
		/*
		 * Probably keventd trying to flush its own queue. So simply run
		 * it by hand rather than deadlocking.
		 */
		run_workqueue(cwq);
		active = 1;
	} else {
		struct wq_barrier barr;

		active = 0;
		spin_lock_irq(&cwq->lock);
		if (!list_empty(&cwq->worklist) || cwq->current_work != NULL) {
			insert_wq_barrier(cwq, &barr, 1);
			active = 1;
		}
		spin_unlock_irq(&cwq->lock);

		if (active)
			wait_for_completion(&barr.done);
	}

	return active;
}
/**
 * flush_workqueue - ensure that any scheduled work has run to completion.
 * @wq: workqueue to flush
 *
 * Forces execution of the workqueue and blocks until its completion.
 * This is typically used in driver shutdown handlers.
 *
 * We sleep until all works which were queued on entry have been handled,
 * but we are not livelocked by new incoming ones.
 *
 * This function used to run the workqueues itself.  Now we just wait for the
 * helper threads to do it.
 */
void fastcall flush_workqueue(struct workqueue_struct *wq)
{
	const cpumask_t *cpu_map = wq_cpu_map(wq);
	int cpu;

	might_sleep();
	for_each_cpu_mask(cpu, *cpu_map)
		flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
}
EXPORT_SYMBOL_GPL(flush_workqueue);
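
/*
 * Sketch of the common shutdown pattern (hypothetical names): flush before
 * freeing anything the queued handlers might still touch.
 *
 *	flush_workqueue(my_wq);
 *	kfree(my_ctx);		(safe: all previously queued handlers ran)
 */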

/*
 * Upon a successful return (>= 0), the caller "owns" WORK_STRUCT_PENDING bit,
 * so this work can't be re-armed in any way.
 */
static int try_to_grab_pending(struct work_struct *work)
{
	struct cpu_workqueue_struct *cwq;
	int ret = -1;

	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work)))
		return 0;

	/*
	 * The queueing is in progress, or it is already queued. Try to
	 * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
	 */

	cwq = get_wq_data(work);
	if (!cwq)
		return ret;

	spin_lock_irq(&cwq->lock);
	if (!list_empty(&work->entry)) {
		/*
		 * This work is queued, but perhaps we locked the wrong cwq.
		 * In that case we must see the new value after rmb(), see
		 * insert_work()->wmb().
		 */
		smp_rmb();
		if (cwq == get_wq_data(work)) {
			list_del_init(&work->entry);
			ret = 1;
		}
	}
	spin_unlock_irq(&cwq->lock);

	return ret;
}

static void wait_on_cpu_work(struct cpu_workqueue_struct *cwq,
				struct work_struct *work)
{
	struct wq_barrier barr;
	int running = 0;

	spin_lock_irq(&cwq->lock);
	if (unlikely(cwq->current_work == work)) {
		insert_wq_barrier(cwq, &barr, 0);
		running = 1;
	}
	spin_unlock_irq(&cwq->lock);

	if (unlikely(running))
		wait_for_completion(&barr.done);
}

static void wait_on_work(struct work_struct *work)
{
	struct cpu_workqueue_struct *cwq;
	struct workqueue_struct *wq;
	const cpumask_t *cpu_map;
	int cpu;

	might_sleep();

	cwq = get_wq_data(work);
	if (!cwq)
		return;

	wq = cwq->wq;
	cpu_map = wq_cpu_map(wq);

	for_each_cpu_mask(cpu, *cpu_map)
		wait_on_cpu_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
}

static int __cancel_work_timer(struct work_struct *work,
				struct timer_list* timer)
{
	int ret;

	do {
		ret = (timer && likely(del_timer(timer)));
		if (!ret)
			ret = try_to_grab_pending(work);
		wait_on_work(work);
	} while (unlikely(ret < 0));

	work_clear_pending(work);
	return ret;
}

/**
 * cancel_work_sync - block until a work_struct's callback has terminated
 * @work: the work which is to be flushed
 *
 * Returns true if @work was pending.
 *
 * cancel_work_sync() will cancel the work if it is queued. If the work's
 * callback appears to be running, cancel_work_sync() will block until it
 * has completed.
 *
 * It is possible to use this function if the work re-queues itself. It can
 * cancel the work even if it migrates to another workqueue, however in that
 * case it only guarantees that work->func() has completed on the last queued
 * workqueue.
 *
 * cancel_work_sync(&delayed_work->work) should be used only if ->timer is not
 * pending, otherwise it goes into a busy-wait loop until the timer expires.
 *
 * The caller must ensure that the workqueue_struct on which this work was
 * last queued can't be destroyed before this function returns.
 */
int cancel_work_sync(struct work_struct *work)
{
	return __cancel_work_timer(work, NULL);
}
EXPORT_SYMBOL_GPL(cancel_work_sync);

/**
 * cancel_delayed_work_sync - reliably kill off a delayed work.
 * @dwork: the delayed work struct
 *
 * Returns true if @dwork was pending.
 *
 * It is possible to use this function if @dwork rearms itself via queue_work()
 * or queue_delayed_work(). See also the comment for cancel_work_sync().
 */
int cancel_delayed_work_sync(struct delayed_work *dwork)
{
	return __cancel_work_timer(&dwork->work, &dwork->timer);
}
EXPORT_SYMBOL(cancel_delayed_work_sync);
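
/*
 * Typical teardown sketch (hypothetical names): cancel both pending and
 * running handlers before the object they operate on goes away.
 *
 *	cancel_work_sync(&priv->reset_work);
 *	cancel_delayed_work_sync(&priv->poll_work);
 *	kfree(priv);
 */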

static struct workqueue_struct *keventd_wq __read_mostly;

/**
 * schedule_work - put work task in global workqueue
 * @work: job to be done
 *
 * This puts a job in the kernel-global workqueue.
 */
int fastcall schedule_work(struct work_struct *work)
{
	return queue_work(keventd_wq, work);
}
EXPORT_SYMBOL(schedule_work);
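
/*
 * Illustrative sketch: schedule_work() is the usual way to defer work from
 * an interrupt handler to process context via the shared keventd queue
 * ("my_irq_work" is a hypothetical, already-initialized work_struct):
 *
 *	static irqreturn_t my_irq(int irq, void *dev)
 *	{
 *		schedule_work(&my_irq_work);
 *		return IRQ_HANDLED;
 *	}
 */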

/**
 * schedule_delayed_work - put work task in global workqueue after delay
 * @dwork: job to be done
 * @delay: number of jiffies to wait or 0 for immediate execution
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue.
 */
int fastcall schedule_delayed_work(struct delayed_work *dwork,
					unsigned long delay)
{
	timer_stats_timer_set_start_info(&dwork->timer);
	return queue_delayed_work(keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work);

/**
 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
 * @cpu: cpu to use
 * @dwork: job to be done
 * @delay: number of jiffies to wait
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue on the specified CPU.
 */
int schedule_delayed_work_on(int cpu,
			struct delayed_work *dwork, unsigned long delay)
{
	return queue_delayed_work_on(cpu, keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work_on);

/**
 * schedule_on_each_cpu - call a function on each online CPU from keventd
 * @func: the function to call
 *
 * Returns zero on success.
 * Returns -ve errno on failure.
 *
 * Appears to be racy against CPU hotplug.
 *
 * schedule_on_each_cpu() is very slow.
 */
int schedule_on_each_cpu(work_func_t func)
{
	int cpu;
	struct work_struct *works;

	works = alloc_percpu(struct work_struct);
	if (!works)
		return -ENOMEM;

	preempt_disable();		/* CPU hotplug */
	for_each_online_cpu(cpu) {
		struct work_struct *work = per_cpu_ptr(works, cpu);

		INIT_WORK(work, func);
		set_bit(WORK_STRUCT_PENDING, work_data_bits(work));
		__queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu), work);
	}
	preempt_enable();
	flush_workqueue(keventd_wq);
	free_percpu(works);
	return 0;
}

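/*
 * Sketch (hypothetical handler): run a per-CPU operation everywhere and
 * wait for it to finish on all online CPUs.
 *
 *	static void drain_local_caches(struct work_struct *unused);
 *	...
 *	err = schedule_on_each_cpu(drain_local_caches);
 */
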
void flush_scheduled_work(void)
{
	flush_workqueue(keventd_wq);
}
EXPORT_SYMBOL(flush_scheduled_work);

/**
 * execute_in_process_context - reliably execute the routine with user context
 * @fn:		the function to execute
 * @ew:		guaranteed storage for the execute work structure (must
 *		be available when the work executes)
 *
 * Executes the function immediately if process context is available,
 * otherwise schedules the function for delayed execution.
 *
 * Returns:	0 - function was executed
 *		1 - function was scheduled for execution
 */
int execute_in_process_context(work_func_t fn, struct execute_work *ew)
{
	if (!in_interrupt()) {
		fn(&ew->work);
		return 0;
	}

	INIT_WORK(&ew->work, fn);
	schedule_work(&ew->work);

	return 1;
}
EXPORT_SYMBOL_GPL(execute_in_process_context);

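/*
 * Sketch (hypothetical names): @ew typically lives inside the object being
 * operated on, so the storage is guaranteed to outlive the deferred call.
 *
 *	execute_in_process_context(my_release_fn, &obj->ew);
 */
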
int keventd_up(void)
{
	return keventd_wq != NULL;
}

int current_is_keventd(void)
{
	struct cpu_workqueue_struct *cwq;
	int cpu = raw_smp_processor_id(); /* preempt-safe: keventd is per-cpu */
	int ret = 0;

	BUG_ON(!keventd_wq);

	cwq = per_cpu_ptr(keventd_wq->cpu_wq, cpu);
	if (current == cwq->thread)
		ret = 1;

	return ret;
}

static struct cpu_workqueue_struct *
init_cpu_workqueue(struct workqueue_struct *wq, int cpu)
{
	struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);

	cwq->wq = wq;
	spin_lock_init(&cwq->lock);
	INIT_LIST_HEAD(&cwq->worklist);
	init_waitqueue_head(&cwq->more_work);

	return cwq;
}

static int create_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
	struct workqueue_struct *wq = cwq->wq;
	const char *fmt = is_single_threaded(wq) ? "%s" : "%s/%d";
	struct task_struct *p;

	p = kthread_create(worker_thread, cwq, fmt, wq->name, cpu);
	/*
	 * Nobody can add the work_struct to this cwq,
	 *	if (caller is __create_workqueue)
	 *		nobody should see this wq
	 *	else // caller is CPU_UP_PREPARE
	 *		cpu is not on cpu_online_map
	 * so we can abort safely.
	 */
	if (IS_ERR(p))
		return PTR_ERR(p);

	cwq->thread = p;

	return 0;
}

static void start_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
	struct task_struct *p = cwq->thread;

	if (p != NULL) {
		if (cpu >= 0)
			kthread_bind(p, cpu);
		wake_up_process(p);
	}
}

struct workqueue_struct *__create_workqueue(const char *name,
					    int singlethread, int freezeable)
{
	struct workqueue_struct *wq;
	struct cpu_workqueue_struct *cwq;
	int err = 0, cpu;

	wq = kzalloc(sizeof(*wq), GFP_KERNEL);
	if (!wq)
		return NULL;

	wq->cpu_wq = alloc_percpu(struct cpu_workqueue_struct);
	if (!wq->cpu_wq) {
		kfree(wq);
		return NULL;
	}

	wq->name = name;
	wq->singlethread = singlethread;
	wq->freezeable = freezeable;
	INIT_LIST_HEAD(&wq->list);

	if (singlethread) {
		cwq = init_cpu_workqueue(wq, singlethread_cpu);
		err = create_workqueue_thread(cwq, singlethread_cpu);
		start_workqueue_thread(cwq, -1);
	} else {
		mutex_lock(&workqueue_mutex);
		list_add(&wq->list, &workqueues);

		for_each_possible_cpu(cpu) {
			cwq = init_cpu_workqueue(wq, cpu);
			if (err || !cpu_online(cpu))
				continue;
			err = create_workqueue_thread(cwq, cpu);
			start_workqueue_thread(cwq, cpu);
		}
		mutex_unlock(&workqueue_mutex);
	}

	if (err) {
		destroy_workqueue(wq);
		wq = NULL;
	}
	return wq;
}
EXPORT_SYMBOL_GPL(__create_workqueue);

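/*
 * Callers normally reach this through the linux/workqueue.h wrappers rather
 * than calling __create_workqueue() directly; at this point in the tree they
 * expand roughly as follows (sketch, see the header for the authoritative
 * definitions):
 *
 *	create_workqueue(name)			__create_workqueue(name, 0, 0)
 *	create_singlethread_workqueue(name)	__create_workqueue(name, 1, 0)
 *	create_freezeable_workqueue(name)	__create_workqueue(name, 1, 1)
 */
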
static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
	/*
	 * Our caller is either destroy_workqueue() or CPU_DEAD,
	 * workqueue_mutex protects cwq->thread
	 */
	if (cwq->thread == NULL)
		return;

	flush_cpu_workqueue(cwq);
	/*
	 * If the caller is CPU_DEAD and cwq->worklist was not empty,
	 * a concurrent flush_workqueue() can insert a barrier after us.
	 * However, in that case run_workqueue() won't return and check
	 * kthread_should_stop() until it flushes all work_struct's.
	 * When ->worklist becomes empty it is safe to exit because no
	 * more work_structs can be queued on this cwq: flush_workqueue
	 * checks list_empty(), and a "normal" queue_work() can't use
	 * a dead CPU.
	 */
	kthread_stop(cwq->thread);
	cwq->thread = NULL;
}

/**
 * destroy_workqueue - safely terminate a workqueue
 * @wq: target workqueue
 *
 * Safely destroy a workqueue. All work currently pending will be done first.
 */
void destroy_workqueue(struct workqueue_struct *wq)
{
	const cpumask_t *cpu_map = wq_cpu_map(wq);
	struct cpu_workqueue_struct *cwq;
	int cpu;

	mutex_lock(&workqueue_mutex);
	list_del(&wq->list);
	mutex_unlock(&workqueue_mutex);

	for_each_cpu_mask(cpu, *cpu_map) {
		cwq = per_cpu_ptr(wq->cpu_wq, cpu);
		cleanup_workqueue_thread(cwq, cpu);
	}

	free_percpu(wq->cpu_wq);
	kfree(wq);
}
EXPORT_SYMBOL_GPL(destroy_workqueue);

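/*
 * Full lifecycle sketch (hypothetical names): create a dedicated queue,
 * feed it work, then tear it down.
 *
 *	struct workqueue_struct *my_wq = create_workqueue("mydrv");
 *	if (!my_wq)
 *		return -ENOMEM;
 *	queue_work(my_wq, &my_work);
 *	...
 *	destroy_workqueue(my_wq);	(flushes pending work first)
 */
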
static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
						unsigned long action,
						void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct cpu_workqueue_struct *cwq;
	struct workqueue_struct *wq;

	action &= ~CPU_TASKS_FROZEN;

	switch (action) {
	case CPU_LOCK_ACQUIRE:
		mutex_lock(&workqueue_mutex);
		return NOTIFY_OK;

	case CPU_LOCK_RELEASE:
		mutex_unlock(&workqueue_mutex);
		return NOTIFY_OK;

	case CPU_UP_PREPARE:
		cpu_set(cpu, cpu_populated_map);
	}

	list_for_each_entry(wq, &workqueues, list) {
		cwq = per_cpu_ptr(wq->cpu_wq, cpu);

		switch (action) {
		case CPU_UP_PREPARE:
			if (!create_workqueue_thread(cwq, cpu))
				break;
			printk(KERN_ERR "workqueue for %i failed\n", cpu);
			return NOTIFY_BAD;

		case CPU_ONLINE:
			start_workqueue_thread(cwq, cpu);
			break;

		case CPU_UP_CANCELED:
			start_workqueue_thread(cwq, -1);
			/* fall through */
		case CPU_DEAD:
			cleanup_workqueue_thread(cwq, cpu);
			break;
		}
	}

	return NOTIFY_OK;
}

void __init init_workqueues(void)
{
	cpu_populated_map = cpu_online_map;
	singlethread_cpu = first_cpu(cpu_possible_map);
	cpu_singlethread_map = cpumask_of_cpu(singlethread_cpu);
	hotcpu_notifier(workqueue_cpu_callback, 0);
	keventd_wq = create_workqueue("events");
	BUG_ON(!keventd_wq);
}
8521da177e4SLinus Torvalds }
853