/*
 * linux/kernel/workqueue.c
 *
 * Generic mechanism for defining kernel helper threads for running
 * arbitrary tasks in process context.
 *
 * Started by Ingo Molnar, Copyright (C) 2002
 *
 * Derived from the taskqueue/keventd code by:
 *
 *   David Woodhouse <[email protected]>
 *   Andrew Morton <[email protected]>
 *   Kai Petzke <[email protected]>
 *   Theodore Ts'o <[email protected]>
 *
 * Made to use alloc_percpu by Christoph Lameter <[email protected]>.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/mempolicy.h>
#include <linux/freezer.h>
#include <linux/kallsyms.h>
#include <linux/debug_locks.h>

/*
 * The per-CPU workqueue (if single-threaded, we always use the first
 * possible CPU).
 */
struct cpu_workqueue_struct {

	spinlock_t lock;

	struct list_head worklist;
	wait_queue_head_t more_work;

	struct workqueue_struct *wq;
	struct task_struct *thread;
	struct work_struct *current_work;

	int run_depth;		/* Detect run_workqueue() recursion depth */
} ____cacheline_aligned;

/*
 * The externally visible workqueue abstraction is an array of
 * per-CPU workqueues:
 */
struct workqueue_struct {
	struct cpu_workqueue_struct *cpu_wq;
	const char *name;
	struct list_head list; 	/* Empty if single thread */
	int freezeable;		/* Freeze threads during suspend */
};

/* All the per-CPU workqueues on the system, so that CPU hotplug can
   add/remove threads to each one as CPUs come and go. */
static long migrate_sequence __read_mostly;
static DEFINE_MUTEX(workqueue_mutex);
static LIST_HEAD(workqueues);

static int singlethread_cpu;

/* If it's single threaded, it isn't in the list of workqueues. */
static inline int is_single_threaded(struct workqueue_struct *wq)
{
	return list_empty(&wq->list);
}

/*
 * Set the workqueue on which a work item is to be run
 * - Must *only* be called if the pending flag is set
 */
static inline void set_wq_data(struct work_struct *work, void *wq)
{
	unsigned long new;

	BUG_ON(!work_pending(work));

	new = (unsigned long) wq | (1UL << WORK_STRUCT_PENDING);
	new |= WORK_STRUCT_FLAG_MASK & *work_data_bits(work);
	atomic_long_set(&work->data, new);
}

static inline void *get_wq_data(struct work_struct *work)
{
	return (void *) (atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK);
}

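/*
 * Illustrative sketch (editor-added, not part of the original file):
 * set_wq_data()/get_wq_data() above rely on the cpu_workqueue_struct
 * pointer being aligned, so its low bits are free to hold the
 * WORK_STRUCT_* flag bits inside the single atomic_long_t work->data.
 * The example_* names below are hypothetical.
 */
static inline unsigned long example_pack_wq_data(void *wq, unsigned long flags)
{
	/* Pointer in the high bits, flag bits in the low bits. */
	return (unsigned long)wq | (flags & WORK_STRUCT_FLAG_MASK);
}

static inline void *example_unpack_wq_data(unsigned long data)
{
	/* Masking off the flag bits recovers the original pointer. */
	return (void *)(data & WORK_STRUCT_WQ_DATA_MASK);
}
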
static int __run_work(struct cpu_workqueue_struct *cwq, struct work_struct *work)
{
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&cwq->lock, flags);
	/*
	 * We need to re-validate the work info after we've gotten
	 * the cpu_workqueue lock. We can run the work now iff:
	 *
	 *  - the wq_data still matches the cpu_workqueue_struct
	 *  - AND the work is still marked pending
	 *  - AND the work is still on a list (which will be this
	 *    workqueue_struct list)
	 *
	 * All these conditions are important, because we
	 * need to protect against the work being run right
	 * now on another CPU (all but the last one might be
	 * true if it's currently running and has not been
	 * released yet, for example).
	 */
	if (get_wq_data(work) == cwq
	    && work_pending(work)
	    && !list_empty(&work->entry)) {
		work_func_t f = work->func;
		cwq->current_work = work;
		list_del_init(&work->entry);
		spin_unlock_irqrestore(&cwq->lock, flags);

		if (!test_bit(WORK_STRUCT_NOAUTOREL, work_data_bits(work)))
			work_release(work);
		f(work);

		spin_lock_irqsave(&cwq->lock, flags);
		cwq->current_work = NULL;
		ret = 1;
	}
	spin_unlock_irqrestore(&cwq->lock, flags);
	return ret;
}

/**
 * run_scheduled_work - run scheduled work synchronously
 * @work: work to run
 *
 * This checks if the work was pending, and runs it
 * synchronously if so. It returns a boolean to indicate
 * whether it had any scheduled work to run or not.
 *
 * NOTE! This _only_ works for normal work_structs. You
 * CANNOT use this for delayed work, because the wq data
 * for delayed work will not point properly to the per-
 * CPU workqueue struct, but will change!
 */
int fastcall run_scheduled_work(struct work_struct *work)
{
	for (;;) {
		struct cpu_workqueue_struct *cwq;

		if (!work_pending(work))
			return 0;
		if (list_empty(&work->entry))
			return 0;
		/* NOTE! This depends intimately on __queue_work! */
		cwq = get_wq_data(work);
		if (!cwq)
			return 0;
		if (__run_work(cwq, work))
			return 1;
	}
}
EXPORT_SYMBOL(run_scheduled_work);

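/*
 * Usage sketch (editor-added, hypothetical my_* names): a queued,
 * non-delayed work item can be executed in the caller's context
 * instead of waiting for the workqueue thread to get to it.
 */
static void my_work_fn(struct work_struct *work)
{
	/* Runs in process context. */
}
static DECLARE_WORK(my_work, my_work_fn);

static void my_run_now(void)
{
	schedule_work(&my_work);
	/* Pull it back off the queue and run it synchronously. */
	if (!run_scheduled_work(&my_work))
		pr_debug("my_work already ran or was never pending\n");
}
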
static void insert_work(struct cpu_workqueue_struct *cwq,
				struct work_struct *work, int tail)
{
	set_wq_data(work, cwq);
	if (tail)
		list_add_tail(&work->entry, &cwq->worklist);
	else
		list_add(&work->entry, &cwq->worklist);
	wake_up(&cwq->more_work);
}

/* Preempt must be disabled. */
static void __queue_work(struct cpu_workqueue_struct *cwq,
			 struct work_struct *work)
{
	unsigned long flags;

	spin_lock_irqsave(&cwq->lock, flags);
	insert_work(cwq, work, 1);
	spin_unlock_irqrestore(&cwq->lock, flags);
}

/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to the CPU on which it was submitted, but there is
 * no guarantee that it will be processed by that CPU.
 */
int fastcall queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
	int ret = 0, cpu = get_cpu();

	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
		if (unlikely(is_single_threaded(wq)))
			cpu = singlethread_cpu;
		BUG_ON(!list_empty(&work->entry));
		__queue_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
		ret = 1;
	}
	put_cpu();
	return ret;
}
EXPORT_SYMBOL_GPL(queue_work);

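/*
 * Usage sketch (editor-added, hypothetical my_* names): the usual
 * pattern embeds the work_struct in a driver structure and recovers
 * the container with container_of() in the handler.
 */
struct my_device {
	struct workqueue_struct *wq;
	struct work_struct reset_work;
};

static void my_reset_fn(struct work_struct *work)
{
	struct my_device *dev = container_of(work, struct my_device, reset_work);
	/* ... reset 'dev' in process context ... */
}

static void my_device_init(struct my_device *dev)
{
	INIT_WORK(&dev->reset_work, my_reset_fn);	/* once, at setup */
}

static void my_request_reset(struct my_device *dev)
{
	/* queue_work() returns 0 if the work was already queued. */
	if (!queue_work(dev->wq, &dev->reset_work))
		pr_debug("reset already pending\n");
}
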
void delayed_work_timer_fn(unsigned long __data)
{
	struct delayed_work *dwork = (struct delayed_work *)__data;
	struct workqueue_struct *wq = get_wq_data(&dwork->work);
	int cpu = smp_processor_id();

	if (unlikely(is_single_threaded(wq)))
		cpu = singlethread_cpu;

	__queue_work(per_cpu_ptr(wq->cpu_wq, cpu), &dwork->work);
}

/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @dwork was already on a queue, non-zero otherwise.
 */
int fastcall queue_delayed_work(struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	int ret = 0;
	struct timer_list *timer = &dwork->timer;
	struct work_struct *work = &dwork->work;

	timer_stats_timer_set_start_info(timer);
	if (delay == 0)
		return queue_work(wq, work);

	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
		BUG_ON(timer_pending(timer));
		BUG_ON(!list_empty(&work->entry));

		/* This stores wq for the moment, for the timer_fn */
		set_wq_data(work, wq);
		timer->expires = jiffies + delay;
		timer->data = (unsigned long)dwork;
		timer->function = delayed_work_timer_fn;
		add_timer(timer);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_delayed_work);

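/*
 * Usage sketch (editor-added, hypothetical my_* names): delayed work
 * uses struct delayed_work, which bundles a work_struct with the timer
 * that delayed_work_timer_fn() above fires from.
 */
static void my_poll_fn(struct work_struct *work)
{
	/* ... poll the hardware ... */
}
static DECLARE_DELAYED_WORK(my_poll, my_poll_fn);

static void my_start_polling(struct workqueue_struct *wq)
{
	/* Queue my_poll on wq roughly one second from now. */
	queue_delayed_work(wq, &my_poll, HZ);
}
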
/**
 * queue_delayed_work_on - queue work on specific CPU after delay
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @dwork was already on a queue, non-zero otherwise.
 */
int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	int ret = 0;
	struct timer_list *timer = &dwork->timer;
	struct work_struct *work = &dwork->work;

	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
		BUG_ON(timer_pending(timer));
		BUG_ON(!list_empty(&work->entry));

		/* This stores wq for the moment, for the timer_fn */
		set_wq_data(work, wq);
		timer->expires = jiffies + delay;
		timer->data = (unsigned long)dwork;
		timer->function = delayed_work_timer_fn;
		add_timer_on(timer, cpu);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_delayed_work_on);

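/*
 * Usage sketch (editor-added, hypothetical names): as above, but the
 * timer (and hence the work) is pinned to one CPU, e.g. so per-CPU
 * statistics stay local.
 */
static void my_queue_stats_on(int cpu, struct workqueue_struct *wq,
			      struct delayed_work *my_stats_work)
{
	/* Fire on 'cpu' roughly 100ms from now. */
	queue_delayed_work_on(cpu, wq, my_stats_work, msecs_to_jiffies(100));
}
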
static void run_workqueue(struct cpu_workqueue_struct *cwq)
{
	unsigned long flags;

	/*
	 * Keep taking work off the queue until done.
	 */
	spin_lock_irqsave(&cwq->lock, flags);
	cwq->run_depth++;
	if (cwq->run_depth > 3) {
		/* morton gets to eat his hat */
		printk("%s: recursion depth exceeded: %d\n",
			__FUNCTION__, cwq->run_depth);
		dump_stack();
	}
	while (!list_empty(&cwq->worklist)) {
		struct work_struct *work = list_entry(cwq->worklist.next,
						struct work_struct, entry);
		work_func_t f = work->func;

		cwq->current_work = work;
		list_del_init(cwq->worklist.next);
		spin_unlock_irqrestore(&cwq->lock, flags);

		BUG_ON(get_wq_data(work) != cwq);
		if (!test_bit(WORK_STRUCT_NOAUTOREL, work_data_bits(work)))
			work_release(work);
		f(work);

		if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
			printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
					"%s/0x%08x/%d\n",
					current->comm, preempt_count(),
					current->pid);
			printk(KERN_ERR "    last function: ");
			print_symbol("%s\n", (unsigned long)f);
			debug_show_held_locks(current);
			dump_stack();
		}

		spin_lock_irqsave(&cwq->lock, flags);
		cwq->current_work = NULL;
	}
	cwq->run_depth--;
	spin_unlock_irqrestore(&cwq->lock, flags);
}

static int worker_thread(void *__cwq)
{
	struct cpu_workqueue_struct *cwq = __cwq;
	DECLARE_WAITQUEUE(wait, current);
	struct k_sigaction sa;
	sigset_t blocked;

	if (!cwq->wq->freezeable)
		current->flags |= PF_NOFREEZE;

	set_user_nice(current, -5);

	/* Block and flush all signals */
	sigfillset(&blocked);
	sigprocmask(SIG_BLOCK, &blocked, NULL);
	flush_signals(current);

	/*
	 * We inherited MPOL_INTERLEAVE from the booting kernel.
	 * Set MPOL_DEFAULT to ensure node-local allocations.
	 */
	numa_default_policy();

	/* SIG_IGN makes children autoreap: see do_notify_parent(). */
	sa.sa.sa_handler = SIG_IGN;
	sa.sa.sa_flags = 0;
	siginitset(&sa.sa.sa_mask, sigmask(SIGCHLD));
	do_sigaction(SIGCHLD, &sa, (struct k_sigaction *)0);

	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		if (cwq->wq->freezeable)
			try_to_freeze();

		add_wait_queue(&cwq->more_work, &wait);
		if (list_empty(&cwq->worklist))
			schedule();
		else
			__set_current_state(TASK_RUNNING);
		remove_wait_queue(&cwq->more_work, &wait);

		if (!list_empty(&cwq->worklist))
			run_workqueue(cwq);
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}

struct wq_barrier {
	struct work_struct	work;
	struct completion	done;
};

static void wq_barrier_func(struct work_struct *work)
{
	struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
	complete(&barr->done);
}

static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
					struct wq_barrier *barr, int tail)
{
	INIT_WORK(&barr->work, wq_barrier_func);
	__set_bit(WORK_STRUCT_PENDING, work_data_bits(&barr->work));

	init_completion(&barr->done);

	insert_work(cwq, &barr->work, tail);
}

static void flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
{
	if (cwq->thread == current) {
		/*
		 * Probably keventd trying to flush its own queue. So simply run
		 * it by hand rather than deadlocking.
		 */
		run_workqueue(cwq);
	} else {
		struct wq_barrier barr;
		int active = 0;

		spin_lock_irq(&cwq->lock);
		if (!list_empty(&cwq->worklist) || cwq->current_work != NULL) {
			insert_wq_barrier(cwq, &barr, 1);
			active = 1;
		}
		spin_unlock_irq(&cwq->lock);

		if (active)
			wait_for_completion(&barr.done);
	}
}

/**
 * flush_workqueue - ensure that any scheduled work has run to completion.
 * @wq: workqueue to flush
 *
 * Forces execution of the workqueue and blocks until its completion.
 * This is typically used in driver shutdown handlers.
 *
 * We sleep until all work items which were queued on entry have been
 * handled, but we are not livelocked by new incoming ones.
 *
 * This function used to run the workqueues itself.  Now we just wait for the
 * helper threads to do it.
 */
void fastcall flush_workqueue(struct workqueue_struct *wq)
{
	if (is_single_threaded(wq)) {
		/* Always use first cpu's area. */
		flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, singlethread_cpu));
	} else {
		long sequence;
		int cpu;
again:
		sequence = migrate_sequence;

		for_each_possible_cpu(cpu)
			flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));

		if (unlikely(sequence != migrate_sequence))
			goto again;
	}
}
EXPORT_SYMBOL_GPL(flush_workqueue);

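/*
 * Usage sketch (editor-added, hypothetical names): a typical shutdown
 * path first makes sure handlers can no longer requeue themselves,
 * then flushes; otherwise the flush could chase a self-rearming work
 * item forever.
 */
static int my_stopping;		/* checked by the work handlers */

static void my_shutdown(struct workqueue_struct *wq)
{
	my_stopping = 1;	/* prevent handlers from requeueing */
	flush_workqueue(wq);	/* wait for everything queued so far */
}
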
static void wait_on_work(struct cpu_workqueue_struct *cwq,
				struct work_struct *work)
{
	struct wq_barrier barr;
	int running = 0;

	spin_lock_irq(&cwq->lock);
	if (unlikely(cwq->current_work == work)) {
		insert_wq_barrier(cwq, &barr, 0);
		running = 1;
	}
	spin_unlock_irq(&cwq->lock);

	if (unlikely(running)) {
		mutex_unlock(&workqueue_mutex);
		wait_for_completion(&barr.done);
		mutex_lock(&workqueue_mutex);
	}
}

/**
 * flush_work - block until a work_struct's callback has terminated
 * @wq: the workqueue on which the work is queued
 * @work: the work which is to be flushed
 *
 * flush_work() will attempt to cancel the work if it is queued.  If the work's
 * callback appears to be running, flush_work() will block until it has
 * completed.
 *
 * flush_work() is designed to be used when the caller is tearing down data
 * structures which the callback function operates upon.  It is expected that,
 * prior to calling flush_work(), the caller has arranged for the work to not
 * be requeued.
 */
void flush_work(struct workqueue_struct *wq, struct work_struct *work)
{
	struct cpu_workqueue_struct *cwq;

	mutex_lock(&workqueue_mutex);
	cwq = get_wq_data(work);
	/* Was it ever queued? */
	if (!cwq)
		goto out;

	/*
	 * This work can't be re-queued, and the lock above protects us
	 * from take_over_work(), no need to re-check that get_wq_data()
	 * is still the same when we take cwq->lock.
	 */
	spin_lock_irq(&cwq->lock);
	list_del_init(&work->entry);
	work_release(work);
	spin_unlock_irq(&cwq->lock);

	if (is_single_threaded(wq)) {
		/* Always use first cpu's area. */
		wait_on_work(per_cpu_ptr(wq->cpu_wq, singlethread_cpu), work);
	} else {
		int cpu;

		for_each_online_cpu(cpu)
			wait_on_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
	}
out:
	mutex_unlock(&workqueue_mutex);
}
EXPORT_SYMBOL_GPL(flush_work);

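/*
 * Usage sketch (editor-added, hypothetical names): with the
 * two-argument flush_work() above, tearing down an object whose
 * handler may be queued or running looks like this.
 */
static void my_teardown(struct workqueue_struct *wq, struct work_struct *work)
{
	/* Caller must already have prevented re-queueing of 'work'. */
	flush_work(wq, work);
	/* Handler is now neither queued nor running; safe to free. */
}
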
static void init_cpu_workqueue(struct workqueue_struct *wq, int cpu)
{
	struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);

	cwq->wq = wq;
	spin_lock_init(&cwq->lock);
	INIT_LIST_HEAD(&cwq->worklist);
	init_waitqueue_head(&cwq->more_work);
}

static struct task_struct *create_workqueue_thread(struct workqueue_struct *wq,
							int cpu)
{
	struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);
	struct task_struct *p;

	if (is_single_threaded(wq))
		p = kthread_create(worker_thread, cwq, "%s", wq->name);
	else
		p = kthread_create(worker_thread, cwq, "%s/%d", wq->name, cpu);
	if (IS_ERR(p))
		return NULL;
	cwq->thread = p;
	return p;
}

struct workqueue_struct *__create_workqueue(const char *name,
					    int singlethread, int freezeable)
{
	int cpu, destroy = 0;
	struct workqueue_struct *wq;
	struct task_struct *p;

	wq = kzalloc(sizeof(*wq), GFP_KERNEL);
	if (!wq)
		return NULL;

	wq->cpu_wq = alloc_percpu(struct cpu_workqueue_struct);
	if (!wq->cpu_wq) {
		kfree(wq);
		return NULL;
	}

	wq->name = name;
	wq->freezeable = freezeable;

	mutex_lock(&workqueue_mutex);
	if (singlethread) {
		INIT_LIST_HEAD(&wq->list);
		init_cpu_workqueue(wq, singlethread_cpu);
		p = create_workqueue_thread(wq, singlethread_cpu);
		if (!p)
			destroy = 1;
		else
			wake_up_process(p);
	} else {
		list_add(&wq->list, &workqueues);
		for_each_possible_cpu(cpu) {
			init_cpu_workqueue(wq, cpu);
			if (!cpu_online(cpu))
				continue;

			p = create_workqueue_thread(wq, cpu);
			if (p) {
				kthread_bind(p, cpu);
				wake_up_process(p);
			} else
				destroy = 1;
		}
	}
	mutex_unlock(&workqueue_mutex);

	/*
	 * Was there any error during startup? If yes then clean up:
	 */
	if (destroy) {
		destroy_workqueue(wq);
		wq = NULL;
	}
	return wq;
}
EXPORT_SYMBOL_GPL(__create_workqueue);

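/*
 * Usage sketch (editor-added, hypothetical my_* names):
 * create_workqueue() and create_singlethread_workqueue() are the usual
 * wrappers around __create_workqueue() (see linux/workqueue.h), and
 * destroy_workqueue() below is the matching teardown.
 */
static struct workqueue_struct *my_wq;

static int my_module_init(void)
{
	my_wq = create_singlethread_workqueue("my_wq");
	if (!my_wq)
		return -ENOMEM;
	return 0;
}

static void my_module_exit(void)
{
	destroy_workqueue(my_wq);	/* flushes pending work first */
}
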
static void cleanup_workqueue_thread(struct workqueue_struct *wq, int cpu)
{
	struct cpu_workqueue_struct *cwq;
	unsigned long flags;
	struct task_struct *p;

	cwq = per_cpu_ptr(wq->cpu_wq, cpu);
	spin_lock_irqsave(&cwq->lock, flags);
	p = cwq->thread;
	cwq->thread = NULL;
	spin_unlock_irqrestore(&cwq->lock, flags);
	if (p)
		kthread_stop(p);
}

/**
 * destroy_workqueue - safely terminate a workqueue
 * @wq: target workqueue
 *
 * Safely destroy a workqueue. All work currently pending will be done first.
 */
void destroy_workqueue(struct workqueue_struct *wq)
{
	int cpu;

	flush_workqueue(wq);

	/* We don't need the distraction of CPUs appearing and vanishing. */
	mutex_lock(&workqueue_mutex);
	if (is_single_threaded(wq))
		cleanup_workqueue_thread(wq, singlethread_cpu);
	else {
		for_each_online_cpu(cpu)
			cleanup_workqueue_thread(wq, cpu);
		list_del(&wq->list);
	}
	mutex_unlock(&workqueue_mutex);
	free_percpu(wq->cpu_wq);
	kfree(wq);
}
EXPORT_SYMBOL_GPL(destroy_workqueue);

static struct workqueue_struct *keventd_wq;

/**
 * schedule_work - put work task in global workqueue
 * @work: job to be done
 *
 * This puts a job in the kernel-global workqueue.
 */
int fastcall schedule_work(struct work_struct *work)
{
	return queue_work(keventd_wq, work);
}
EXPORT_SYMBOL(schedule_work);

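/*
 * Usage sketch (editor-added, hypothetical my_* names, assumes
 * <linux/interrupt.h>): the classic use of schedule_work() is
 * deferring the slow half of an interrupt handler to keventd's
 * process context.
 */
static void my_irq_bh_fn(struct work_struct *work)
{
	/* Can sleep here: runs in keventd's process context. */
}
static DECLARE_WORK(my_irq_bh, my_irq_bh_fn);

static irqreturn_t my_irq_handler(int irq, void *dev_id)
{
	/* Cannot sleep here; punt the slow part to keventd. */
	schedule_work(&my_irq_bh);
	return IRQ_HANDLED;
}
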
/**
 * schedule_delayed_work - put work task in global workqueue after delay
 * @dwork: job to be done
 * @delay: number of jiffies to wait or 0 for immediate execution
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue.
 */
int fastcall schedule_delayed_work(struct delayed_work *dwork,
					unsigned long delay)
{
	timer_stats_timer_set_start_info(&dwork->timer);
	return queue_delayed_work(keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work);

/**
 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
 * @cpu: cpu to use
 * @dwork: job to be done
 * @delay: number of jiffies to wait
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue on the specified CPU.
 */
int schedule_delayed_work_on(int cpu,
			struct delayed_work *dwork, unsigned long delay)
{
	return queue_delayed_work_on(cpu, keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work_on);

/**
 * schedule_on_each_cpu - call a function on each online CPU from keventd
 * @func: the function to call
 *
 * Returns zero on success.
 * Returns -ve errno on failure.
 *
 * Appears to be racy against CPU hotplug.
 *
 * schedule_on_each_cpu() is very slow.
 */
int schedule_on_each_cpu(work_func_t func)
{
	int cpu;
	struct work_struct *works;

	works = alloc_percpu(struct work_struct);
	if (!works)
		return -ENOMEM;

	preempt_disable();		/* CPU hotplug */
	for_each_online_cpu(cpu) {
		struct work_struct *work = per_cpu_ptr(works, cpu);

		INIT_WORK(work, func);
		set_bit(WORK_STRUCT_PENDING, work_data_bits(work));
		__queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu), work);
	}
	preempt_enable();
	flush_workqueue(keventd_wq);
	free_percpu(works);
	return 0;
}

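/*
 * Usage sketch (editor-added, hypothetical my_* names): running a
 * function once on every online CPU, e.g. to drain per-CPU caches.
 * The call blocks until all the per-CPU invocations have finished.
 */
static void my_drain_fn(struct work_struct *unused)
{
	/* Runs on each online CPU in keventd context. */
}

static int my_drain_all(void)
{
	return schedule_on_each_cpu(my_drain_fn);	/* 0 or -ENOMEM */
}
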
void flush_scheduled_work(void)
{
	flush_workqueue(keventd_wq);
}
EXPORT_SYMBOL(flush_scheduled_work);

void flush_work_keventd(struct work_struct *work)
{
	flush_work(keventd_wq, work);
}
EXPORT_SYMBOL(flush_work_keventd);

/**
 * cancel_rearming_delayed_workqueue - reliably kill off a delayed work whose handler rearms the delayed work.
 * @wq:   the controlling workqueue structure
 * @dwork: the delayed work struct
 */
void cancel_rearming_delayed_workqueue(struct workqueue_struct *wq,
				       struct delayed_work *dwork)
{
	while (!cancel_delayed_work(dwork))
		flush_workqueue(wq);
}
EXPORT_SYMBOL(cancel_rearming_delayed_workqueue);

/**
 * cancel_rearming_delayed_work - reliably kill off a delayed keventd work whose handler rearms the delayed work.
 * @dwork: the delayed work struct
 */
void cancel_rearming_delayed_work(struct delayed_work *dwork)
{
	cancel_rearming_delayed_workqueue(keventd_wq, dwork);
}
EXPORT_SYMBOL(cancel_rearming_delayed_work);

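/*
 * Usage sketch (editor-added, hypothetical my_* names): a self-rearming
 * delayed work and the matching reliable teardown.  A plain
 * cancel_delayed_work() could lose the race with the handler rearming
 * itself; the cancel+flush loop above cannot.
 */
static void my_tick_fn(struct work_struct *work);
static DECLARE_DELAYED_WORK(my_tick, my_tick_fn);

static void my_tick_fn(struct work_struct *work)
{
	/* ... periodic work ... */
	schedule_delayed_work(&my_tick, HZ);	/* rearm */
}

static void my_stop_ticking(void)
{
	cancel_rearming_delayed_work(&my_tick);
}
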
/**
 * execute_in_process_context - reliably execute the routine with user context
 * @fn:		the function to execute
 * @ew:		guaranteed storage for the execute work structure (must
 *		be available when the work executes)
 *
 * Executes the function immediately if process context is available,
 * otherwise schedules the function for delayed execution.
 *
 * Returns:	0 - function was executed
 *		1 - function was scheduled for execution
 */
int execute_in_process_context(work_func_t fn, struct execute_work *ew)
{
	if (!in_interrupt()) {
		fn(&ew->work);
		return 0;
	}

	INIT_WORK(&ew->work, fn);
	schedule_work(&ew->work);

	return 1;
}
EXPORT_SYMBOL_GPL(execute_in_process_context);

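/*
 * Usage sketch (editor-added, hypothetical my_* names): the
 * execute_work storage must outlive a possible deferral, so it is
 * embedded in the object rather than placed on the stack.
 */
struct my_object {
	struct execute_work release_ew;
	/* ... */
};

static void my_release_fn(struct work_struct *work)
{
	struct my_object *obj =
		container_of(work, struct my_object, release_ew.work);
	kfree(obj);
}

static void my_put_object(struct my_object *obj)
{
	/* Frees immediately if possible, otherwise via keventd. */
	execute_in_process_context(my_release_fn, &obj->release_ew);
}
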
int keventd_up(void)
{
	return keventd_wq != NULL;
}

int current_is_keventd(void)
{
	struct cpu_workqueue_struct *cwq;
	int cpu = smp_processor_id();	/* preempt-safe: keventd is per-cpu */
	int ret = 0;

	BUG_ON(!keventd_wq);

	cwq = per_cpu_ptr(keventd_wq->cpu_wq, cpu);
	if (current == cwq->thread)
		ret = 1;

	return ret;
}

/* Take the work from this (downed) CPU. */
static void take_over_work(struct workqueue_struct *wq, unsigned int cpu)
{
	struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);
	struct list_head list;
	struct work_struct *work;

	spin_lock_irq(&cwq->lock);
	list_replace_init(&cwq->worklist, &list);
	migrate_sequence++;

	while (!list_empty(&list)) {
		printk("Taking work for %s\n", wq->name);
		work = list_entry(list.next, struct work_struct, entry);
		list_del(&work->entry);
		__queue_work(per_cpu_ptr(wq->cpu_wq, smp_processor_id()), work);
	}
	spin_unlock_irq(&cwq->lock);
}

/* We're holding the cpucontrol mutex here */
static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
				  unsigned long action,
				  void *hcpu)
{
	unsigned int hotcpu = (unsigned long)hcpu;
	struct workqueue_struct *wq;

	switch (action) {
	case CPU_UP_PREPARE:
		mutex_lock(&workqueue_mutex);
		/* Create a new workqueue thread for it. */
		list_for_each_entry(wq, &workqueues, list) {
			if (!create_workqueue_thread(wq, hotcpu)) {
				printk("workqueue for %i failed\n", hotcpu);
				return NOTIFY_BAD;
			}
		}
		break;

	case CPU_ONLINE:
		/* Kick off worker threads. */
		list_for_each_entry(wq, &workqueues, list) {
			struct cpu_workqueue_struct *cwq;

			cwq = per_cpu_ptr(wq->cpu_wq, hotcpu);
			kthread_bind(cwq->thread, hotcpu);
			wake_up_process(cwq->thread);
		}
		mutex_unlock(&workqueue_mutex);
		break;

	case CPU_UP_CANCELED:
		list_for_each_entry(wq, &workqueues, list) {
			if (!per_cpu_ptr(wq->cpu_wq, hotcpu)->thread)
				continue;
			/* Unbind so it can run. */
			kthread_bind(per_cpu_ptr(wq->cpu_wq, hotcpu)->thread,
				     any_online_cpu(cpu_online_map));
			cleanup_workqueue_thread(wq, hotcpu);
		}
		mutex_unlock(&workqueue_mutex);
		break;

	case CPU_DOWN_PREPARE:
		mutex_lock(&workqueue_mutex);
		break;

	case CPU_DOWN_FAILED:
		mutex_unlock(&workqueue_mutex);
		break;

	case CPU_DEAD:
		list_for_each_entry(wq, &workqueues, list)
			cleanup_workqueue_thread(wq, hotcpu);
		list_for_each_entry(wq, &workqueues, list)
			take_over_work(wq, hotcpu);
		mutex_unlock(&workqueue_mutex);
		break;
	}

	return NOTIFY_OK;
}

void init_workqueues(void)
{
	singlethread_cpu = first_cpu(cpu_possible_map);
	hotcpu_notifier(workqueue_cpu_callback, 0);
	keventd_wq = create_workqueue("events");
	BUG_ON(!keventd_wq);
}