/*
 * linux/kernel/workqueue.c
 *
 * Generic mechanism for defining kernel helper threads for running
 * arbitrary tasks in process context.
 *
 * Started by Ingo Molnar, Copyright (C) 2002
 *
 * Derived from the taskqueue/keventd code by:
 *
 *   David Woodhouse <[email protected]>
 *   Andrew Morton
 *   Kai Petzke <[email protected]>
 *   Theodore Ts'o <[email protected]>
 *
 * Made to use alloc_percpu by Christoph Lameter.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/mempolicy.h>
#include <linux/freezer.h>
#include <linux/kallsyms.h>
#include <linux/debug_locks.h>
#include <linux/lockdep.h>
#define CREATE_TRACE_POINTS
#include <trace/events/workqueue.h>

/*
 * The per-CPU workqueue (if single-threaded, we always use the first
 * possible CPU).
 */
struct cpu_workqueue_struct {

	spinlock_t lock;

	struct list_head worklist;
	wait_queue_head_t more_work;
	struct work_struct *current_work;

	struct workqueue_struct *wq;
	struct task_struct *thread;
} ____cacheline_aligned;

/*
 * The externally visible workqueue abstraction is an array of
 * per-CPU workqueues:
 */
struct workqueue_struct {
	struct cpu_workqueue_struct *cpu_wq;
	struct list_head list;
	const char *name;
	int singlethread;
	int freezeable;		/* Freeze threads during suspend */
	int rt;
#ifdef CONFIG_LOCKDEP
	struct lockdep_map lockdep_map;
#endif
};

/* Serializes the accesses to the list of workqueues. */
static DEFINE_SPINLOCK(workqueue_lock);
static LIST_HEAD(workqueues);

static int singlethread_cpu __read_mostly;
static const struct cpumask *cpu_singlethread_map __read_mostly;
/*
 * _cpu_down() first removes CPU from cpu_online_map, then CPU_DEAD
 * flushes cwq->worklist. This means that flush_workqueue/wait_on_work
 * which comes in between can't use for_each_online_cpu(). We could
 * use cpu_possible_map; the cpumask below is more documentation
 * than optimization.
 */
static cpumask_var_t cpu_populated_map __read_mostly;

/* If it's single threaded, it isn't in the list of workqueues. */
static inline int is_wq_single_threaded(struct workqueue_struct *wq)
{
	return wq->singlethread;
}

static const struct cpumask *wq_cpu_map(struct workqueue_struct *wq)
{
	return is_wq_single_threaded(wq)
		? cpu_singlethread_map : cpu_populated_map;
}

static
struct cpu_workqueue_struct *wq_per_cpu(struct workqueue_struct *wq, int cpu)
{
	if (unlikely(is_wq_single_threaded(wq)))
		cpu = singlethread_cpu;
	return per_cpu_ptr(wq->cpu_wq, cpu);
}

/*
 * Set the workqueue on which a work item is to be run
 * - Must *only* be called if the pending flag is set
 */
static inline void set_wq_data(struct work_struct *work,
				struct cpu_workqueue_struct *cwq)
{
	unsigned long new;

	BUG_ON(!work_pending(work));

	new = (unsigned long) cwq | (1UL << WORK_STRUCT_PENDING);
	new |= WORK_STRUCT_FLAG_MASK & *work_data_bits(work);
	atomic_long_set(&work->data, new);
}

static inline
struct cpu_workqueue_struct *get_wq_data(struct work_struct *work)
{
	return (void *) (atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK);
}
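
/*
 * Illustrative note (added for exposition; not in the original source):
 * work->data packs the cwq pointer and the flag bits into a single word.
 * Since a cpu_workqueue_struct is cacheline-aligned, its low-order bits
 * are always zero and can carry WORK_STRUCT_PENDING and friends:
 *
 *	data = (unsigned long)cwq | flags;		pointer in high bits
 *	cwq  = data & WORK_STRUCT_WQ_DATA_MASK;		flags masked back off
 */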

static void insert_work(struct cpu_workqueue_struct *cwq,
			struct work_struct *work, struct list_head *head)
{
	trace_workqueue_insertion(cwq->thread, work);

	set_wq_data(work, cwq);
	/*
	 * Ensure that we get the right work->data if we see the
	 * result of list_add() below, see try_to_grab_pending().
	 */
	smp_wmb();
	list_add_tail(&work->entry, head);
	wake_up(&cwq->more_work);
}

static void __queue_work(struct cpu_workqueue_struct *cwq,
			 struct work_struct *work)
{
	unsigned long flags;

	spin_lock_irqsave(&cwq->lock, flags);
	insert_work(cwq, work, &cwq->worklist);
	spin_unlock_irqrestore(&cwq->lock, flags);
}

/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to the CPU on which it was submitted, but if the CPU dies
 * it can be processed by another CPU.
 */
int queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
	int ret;

	ret = queue_work_on(get_cpu(), wq, work);
	put_cpu();

	return ret;
}
EXPORT_SYMBOL_GPL(queue_work);
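
/*
 * Usage sketch (illustrative only, not part of the original file; all
 * "example_*" names are hypothetical): a driver typically embeds a
 * work_struct in its own state and recovers that state with
 * container_of() in the handler.
 */
struct example_dev {
	struct workqueue_struct *wq;	/* e.g. from create_workqueue() */
	struct work_struct reset_work;
};

static void example_reset_fn(struct work_struct *work)
{
	struct example_dev *edev =
		container_of(work, struct example_dev, reset_work);

	/* Runs in process context, so it may sleep. */
	(void)edev;
}

static void example_dev_init(struct example_dev *edev,
				struct workqueue_struct *wq)
{
	edev->wq = wq;
	INIT_WORK(&edev->reset_work, example_reset_fn);
}

static void example_trigger_reset(struct example_dev *edev)
{
	/* Returns 0 if the work was already pending on some queue. */
	queue_work(edev->wq, &edev->reset_work);
}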

/**
 * queue_work_on - queue work on specific cpu
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to a specific CPU; the caller must ensure that
 * the CPU can't go away.
 */
int
queue_work_on(int cpu, struct workqueue_struct *wq, struct work_struct *work)
{
	int ret = 0;

	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
		BUG_ON(!list_empty(&work->entry));
		__queue_work(wq_per_cpu(wq, cpu), work);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_work_on);
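
/*
 * Usage sketch (illustrative, hypothetical name): queue @work on the CPU
 * the caller is currently running on. get_cpu() disables preemption, so
 * the CPU number stays valid while the item is queued; this mirrors what
 * queue_work() above does internally.
 */
static void example_queue_on_local_cpu(struct workqueue_struct *wq,
					struct work_struct *work)
{
	int cpu = get_cpu();

	queue_work_on(cpu, wq, work);
	put_cpu();
}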

static void delayed_work_timer_fn(unsigned long __data)
{
	struct delayed_work *dwork = (struct delayed_work *)__data;
	struct cpu_workqueue_struct *cwq = get_wq_data(&dwork->work);
	struct workqueue_struct *wq = cwq->wq;

	__queue_work(wq_per_cpu(wq, smp_processor_id()), &dwork->work);
}

/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @dwork was already on a queue, non-zero otherwise.
 */
int queue_delayed_work(struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	if (delay == 0)
		return queue_work(wq, &dwork->work);

	return queue_delayed_work_on(-1, wq, dwork, delay);
}
EXPORT_SYMBOL_GPL(queue_delayed_work);
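
/*
 * Usage sketch (illustrative, hypothetical names): a self-rearming poll
 * loop. The delayed_work must have been set up once with
 * INIT_DELAYED_WORK(&p->poll_work, example_poll_fn).
 */
struct example_poller {
	struct workqueue_struct *wq;
	struct delayed_work poll_work;
};

static void example_poll_fn(struct work_struct *work)
{
	struct example_poller *p =
		container_of(work, struct example_poller, poll_work.work);

	/* ... sample the hardware ..., then run again in ~100ms. */
	queue_delayed_work(p->wq, &p->poll_work, msecs_to_jiffies(100));
}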

/**
 * queue_delayed_work_on - queue work on specific CPU after delay
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @dwork was already on a queue, non-zero otherwise.
 */
int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	int ret = 0;
	struct timer_list *timer = &dwork->timer;
	struct work_struct *work = &dwork->work;

	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
		BUG_ON(timer_pending(timer));
		BUG_ON(!list_empty(&work->entry));

		timer_stats_timer_set_start_info(&dwork->timer);

		/* This stores cwq for the moment, for the timer_fn */
		set_wq_data(work, wq_per_cpu(wq, raw_smp_processor_id()));
		timer->expires = jiffies + delay;
		timer->data = (unsigned long)dwork;
		timer->function = delayed_work_timer_fn;

		if (unlikely(cpu >= 0))
			add_timer_on(timer, cpu);
		else
			add_timer(timer);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_delayed_work_on);
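
/*
 * Usage sketch (illustrative, hypothetical name): arm a delayed work on a
 * specific CPU, e.g. to keep per-CPU state cache-hot. As with
 * queue_work_on(), the caller is responsible for the CPU not going away.
 */
static void example_delayed_on_cpu(int cpu, struct workqueue_struct *wq,
				   struct delayed_work *dwork)
{
	queue_delayed_work_on(cpu, wq, dwork, msecs_to_jiffies(50));
}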

static void run_workqueue(struct cpu_workqueue_struct *cwq)
{
	spin_lock_irq(&cwq->lock);
	while (!list_empty(&cwq->worklist)) {
		struct work_struct *work = list_entry(cwq->worklist.next,
						struct work_struct, entry);
		work_func_t f = work->func;
#ifdef CONFIG_LOCKDEP
		/*
		 * It is permissible to free the struct work_struct from
		 * inside the function that is called from it; lockdep
		 * needs to take that into account. To avoid bogus
		 * "held lock freed" warnings as well as problems when
		 * looking into work->lockdep_map, make a copy and use
		 * that here.
		 */
		struct lockdep_map lockdep_map = work->lockdep_map;
#endif
		trace_workqueue_execution(cwq->thread, work);
		cwq->current_work = work;
		list_del_init(cwq->worklist.next);
		spin_unlock_irq(&cwq->lock);

		BUG_ON(get_wq_data(work) != cwq);
		work_clear_pending(work);
		lock_map_acquire(&cwq->wq->lockdep_map);
		lock_map_acquire(&lockdep_map);
		f(work);
		lock_map_release(&lockdep_map);
		lock_map_release(&cwq->wq->lockdep_map);

		if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
			printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
					"%s/0x%08x/%d\n",
					current->comm, preempt_count(),
					task_pid_nr(current));
			printk(KERN_ERR "    last function: ");
			print_symbol("%s\n", (unsigned long)f);
			debug_show_held_locks(current);
			dump_stack();
		}

		spin_lock_irq(&cwq->lock);
		cwq->current_work = NULL;
	}
	spin_unlock_irq(&cwq->lock);
}

static int worker_thread(void *__cwq)
{
	struct cpu_workqueue_struct *cwq = __cwq;
	DEFINE_WAIT(wait);

	if (cwq->wq->freezeable)
		set_freezable();

	for (;;) {
		prepare_to_wait(&cwq->more_work, &wait, TASK_INTERRUPTIBLE);
		if (!freezing(current) &&
		    !kthread_should_stop() &&
		    list_empty(&cwq->worklist))
			schedule();
		finish_wait(&cwq->more_work, &wait);

		try_to_freeze();

		if (kthread_should_stop())
			break;

		run_workqueue(cwq);
	}

	return 0;
}
3381da177e4SLinus Torvalds 
339fc2e4d70SOleg Nesterov struct wq_barrier {
340fc2e4d70SOleg Nesterov 	struct work_struct	work;
341fc2e4d70SOleg Nesterov 	struct completion	done;
342fc2e4d70SOleg Nesterov };
343fc2e4d70SOleg Nesterov 
344fc2e4d70SOleg Nesterov static void wq_barrier_func(struct work_struct *work)
345fc2e4d70SOleg Nesterov {
346fc2e4d70SOleg Nesterov 	struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
347fc2e4d70SOleg Nesterov 	complete(&barr->done);
348fc2e4d70SOleg Nesterov }
349fc2e4d70SOleg Nesterov 
35083c22520SOleg Nesterov static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
3511a4d9b0aSOleg Nesterov 			struct wq_barrier *barr, struct list_head *head)
352fc2e4d70SOleg Nesterov {
353fc2e4d70SOleg Nesterov 	INIT_WORK(&barr->work, wq_barrier_func);
354fc2e4d70SOleg Nesterov 	__set_bit(WORK_STRUCT_PENDING, work_data_bits(&barr->work));
355fc2e4d70SOleg Nesterov 
356fc2e4d70SOleg Nesterov 	init_completion(&barr->done);
35783c22520SOleg Nesterov 
3581a4d9b0aSOleg Nesterov 	insert_work(cwq, &barr->work, head);
359fc2e4d70SOleg Nesterov }
360fc2e4d70SOleg Nesterov 

static int flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
{
	int active = 0;
	struct wq_barrier barr;

	WARN_ON(cwq->thread == current);

	spin_lock_irq(&cwq->lock);
	if (!list_empty(&cwq->worklist) || cwq->current_work != NULL) {
		insert_wq_barrier(cwq, &barr, &cwq->worklist);
		active = 1;
	}
	spin_unlock_irq(&cwq->lock);

	if (active)
		wait_for_completion(&barr.done);

	return active;
}

/**
 * flush_workqueue - ensure that any scheduled work has run to completion.
 * @wq: workqueue to flush
 *
 * Forces execution of the workqueue and blocks until its completion.
 * This is typically used in driver shutdown handlers.
 *
 * We sleep until all work items which were queued on entry have been
 * handled, but we are not livelocked by new incoming ones.
 *
 * This function used to run the workqueues itself.  Now we just wait for the
 * helper threads to do it.
 */
void flush_workqueue(struct workqueue_struct *wq)
{
	const struct cpumask *cpu_map = wq_cpu_map(wq);
	int cpu;

	might_sleep();
	lock_map_acquire(&wq->lockdep_map);
	lock_map_release(&wq->lockdep_map);
	for_each_cpu(cpu, cpu_map)
		flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
}
EXPORT_SYMBOL_GPL(flush_workqueue);
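
/*
 * Usage sketch (illustrative, hypothetical names): typical driver teardown
 * drains its private queue before freeing anything the work items touch.
 * Nothing may queue new work on edev->wq once this starts.
 */
static void example_teardown(struct example_dev *edev)
{
	flush_workqueue(edev->wq);	/* wait for everything queued so far */
	destroy_workqueue(edev->wq);
}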

/**
 * flush_work - block until a work_struct's callback has terminated
 * @work: the work which is to be flushed
 *
 * Returns false if @work has already terminated.
 *
 * It is expected that, prior to calling flush_work(), the caller has
 * arranged for the work to not be requeued, otherwise it doesn't make
 * sense to use this function.
 */
int flush_work(struct work_struct *work)
{
	struct cpu_workqueue_struct *cwq;
	struct list_head *prev;
	struct wq_barrier barr;

	might_sleep();
	cwq = get_wq_data(work);
	if (!cwq)
		return 0;

	lock_map_acquire(&cwq->wq->lockdep_map);
	lock_map_release(&cwq->wq->lockdep_map);

	prev = NULL;
	spin_lock_irq(&cwq->lock);
	if (!list_empty(&work->entry)) {
		/*
		 * See the comment near try_to_grab_pending()->smp_rmb().
		 * If it was re-queued under us we are not going to wait.
		 */
		smp_rmb();
		if (unlikely(cwq != get_wq_data(work)))
			goto out;
		prev = &work->entry;
	} else {
		if (cwq->current_work != work)
			goto out;
		prev = &cwq->worklist;
	}
	insert_wq_barrier(cwq, &barr, prev->next);
out:
	spin_unlock_irq(&cwq->lock);
	if (!prev)
		return 0;

	wait_for_completion(&barr.done);
	return 1;
}
EXPORT_SYMBOL_GPL(flush_work);
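
/*
 * Usage sketch (illustrative, hypothetical name): wait for one specific
 * item instead of draining the whole queue. As the comment above says,
 * the caller must already have stopped whatever might re-queue the work.
 */
static void example_wait_for_reset(struct example_dev *edev)
{
	if (!flush_work(&edev->reset_work))
		pr_debug("example: reset work was already idle\n");
}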

/*
 * Upon a successful return (>= 0), the caller "owns" the
 * WORK_STRUCT_PENDING bit, so this work can't be re-armed in any way.
 */
static int try_to_grab_pending(struct work_struct *work)
{
	struct cpu_workqueue_struct *cwq;
	int ret = -1;

	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work)))
		return 0;

	/*
	 * The queueing is in progress, or it is already queued. Try to
	 * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
	 */

	cwq = get_wq_data(work);
	if (!cwq)
		return ret;

	spin_lock_irq(&cwq->lock);
	if (!list_empty(&work->entry)) {
		/*
		 * This work is queued, but perhaps we locked the wrong cwq.
		 * In that case we must see the new value after rmb(), see
		 * insert_work()->wmb().
		 */
		smp_rmb();
		if (cwq == get_wq_data(work)) {
			list_del_init(&work->entry);
			ret = 1;
		}
	}
	spin_unlock_irq(&cwq->lock);

	return ret;
}

static void wait_on_cpu_work(struct cpu_workqueue_struct *cwq,
				struct work_struct *work)
{
	struct wq_barrier barr;
	int running = 0;

	spin_lock_irq(&cwq->lock);
	if (unlikely(cwq->current_work == work)) {
		insert_wq_barrier(cwq, &barr, cwq->worklist.next);
		running = 1;
	}
	spin_unlock_irq(&cwq->lock);

	if (unlikely(running))
		wait_for_completion(&barr.done);
}

static void wait_on_work(struct work_struct *work)
{
	struct cpu_workqueue_struct *cwq;
	struct workqueue_struct *wq;
	const struct cpumask *cpu_map;
	int cpu;

	might_sleep();

	lock_map_acquire(&work->lockdep_map);
	lock_map_release(&work->lockdep_map);

	cwq = get_wq_data(work);
	if (!cwq)
		return;

	wq = cwq->wq;
	cpu_map = wq_cpu_map(wq);

	for_each_cpu(cpu, cpu_map)
		wait_on_cpu_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
}

static int __cancel_work_timer(struct work_struct *work,
				struct timer_list *timer)
{
	int ret;

	do {
		ret = (timer && likely(del_timer(timer)));
		if (!ret)
			ret = try_to_grab_pending(work);
		wait_on_work(work);
	} while (unlikely(ret < 0));

	work_clear_pending(work);
	return ret;
}

/**
 * cancel_work_sync - block until a work_struct's callback has terminated
 * @work: the work which is to be flushed
 *
 * Returns true if @work was pending.
 *
 * cancel_work_sync() will cancel the work if it is queued. If the work's
 * callback appears to be running, cancel_work_sync() will block until it
 * has completed.
 *
 * It is possible to use this function if the work re-queues itself. It can
 * cancel the work even if it migrates to another workqueue, however in that
 * case it only guarantees that work->func() has completed on the last queued
 * workqueue.
 *
 * cancel_work_sync(&delayed_work->work) should be used only if ->timer is not
 * pending, otherwise it goes into a busy-wait loop until the timer expires.
 *
 * The caller must ensure that the workqueue_struct on which this work was
 * last queued can't be destroyed before this function returns.
 */
int cancel_work_sync(struct work_struct *work)
{
	return __cancel_work_timer(work, NULL);
}
EXPORT_SYMBOL_GPL(cancel_work_sync);

/**
 * cancel_delayed_work_sync - reliably kill off a delayed work.
 * @dwork: the delayed work struct
 *
 * Returns true if @dwork was pending.
 *
 * It is possible to use this function if @dwork rearms itself via queue_work()
 * or queue_delayed_work(). See also the comment for cancel_work_sync().
 */
int cancel_delayed_work_sync(struct delayed_work *dwork)
{
	return __cancel_work_timer(&dwork->work, &dwork->timer);
}
EXPORT_SYMBOL(cancel_delayed_work_sync);
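
/*
 * Usage sketch (illustrative, hypothetical names): the usual shutdown
 * ordering for an object that owns both a plain and a delayed work item.
 * Once the _sync variants return, the handlers are not running on any
 * CPU, so the backing memory may be freed.
 */
static void example_shutdown(struct example_dev *edev,
				struct example_poller *p)
{
	cancel_delayed_work_sync(&p->poll_work);	/* timer and work */
	cancel_work_sync(&edev->reset_work);
	kfree(p);
}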

static struct workqueue_struct *keventd_wq __read_mostly;

/**
 * schedule_work - put work task in global workqueue
 * @work: job to be done
 *
 * Returns zero if @work was already on the kernel-global workqueue and
 * non-zero otherwise.
 *
 * This puts a job in the kernel-global workqueue if it was not already
 * queued and leaves it in the same position on the kernel-global
 * workqueue otherwise.
 */
int schedule_work(struct work_struct *work)
{
	return queue_work(keventd_wq, work);
}
EXPORT_SYMBOL(schedule_work);
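
/*
 * Usage sketch (illustrative, hypothetical names): fire-and-forget
 * deferral to keventd, e.g. from a hard irq handler that must not sleep.
 */
static void example_log_fn(struct work_struct *work)
{
	pr_info("example: deferred from atomic context\n");
}

static DECLARE_WORK(example_bottom_half, example_log_fn);

static void example_from_irq(void)
{
	schedule_work(&example_bottom_half);
}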

/**
 * schedule_work_on - put work task on a specific cpu
 * @cpu: cpu to put the work task on
 * @work: job to be done
 *
 * This puts a job on a specific cpu.
 */
int schedule_work_on(int cpu, struct work_struct *work)
{
	return queue_work_on(cpu, keventd_wq, work);
}
EXPORT_SYMBOL(schedule_work_on);

/**
 * schedule_delayed_work - put work task in global workqueue after delay
 * @dwork: job to be done
 * @delay: number of jiffies to wait or 0 for immediate execution
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue.
 */
int schedule_delayed_work(struct delayed_work *dwork,
					unsigned long delay)
{
	return queue_delayed_work(keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work);
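
/*
 * Usage sketch (illustrative, hypothetical name): run example_log_fn on
 * keventd roughly two seconds from now.
 */
static DECLARE_DELAYED_WORK(example_late_work, example_log_fn);

static void example_arm_late_work(void)
{
	schedule_delayed_work(&example_late_work, 2 * HZ);
}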

/**
 * flush_delayed_work - block until a delayed_work's callback has terminated
 * @dwork: the delayed work which is to be flushed
 *
 * Any timeout is cancelled, and any pending work is run immediately.
 */
void flush_delayed_work(struct delayed_work *dwork)
{
	if (del_timer_sync(&dwork->timer)) {
		struct cpu_workqueue_struct *cwq;
		cwq = wq_per_cpu(keventd_wq, get_cpu());
		__queue_work(cwq, &dwork->work);
		put_cpu();
	}
	flush_work(&dwork->work);
}
EXPORT_SYMBOL(flush_delayed_work);
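
/*
 * Usage sketch (illustrative): force a still-pending delayed item to run
 * right now and wait for it, e.g. on a suspend path that cannot sit out
 * the remaining timeout.
 */
static void example_suspend_flush(void)
{
	flush_delayed_work(&example_late_work);
}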

/**
 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
 * @cpu: cpu to use
 * @dwork: job to be done
 * @delay: number of jiffies to wait
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue on the specified CPU.
 */
int schedule_delayed_work_on(int cpu,
			struct delayed_work *dwork, unsigned long delay)
{
	return queue_delayed_work_on(cpu, keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work_on);

/**
 * schedule_on_each_cpu - call a function on each online CPU from keventd
 * @func: the function to call
 *
 * Returns zero on success.
 * Returns a negative errno on failure.
 *
 * schedule_on_each_cpu() is very slow.
 */
int schedule_on_each_cpu(work_func_t func)
{
	int cpu;
	struct work_struct *works;

	works = alloc_percpu(struct work_struct);
	if (!works)
		return -ENOMEM;

	get_online_cpus();
	for_each_online_cpu(cpu) {
		struct work_struct *work = per_cpu_ptr(works, cpu);

		INIT_WORK(work, func);
		schedule_work_on(cpu, work);
	}
	for_each_online_cpu(cpu)
		flush_work(per_cpu_ptr(works, cpu));
	put_online_cpus();
	free_percpu(works);
	return 0;
}
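
/*
 * Usage sketch (illustrative, hypothetical names): run a callback once on
 * every online CPU and wait for all of them to finish.
 */
static void example_percpu_fn(struct work_struct *work)
{
	/* Each invocation runs on its own CPU's keventd thread. */
	pr_info("example: running on cpu %d\n", raw_smp_processor_id());
}

static int example_run_everywhere(void)
{
	return schedule_on_each_cpu(example_percpu_fn);	/* 0 or -ENOMEM */
}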

void flush_scheduled_work(void)
{
	flush_workqueue(keventd_wq);
}
EXPORT_SYMBOL(flush_scheduled_work);

/**
 * execute_in_process_context - reliably execute the routine with user context
 * @fn:		the function to execute
 * @ew:		guaranteed storage for the execute work structure (must
 *		be available when the work executes)
 *
 * Executes the function immediately if process context is available,
 * otherwise schedules the function for delayed execution.
 *
 * Returns:	0 - function was executed
 *		1 - function was scheduled for execution
 */
int execute_in_process_context(work_func_t fn, struct execute_work *ew)
{
	if (!in_interrupt()) {
		fn(&ew->work);
		return 0;
	}

	INIT_WORK(&ew->work, fn);
	schedule_work(&ew->work);

	return 1;
}
EXPORT_SYMBOL_GPL(execute_in_process_context);
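
/*
 * Usage sketch (illustrative, hypothetical names): a release path that may
 * be entered from interrupt context embeds a struct execute_work that
 * outlives the call and lets this helper choose direct vs. deferred
 * execution.
 */
struct example_release {
	struct execute_work ew;
	/* ... resources to put ... */
};

static void example_release_fn(struct work_struct *work)
{
	struct example_release *rel =
		container_of(work, struct example_release, ew.work);

	kfree(rel);
}

static void example_put(struct example_release *rel)
{
	execute_in_process_context(example_release_fn, &rel->ew);
}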

int keventd_up(void)
{
	return keventd_wq != NULL;
}

int current_is_keventd(void)
{
	struct cpu_workqueue_struct *cwq;
	int cpu = raw_smp_processor_id(); /* preempt-safe: keventd is per-cpu */
	int ret = 0;

	BUG_ON(!keventd_wq);

	cwq = per_cpu_ptr(keventd_wq->cpu_wq, cpu);
	if (current == cwq->thread)
		ret = 1;

	return ret;
}

static struct cpu_workqueue_struct *
init_cpu_workqueue(struct workqueue_struct *wq, int cpu)
{
	struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);

	cwq->wq = wq;
	spin_lock_init(&cwq->lock);
	INIT_LIST_HEAD(&cwq->worklist);
	init_waitqueue_head(&cwq->more_work);

	return cwq;
}

static int create_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
	struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
	struct workqueue_struct *wq = cwq->wq;
	const char *fmt = is_wq_single_threaded(wq) ? "%s" : "%s/%d";
	struct task_struct *p;

	p = kthread_create(worker_thread, cwq, fmt, wq->name, cpu);
	/*
	 * Nobody can add the work_struct to this cwq,
	 *	if (caller is __create_workqueue)
	 *		nobody should see this wq
	 *	else // caller is CPU_UP_PREPARE
	 *		cpu is not on cpu_online_map
	 * so we can abort safely.
	 */
	if (IS_ERR(p))
		return PTR_ERR(p);
	if (cwq->wq->rt)
		sched_setscheduler_nocheck(p, SCHED_FIFO, &param);
	cwq->thread = p;

	trace_workqueue_creation(cwq->thread, cpu);

	return 0;
}

static void start_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
	struct task_struct *p = cwq->thread;

	if (p != NULL) {
		if (cpu >= 0)
			kthread_bind(p, cpu);
		wake_up_process(p);
	}
}

struct workqueue_struct *__create_workqueue_key(const char *name,
						int singlethread,
						int freezeable,
						int rt,
						struct lock_class_key *key,
						const char *lock_name)
{
	struct workqueue_struct *wq;
	struct cpu_workqueue_struct *cwq;
	int err = 0, cpu;

	wq = kzalloc(sizeof(*wq), GFP_KERNEL);
	if (!wq)
		return NULL;

	wq->cpu_wq = alloc_percpu(struct cpu_workqueue_struct);
	if (!wq->cpu_wq) {
		kfree(wq);
		return NULL;
	}

	wq->name = name;
	lockdep_init_map(&wq->lockdep_map, lock_name, key, 0);
	wq->singlethread = singlethread;
	wq->freezeable = freezeable;
	wq->rt = rt;
	INIT_LIST_HEAD(&wq->list);

	if (singlethread) {
		cwq = init_cpu_workqueue(wq, singlethread_cpu);
		err = create_workqueue_thread(cwq, singlethread_cpu);
		start_workqueue_thread(cwq, -1);
	} else {
		cpu_maps_update_begin();
		/*
		 * We must place this wq on the list even if the code below
		 * fails. cpu_down(cpu) can remove cpu from cpu_populated_map
		 * before destroy_workqueue() takes the lock; in that case we
		 * leak cwq[cpu]->thread.
		 */
		spin_lock(&workqueue_lock);
		list_add(&wq->list, &workqueues);
		spin_unlock(&workqueue_lock);
		/*
		 * We must initialize cwqs for each possible cpu even if we
		 * are going to call destroy_workqueue() finally. Otherwise
		 * cpu_up() can hit the uninitialized cwq once we drop the
		 * lock.
		 */
		for_each_possible_cpu(cpu) {
			cwq = init_cpu_workqueue(wq, cpu);
			if (err || !cpu_online(cpu))
				continue;
			err = create_workqueue_thread(cwq, cpu);
			start_workqueue_thread(cwq, cpu);
		}
		cpu_maps_update_done();
	}

	if (err) {
		destroy_workqueue(wq);
		wq = NULL;
	}
	return wq;
}
EXPORT_SYMBOL_GPL(__create_workqueue_key);
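
/*
 * Usage sketch (illustrative, hypothetical name): drivers normally reach
 * __create_workqueue_key() through the create_workqueue(),
 * create_singlethread_workqueue() and create_freezeable_workqueue()
 * wrappers in <linux/workqueue.h>, which supply the flags and lockdep key.
 */
static struct workqueue_struct *example_wq_create(void)
{
	/* One worker thread total, instead of one per CPU. */
	return create_singlethread_workqueue("example");
}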

static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq)
{
	/*
	 * Our caller is either destroy_workqueue() or CPU_POST_DEAD,
	 * cpu_add_remove_lock protects cwq->thread.
	 */
	if (cwq->thread == NULL)
		return;

	lock_map_acquire(&cwq->wq->lockdep_map);
	lock_map_release(&cwq->wq->lockdep_map);

	flush_cpu_workqueue(cwq);
	/*
	 * If the caller is CPU_POST_DEAD and cwq->worklist was not empty,
	 * a concurrent flush_workqueue() can insert a barrier after us.
	 * However, in that case run_workqueue() won't return and check
	 * kthread_should_stop() until it flushes all work_struct's.
	 * When ->worklist becomes empty it is safe to exit because no
	 * more work_structs can be queued on this cwq: flush_workqueue
	 * checks list_empty(), and a "normal" queue_work() can't use
	 * a dead CPU.
	 */
	trace_workqueue_destruction(cwq->thread);
	kthread_stop(cwq->thread);
	cwq->thread = NULL;
}

/**
 * destroy_workqueue - safely terminate a workqueue
 * @wq: target workqueue
 *
 * Safely destroy a workqueue. All work currently pending will be done first.
 */
void destroy_workqueue(struct workqueue_struct *wq)
{
	const struct cpumask *cpu_map = wq_cpu_map(wq);
	int cpu;

	cpu_maps_update_begin();
	spin_lock(&workqueue_lock);
	list_del(&wq->list);
	spin_unlock(&workqueue_lock);

	for_each_cpu(cpu, cpu_map)
		cleanup_workqueue_thread(per_cpu_ptr(wq->cpu_wq, cpu));
	cpu_maps_update_done();

	free_percpu(wq->cpu_wq);
	kfree(wq);
}
EXPORT_SYMBOL_GPL(destroy_workqueue);

static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
						unsigned long action,
						void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct cpu_workqueue_struct *cwq;
	struct workqueue_struct *wq;
	int ret = NOTIFY_OK;

	action &= ~CPU_TASKS_FROZEN;

	switch (action) {
	case CPU_UP_PREPARE:
		cpumask_set_cpu(cpu, cpu_populated_map);
	}
undo:
	list_for_each_entry(wq, &workqueues, list) {
		cwq = per_cpu_ptr(wq->cpu_wq, cpu);

		switch (action) {
		case CPU_UP_PREPARE:
			if (!create_workqueue_thread(cwq, cpu))
				break;
			printk(KERN_ERR "workqueue [%s] for %i failed\n",
				wq->name, cpu);
			action = CPU_UP_CANCELED;
			ret = NOTIFY_BAD;
			goto undo;

		case CPU_ONLINE:
			start_workqueue_thread(cwq, cpu);
			break;

		case CPU_UP_CANCELED:
			start_workqueue_thread(cwq, -1);
			/* fall through: the thread must be cleaned up too */
		case CPU_POST_DEAD:
			cleanup_workqueue_thread(cwq);
			break;
		}
	}

	switch (action) {
	case CPU_UP_CANCELED:
	case CPU_POST_DEAD:
		cpumask_clear_cpu(cpu, cpu_populated_map);
	}

	return ret;
}

#ifdef CONFIG_SMP

struct work_for_cpu {
	struct completion completion;
	long (*fn)(void *);
	void *arg;
	long ret;
};

static int do_work_for_cpu(void *_wfc)
{
	struct work_for_cpu *wfc = _wfc;
	wfc->ret = wfc->fn(wfc->arg);
	complete(&wfc->completion);
	return 0;
}

/**
 * work_on_cpu - run a function in user context on a particular cpu
 * @cpu: the cpu to run on
 * @fn: the function to run
 * @arg: the function arg
 *
 * This will return the value @fn returns.
 * It is up to the caller to ensure that the cpu doesn't go offline.
 * The caller must not hold any locks which would prevent @fn from completing.
 */
long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
{
	struct task_struct *sub_thread;
	struct work_for_cpu wfc = {
		.completion = COMPLETION_INITIALIZER_ONSTACK(wfc.completion),
		.fn = fn,
		.arg = arg,
	};

	sub_thread = kthread_create(do_work_for_cpu, &wfc, "work_for_cpu");
	if (IS_ERR(sub_thread))
		return PTR_ERR(sub_thread);
	kthread_bind(sub_thread, cpu);
	wake_up_process(sub_thread);
	wait_for_completion(&wfc.completion);
	return wfc.ret;
}
EXPORT_SYMBOL_GPL(work_on_cpu);
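
/*
 * Usage sketch (illustrative, hypothetical names): evaluate something on a
 * particular CPU without migrating the caller. The caller must keep that
 * CPU online, e.g. under get_online_cpus().
 */
static long example_cpu_query_fn(void *arg)
{
	/* Runs in process context on the target CPU. */
	return (long)raw_smp_processor_id();
}

static long example_query_cpu(unsigned int cpu)
{
	return work_on_cpu(cpu, example_cpu_query_fn, NULL);
}
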
#endif /* CONFIG_SMP */

void __init init_workqueues(void)
{
	alloc_cpumask_var(&cpu_populated_map, GFP_KERNEL);

	cpumask_copy(cpu_populated_map, cpu_online_mask);
	singlethread_cpu = cpumask_first(cpu_possible_mask);
	cpu_singlethread_map = cpumask_of(singlethread_cpu);
	hotcpu_notifier(workqueue_cpu_callback, 0);
	keventd_wq = create_workqueue("events");
	BUG_ON(!keventd_wq);
}