/*
 * linux/kernel/workqueue.c
 *
 * Generic mechanism for defining kernel helper threads for running
 * arbitrary tasks in process context.
 *
 * Started by Ingo Molnar, Copyright (C) 2002
 *
 * Derived from the taskqueue/keventd code by:
 *
 *   David Woodhouse <[email protected]>
 *   Andrew Morton <[email protected]>
 *   Kai Petzke <[email protected]>
 *   Theodore Ts'o <[email protected]>
 *
 * Made to use alloc_percpu by Christoph Lameter <[email protected]>.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/mempolicy.h>
#include <linux/freezer.h>
#include <linux/kallsyms.h>
#include <linux/debug_locks.h>

/*
 * The per-CPU workqueue (if single thread, we always use the first
 * possible cpu).
 */
struct cpu_workqueue_struct {

        spinlock_t lock;

        struct list_head worklist;
        wait_queue_head_t more_work;

        struct workqueue_struct *wq;
        struct task_struct *thread;

        int run_depth;          /* Detect run_workqueue() recursion depth */

        int freezeable;         /* Freeze the thread during suspend */
} ____cacheline_aligned;

/*
 * The externally visible workqueue abstraction is an array of
 * per-CPU workqueues:
 */
struct workqueue_struct {
        struct cpu_workqueue_struct *cpu_wq;
        const char *name;
        struct list_head list;  /* Empty if single thread */
};
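/*
 * Relationship between the two structures above: a workqueue_struct is
 * the handle callers pass around; it owns one cpu_workqueue_struct per
 * possible CPU (allocated with alloc_percpu), each carrying its own
 * lock, work list and worker thread.  Single-threaded queues only ever
 * use the entry for singlethread_cpu.
 */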
/* All the per-cpu workqueues on the system, for hotplug cpu to add/remove
   threads to each one as cpus come/go. */
static DEFINE_MUTEX(workqueue_mutex);
static LIST_HEAD(workqueues);

static int singlethread_cpu;

/* If it's single threaded, it isn't in the list of workqueues. */
static inline int is_single_threaded(struct workqueue_struct *wq)
{
        return list_empty(&wq->list);
}

/*
 * Set the workqueue on which a work item is to be run
 * - Must *only* be called if the pending flag is set
 */
static inline void set_wq_data(struct work_struct *work, void *wq)
{
        unsigned long new;

        BUG_ON(!work_pending(work));

        new = (unsigned long) wq | (1UL << WORK_STRUCT_PENDING);
        new |= WORK_STRUCT_FLAG_MASK & *work_data_bits(work);
        atomic_long_set(&work->data, new);
}

static inline void *get_wq_data(struct work_struct *work)
{
        return (void *) (atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK);
}
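/*
 * As the two helpers above show, the atomic work->data word does double
 * duty: the low WORK_STRUCT_FLAG_MASK bits hold flag bits such as
 * WORK_STRUCT_PENDING, while the remaining bits hold the pointer that
 * get_wq_data() returns - normally the owning cpu_workqueue_struct, or
 * temporarily the workqueue_struct itself for delayed work whose timer
 * has not fired yet (see queue_delayed_work() below).
 */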
static int __run_work(struct cpu_workqueue_struct *cwq, struct work_struct *work)
{
        int ret = 0;
        unsigned long flags;

        spin_lock_irqsave(&cwq->lock, flags);
        /*
         * We need to re-validate the work info after we've gotten
         * the cpu_workqueue lock. We can run the work now iff:
         *
         *  - the wq_data still matches the cpu_workqueue_struct
         *  - AND the work is still marked pending
         *  - AND the work is still on a list (which will be this
         *    workqueue_struct list)
         *
         * All these conditions are important, because we
         * need to protect against the work being run right
         * now on another CPU (all but the last one might be
         * true if it's currently running and has not been
         * released yet, for example).
         */
        if (get_wq_data(work) == cwq
                        && work_pending(work)
                        && !list_empty(&work->entry)) {
                work_func_t f = work->func;
                list_del_init(&work->entry);
                spin_unlock_irqrestore(&cwq->lock, flags);

                if (!test_bit(WORK_STRUCT_NOAUTOREL, work_data_bits(work)))
                        work_release(work);
                f(work);

                spin_lock_irqsave(&cwq->lock, flags);
                ret = 1;
        }
        spin_unlock_irqrestore(&cwq->lock, flags);
        return ret;
}

/**
 * run_scheduled_work - run scheduled work synchronously
 * @work: work to run
 *
 * This checks if the work was pending, and runs it
 * synchronously if so. It returns a boolean to indicate
 * whether it had any scheduled work to run or not.
 *
 * NOTE! This _only_ works for normal work_structs. You
 * CANNOT use this for delayed work, because the wq data
 * for delayed work will not point properly to the per-
 * CPU workqueue struct, but will change!
 */
int fastcall run_scheduled_work(struct work_struct *work)
{
        for (;;) {
                struct cpu_workqueue_struct *cwq;

                if (!work_pending(work))
                        return 0;
                if (list_empty(&work->entry))
                        return 0;
                /* NOTE! This depends intimately on __queue_work! */
                cwq = get_wq_data(work);
                if (!cwq)
                        return 0;
                if (__run_work(cwq, work))
                        return 1;
        }
}
EXPORT_SYMBOL(run_scheduled_work);

/* Preempt must be disabled. */
static void __queue_work(struct cpu_workqueue_struct *cwq,
                         struct work_struct *work)
{
        unsigned long flags;

        spin_lock_irqsave(&cwq->lock, flags);
        set_wq_data(work, cwq);
        list_add_tail(&work->entry, &cwq->worklist);
        wake_up(&cwq->more_work);
        spin_unlock_irqrestore(&cwq->lock, flags);
}
/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to the CPU on which it was submitted, but there is
 * no guarantee that it will be processed by that CPU.
 */
int fastcall queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
        int ret = 0, cpu = get_cpu();

        if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
                if (unlikely(is_single_threaded(wq)))
                        cpu = singlethread_cpu;
                BUG_ON(!list_empty(&work->entry));
                __queue_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
                ret = 1;
        }
        put_cpu();
        return ret;
}
EXPORT_SYMBOL_GPL(queue_work);
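/*
 * Illustrative usage sketch (hypothetical names, not part of this file):
 * callers usually embed the work_struct in the object it operates on and
 * recover the object with container_of() in the handler.
 *
 *      struct my_dev {
 *              struct work_struct reset_work;
 *              struct workqueue_struct *wq;
 *      };
 *
 *      static void my_reset(struct work_struct *work)
 *      {
 *              struct my_dev *dev = container_of(work, struct my_dev,
 *                                                reset_work);
 *              ... runs in a worker thread, may sleep ...
 *      }
 *
 *      INIT_WORK(&dev->reset_work, my_reset);
 *      queue_work(dev->wq, &dev->reset_work);
 */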
void delayed_work_timer_fn(unsigned long __data)
{
        struct delayed_work *dwork = (struct delayed_work *)__data;
        struct workqueue_struct *wq = get_wq_data(&dwork->work);
        int cpu = smp_processor_id();

        if (unlikely(is_single_threaded(wq)))
                cpu = singlethread_cpu;

        __queue_work(per_cpu_ptr(wq->cpu_wq, cpu), &dwork->work);
}

/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 */
int fastcall queue_delayed_work(struct workqueue_struct *wq,
                        struct delayed_work *dwork, unsigned long delay)
{
        int ret = 0;
        struct timer_list *timer = &dwork->timer;
        struct work_struct *work = &dwork->work;

        timer_stats_timer_set_start_info(timer);
        if (delay == 0)
                return queue_work(wq, work);

        if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
                BUG_ON(timer_pending(timer));
                BUG_ON(!list_empty(&work->entry));

                /* This stores wq for the moment, for the timer_fn */
                set_wq_data(work, wq);
                timer->expires = jiffies + delay;
                timer->data = (unsigned long)dwork;
                timer->function = delayed_work_timer_fn;
                add_timer(timer);
                ret = 1;
        }
        return ret;
}
EXPORT_SYMBOL_GPL(queue_delayed_work);
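/*
 * Illustrative usage sketch (hypothetical names): a delayed_work couples
 * a work_struct with a timer, so the caller only supplies the delay in
 * jiffies.
 *
 *      INIT_DELAYED_WORK(&dev->poll_work, my_poll);
 *      queue_delayed_work(dev->wq, &dev->poll_work, msecs_to_jiffies(100));
 *
 * The handler still receives the embedded work_struct, so the
 * container_of() step becomes
 * container_of(work, struct my_dev, poll_work.work).
 */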
/**
 * queue_delayed_work_on - queue work on specific CPU after delay
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 */
int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
                        struct delayed_work *dwork, unsigned long delay)
{
        int ret = 0;
        struct timer_list *timer = &dwork->timer;
        struct work_struct *work = &dwork->work;

        if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
                BUG_ON(timer_pending(timer));
                BUG_ON(!list_empty(&work->entry));

                /* This stores wq for the moment, for the timer_fn */
                set_wq_data(work, wq);
                timer->expires = jiffies + delay;
                timer->data = (unsigned long)dwork;
                timer->function = delayed_work_timer_fn;
                add_timer_on(timer, cpu);
                ret = 1;
        }
        return ret;
}
EXPORT_SYMBOL_GPL(queue_delayed_work_on);

static void run_workqueue(struct cpu_workqueue_struct *cwq)
{
        unsigned long flags;

        /*
         * Keep taking off work from the queue until
         * done.
         */
        spin_lock_irqsave(&cwq->lock, flags);
        cwq->run_depth++;
        if (cwq->run_depth > 3) {
                /* morton gets to eat his hat */
                printk("%s: recursion depth exceeded: %d\n",
                        __FUNCTION__, cwq->run_depth);
                dump_stack();
        }
        while (!list_empty(&cwq->worklist)) {
                struct work_struct *work = list_entry(cwq->worklist.next,
                                                struct work_struct, entry);
                work_func_t f = work->func;

                list_del_init(cwq->worklist.next);
                spin_unlock_irqrestore(&cwq->lock, flags);

                BUG_ON(get_wq_data(work) != cwq);
                if (!test_bit(WORK_STRUCT_NOAUTOREL, work_data_bits(work)))
                        work_release(work);
                f(work);

                if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
                        printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
                                        "%s/0x%08x/%d\n",
                                        current->comm, preempt_count(),
                                        current->pid);
                        printk(KERN_ERR "    last function: ");
                        print_symbol("%s\n", (unsigned long)f);
                        debug_show_held_locks(current);
                        dump_stack();
                }

                spin_lock_irqsave(&cwq->lock, flags);
        }
        cwq->run_depth--;
        spin_unlock_irqrestore(&cwq->lock, flags);
}
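/*
 * Note for work handlers: they are called from a worker thread in
 * process context and may sleep, but as the check above warns, they
 * must return with preemption enabled and without any locks still held,
 * otherwise the "leaked lock or atomic" message is printed.
 */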
static int worker_thread(void *__cwq)
{
        struct cpu_workqueue_struct *cwq = __cwq;
        DECLARE_WAITQUEUE(wait, current);
        struct k_sigaction sa;
        sigset_t blocked;

        if (!cwq->freezeable)
                current->flags |= PF_NOFREEZE;

        set_user_nice(current, -5);

        /* Block and flush all signals */
        sigfillset(&blocked);
        sigprocmask(SIG_BLOCK, &blocked, NULL);
        flush_signals(current);

        /*
         * We inherited MPOL_INTERLEAVE from the booting kernel.
         * Set MPOL_DEFAULT to ensure node local allocations.
         */
        numa_default_policy();

        /* SIG_IGN makes children autoreap: see do_notify_parent(). */
        sa.sa.sa_handler = SIG_IGN;
        sa.sa.sa_flags = 0;
        siginitset(&sa.sa.sa_mask, sigmask(SIGCHLD));
        do_sigaction(SIGCHLD, &sa, (struct k_sigaction *)0);

        set_current_state(TASK_INTERRUPTIBLE);
        while (!kthread_should_stop()) {
                if (cwq->freezeable)
                        try_to_freeze();

                add_wait_queue(&cwq->more_work, &wait);
                if (list_empty(&cwq->worklist))
                        schedule();
                else
                        __set_current_state(TASK_RUNNING);
                remove_wait_queue(&cwq->more_work, &wait);

                if (!list_empty(&cwq->worklist))
                        run_workqueue(cwq);
                set_current_state(TASK_INTERRUPTIBLE);
        }
        __set_current_state(TASK_RUNNING);
        return 0;
}
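/*
 * The wq_barrier below is a dummy work item used by flush_cpu_workqueue():
 * it is queued at the tail of a CPU's worklist and does nothing but
 * complete a completion, so waiting for it guarantees that every work
 * item queued before it has already run.
 */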
struct wq_barrier {
        struct work_struct      work;
        struct completion       done;
};

static void wq_barrier_func(struct work_struct *work)
{
        struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
        complete(&barr->done);
}

static inline void init_wq_barrier(struct wq_barrier *barr)
{
        INIT_WORK(&barr->work, wq_barrier_func);
        __set_bit(WORK_STRUCT_PENDING, work_data_bits(&barr->work));

        init_completion(&barr->done);
}

static void flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
{
        if (cwq->thread == current) {
                /*
                 * Probably keventd trying to flush its own queue. So simply run
                 * it by hand rather than deadlocking.
                 */
                mutex_unlock(&workqueue_mutex);
                run_workqueue(cwq);
                mutex_lock(&workqueue_mutex);
        } else {
                struct wq_barrier barr;

                init_wq_barrier(&barr);
                __queue_work(cwq, &barr.work);

                mutex_unlock(&workqueue_mutex);
                wait_for_completion(&barr.done);
                mutex_lock(&workqueue_mutex);
        }
}

/**
 * flush_workqueue - ensure that any scheduled work has run to completion.
 * @wq: workqueue to flush
 *
 * Forces execution of the workqueue and blocks until its completion.
 * This is typically used in driver shutdown handlers.
 *
 * We sleep until all works which were queued on entry have been handled,
 * but we are not livelocked by new incoming ones.
 *
 * This function used to run the workqueues itself.  Now we just wait for the
 * helper threads to do it.
 */
void fastcall flush_workqueue(struct workqueue_struct *wq)
{
        mutex_lock(&workqueue_mutex);
        if (is_single_threaded(wq)) {
                /* Always use first cpu's area. */
                flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, singlethread_cpu));
        } else {
                int cpu;

                for_each_online_cpu(cpu)
                        flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
        }
        mutex_unlock(&workqueue_mutex);
}
EXPORT_SYMBOL_GPL(flush_workqueue);
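/*
 * Illustrative sketch (hypothetical names): a driver teardown path that
 * must be sure none of its handlers are still running.  flush_workqueue()
 * only waits for work queued so far; it does not prevent new work from
 * being queued afterwards, so stop the sources first.
 *
 *      cancel_delayed_work(&dev->poll_work);
 *      flush_workqueue(dev->wq);
 *      destroy_workqueue(dev->wq);
 */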
static struct task_struct *create_workqueue_thread(struct workqueue_struct *wq,
                                                   int cpu, int freezeable)
{
        struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);
        struct task_struct *p;

        spin_lock_init(&cwq->lock);
        cwq->wq = wq;
        cwq->thread = NULL;
        cwq->freezeable = freezeable;
        INIT_LIST_HEAD(&cwq->worklist);
        init_waitqueue_head(&cwq->more_work);

        if (is_single_threaded(wq))
                p = kthread_create(worker_thread, cwq, "%s", wq->name);
        else
                p = kthread_create(worker_thread, cwq, "%s/%d", wq->name, cpu);
        if (IS_ERR(p))
                return NULL;
        cwq->thread = p;
        return p;
}

struct workqueue_struct *__create_workqueue(const char *name,
                                            int singlethread, int freezeable)
{
        int cpu, destroy = 0;
        struct workqueue_struct *wq;
        struct task_struct *p;

        wq = kzalloc(sizeof(*wq), GFP_KERNEL);
        if (!wq)
                return NULL;

        wq->cpu_wq = alloc_percpu(struct cpu_workqueue_struct);
        if (!wq->cpu_wq) {
                kfree(wq);
                return NULL;
        }

        wq->name = name;
        mutex_lock(&workqueue_mutex);
        if (singlethread) {
                INIT_LIST_HEAD(&wq->list);
                p = create_workqueue_thread(wq, singlethread_cpu, freezeable);
                if (!p)
                        destroy = 1;
                else
                        wake_up_process(p);
        } else {
                list_add(&wq->list, &workqueues);
                for_each_online_cpu(cpu) {
                        p = create_workqueue_thread(wq, cpu, freezeable);
                        if (p) {
                                kthread_bind(p, cpu);
                                wake_up_process(p);
                        } else
                                destroy = 1;
                }
        }
        mutex_unlock(&workqueue_mutex);

        /*
         * Was there any error during startup? If yes then clean up:
         */
        if (destroy) {
                destroy_workqueue(wq);
                wq = NULL;
        }
        return wq;
}
EXPORT_SYMBOL_GPL(__create_workqueue);

static void cleanup_workqueue_thread(struct workqueue_struct *wq, int cpu)
{
        struct cpu_workqueue_struct *cwq;
        unsigned long flags;
        struct task_struct *p;

        cwq = per_cpu_ptr(wq->cpu_wq, cpu);
        spin_lock_irqsave(&cwq->lock, flags);
        p = cwq->thread;
        cwq->thread = NULL;
        spin_unlock_irqrestore(&cwq->lock, flags);
        if (p)
                kthread_stop(p);
}

/**
 * destroy_workqueue - safely terminate a workqueue
 * @wq: target workqueue
 *
 * Safely destroy a workqueue. All work currently pending will be done first.
 */
void destroy_workqueue(struct workqueue_struct *wq)
{
        int cpu;

        flush_workqueue(wq);

        /* We don't need the distraction of CPUs appearing and vanishing. */
        mutex_lock(&workqueue_mutex);
        if (is_single_threaded(wq))
                cleanup_workqueue_thread(wq, singlethread_cpu);
        else {
                for_each_online_cpu(cpu)
                        cleanup_workqueue_thread(wq, cpu);
                list_del(&wq->list);
        }
        mutex_unlock(&workqueue_mutex);
        free_percpu(wq->cpu_wq);
        kfree(wq);
}
EXPORT_SYMBOL_GPL(destroy_workqueue);

static struct workqueue_struct *keventd_wq;
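/*
 * keventd_wq is the shared, kernel-global workqueue behind schedule_work()
 * and friends.  It is created by init_workqueues() below under the name
 * "events", so its per-CPU worker threads show up as events/0, events/1, ...
 */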
/**
 * schedule_work - put work task in global workqueue
 * @work: job to be done
 *
 * This puts a job in the kernel-global workqueue.
 */
int fastcall schedule_work(struct work_struct *work)
{
        return queue_work(keventd_wq, work);
}
EXPORT_SYMBOL(schedule_work);

/**
 * schedule_delayed_work - put work task in global workqueue after delay
 * @dwork: job to be done
 * @delay: number of jiffies to wait or 0 for immediate execution
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue.
 */
int fastcall schedule_delayed_work(struct delayed_work *dwork,
                                        unsigned long delay)
{
        timer_stats_timer_set_start_info(&dwork->timer);
        return queue_delayed_work(keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work);

/**
 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
 * @cpu: cpu to use
 * @dwork: job to be done
 * @delay: number of jiffies to wait
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue on the specified CPU.
 */
int schedule_delayed_work_on(int cpu,
                        struct delayed_work *dwork, unsigned long delay)
{
        return queue_delayed_work_on(cpu, keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work_on);
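/*
 * Illustrative sketch (hypothetical names): deferring the non-urgent part
 * of an interrupt handler to process context via the shared keventd queue.
 *
 *      static irqreturn_t my_irq(int irq, void *dev_id)
 *      {
 *              struct my_dev *dev = dev_id;
 *
 *              ... acknowledge the hardware (atomic context) ...
 *              schedule_work(&dev->rx_work);
 *              return IRQ_HANDLED;
 *      }
 *
 * The rx_work handler then runs later in one of the events/N threads,
 * where it is free to sleep.
 */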
/**
 * schedule_on_each_cpu - call a function on each online CPU from keventd
 * @func: the function to call
 *
 * Returns zero on success.
 * Returns -ve errno on failure.
 *
 * Appears to be racy against CPU hotplug.
 *
 * schedule_on_each_cpu() is very slow.
 */
int schedule_on_each_cpu(work_func_t func)
{
        int cpu;
        struct work_struct *works;

        works = alloc_percpu(struct work_struct);
        if (!works)
                return -ENOMEM;

        preempt_disable();              /* CPU hotplug */
        for_each_online_cpu(cpu) {
                struct work_struct *work = per_cpu_ptr(works, cpu);

                INIT_WORK(work, func);
                set_bit(WORK_STRUCT_PENDING, work_data_bits(work));
                __queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu), work);
        }
        preempt_enable();
        flush_workqueue(keventd_wq);
        free_percpu(works);
        return 0;
}

void flush_scheduled_work(void)
{
        flush_workqueue(keventd_wq);
}
EXPORT_SYMBOL(flush_scheduled_work);

/**
 * cancel_rearming_delayed_workqueue - reliably kill off a delayed work whose handler rearms the delayed work.
 * @wq:   the controlling workqueue structure
 * @dwork: the delayed work struct
 */
void cancel_rearming_delayed_workqueue(struct workqueue_struct *wq,
                                       struct delayed_work *dwork)
{
        while (!cancel_delayed_work(dwork))
                flush_workqueue(wq);
}
EXPORT_SYMBOL(cancel_rearming_delayed_workqueue);

/**
 * cancel_rearming_delayed_work - reliably kill off a delayed keventd work whose handler rearms the delayed work.
 * @dwork: the delayed work struct
 */
void cancel_rearming_delayed_work(struct delayed_work *dwork)
{
        cancel_rearming_delayed_workqueue(keventd_wq, dwork);
}
EXPORT_SYMBOL(cancel_rearming_delayed_work);
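/*
 * Illustrative sketch (hypothetical names) of the "rearming" pattern the
 * two helpers above exist for: a handler that requeues itself, and a
 * teardown path that must kill it reliably.
 *
 *      static void my_poll(struct work_struct *work)
 *      {
 *              struct my_dev *dev = container_of(work, struct my_dev,
 *                                                poll_work.work);
 *              ... poll the hardware ...
 *              schedule_delayed_work(&dev->poll_work, HZ);
 *      }
 *
 * A bare cancel_delayed_work() can lose the race against a handler that
 * is just about to rearm, hence:
 *
 *      cancel_rearming_delayed_work(&dev->poll_work);
 */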
/**
 * execute_in_process_context - reliably execute the routine with user context
 * @fn:         the function to execute
 * @ew:         guaranteed storage for the execute work structure (must
 *              be available when the work executes)
 *
 * Executes the function immediately if process context is available,
 * otherwise schedules the function for delayed execution.
 *
 * Returns:     0 - function was executed
 *              1 - function was scheduled for execution
 */
int execute_in_process_context(work_func_t fn, struct execute_work *ew)
{
        if (!in_interrupt()) {
                fn(&ew->work);
                return 0;
        }

        INIT_WORK(&ew->work, fn);
        schedule_work(&ew->work);

        return 1;
}
EXPORT_SYMBOL_GPL(execute_in_process_context);
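/*
 * Illustrative sketch (hypothetical names): releasing an object from a
 * path that may run in either process or interrupt context.  The
 * execute_work member lives in the object itself so it is still around
 * if the release has to be deferred.
 *
 *      static void my_release(struct work_struct *work)
 *      {
 *              struct my_obj *obj = container_of(work, struct my_obj,
 *                                                ew.work);
 *              kfree(obj);
 *      }
 *
 *      execute_in_process_context(my_release, &obj->ew);
 */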
int keventd_up(void)
{
        return keventd_wq != NULL;
}

int current_is_keventd(void)
{
        struct cpu_workqueue_struct *cwq;
        int cpu = smp_processor_id();   /* preempt-safe: keventd is per-cpu */
        int ret = 0;

        BUG_ON(!keventd_wq);

        cwq = per_cpu_ptr(keventd_wq->cpu_wq, cpu);
        if (current == cwq->thread)
                ret = 1;

        return ret;
}

/* Take the work from this (downed) CPU. */
static void take_over_work(struct workqueue_struct *wq, unsigned int cpu)
{
        struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);
        struct list_head list;
        struct work_struct *work;

        spin_lock_irq(&cwq->lock);
        list_replace_init(&cwq->worklist, &list);

        while (!list_empty(&list)) {
                printk("Taking work for %s\n", wq->name);
                work = list_entry(list.next, struct work_struct, entry);
                list_del(&work->entry);
                __queue_work(per_cpu_ptr(wq->cpu_wq, smp_processor_id()), work);
        }
        spin_unlock_irq(&cwq->lock);
}

/* We're holding the cpucontrol mutex here */
static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
                                            unsigned long action,
                                            void *hcpu)
{
        unsigned int hotcpu = (unsigned long)hcpu;
        struct workqueue_struct *wq;

        switch (action) {
        case CPU_UP_PREPARE:
                mutex_lock(&workqueue_mutex);
                /* Create a new workqueue thread for it. */
                list_for_each_entry(wq, &workqueues, list) {
                        if (!create_workqueue_thread(wq, hotcpu, 0)) {
                                printk("workqueue for %i failed\n", hotcpu);
                                return NOTIFY_BAD;
                        }
                }
                break;

        case CPU_ONLINE:
                /* Kick off worker threads. */
                list_for_each_entry(wq, &workqueues, list) {
                        struct cpu_workqueue_struct *cwq;

                        cwq = per_cpu_ptr(wq->cpu_wq, hotcpu);
                        kthread_bind(cwq->thread, hotcpu);
                        wake_up_process(cwq->thread);
                }
                mutex_unlock(&workqueue_mutex);
                break;

        case CPU_UP_CANCELED:
                list_for_each_entry(wq, &workqueues, list) {
                        if (!per_cpu_ptr(wq->cpu_wq, hotcpu)->thread)
                                continue;
                        /* Unbind so it can run. */
                        kthread_bind(per_cpu_ptr(wq->cpu_wq, hotcpu)->thread,
                                     any_online_cpu(cpu_online_map));
                        cleanup_workqueue_thread(wq, hotcpu);
                }
                mutex_unlock(&workqueue_mutex);
                break;

        case CPU_DOWN_PREPARE:
                mutex_lock(&workqueue_mutex);
                break;

        case CPU_DOWN_FAILED:
                mutex_unlock(&workqueue_mutex);
                break;

        case CPU_DEAD:
                list_for_each_entry(wq, &workqueues, list)
                        cleanup_workqueue_thread(wq, hotcpu);
                list_for_each_entry(wq, &workqueues, list)
                        take_over_work(wq, hotcpu);
                mutex_unlock(&workqueue_mutex);
                break;
        }

        return NOTIFY_OK;
}

void init_workqueues(void)
{
        singlethread_cpu = first_cpu(cpu_possible_map);
        hotcpu_notifier(workqueue_cpu_callback, 0);
        keventd_wq = create_workqueue("events");
        BUG_ON(!keventd_wq);
}