/*
 * linux/kernel/workqueue.c
 *
 * Generic mechanism for defining kernel helper threads for running
 * arbitrary tasks in process context.
 *
 * Started by Ingo Molnar, Copyright (C) 2002
 *
 * Derived from the taskqueue/keventd code by:
 *
 *   David Woodhouse <[email protected]>
 *   Andrew Morton <[email protected]>
 *   Kai Petzke <[email protected]>
 *   Theodore Ts'o <[email protected]>
 *
 * Made to use alloc_percpu by Christoph Lameter <[email protected]>.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/mempolicy.h>
#include <linux/freezer.h>
#include <linux/kallsyms.h>
#include <linux/debug_locks.h>

/*
 * The per-CPU workqueue (if single thread, we always use the first
 * possible cpu).
 *
 * The sequence counters are for flush_scheduled_work().  It wants to wait
 * until all currently-scheduled works are completed, but it doesn't
 * want to be livelocked by new, incoming ones.  So it waits until
 * remove_sequence is >= the insert_sequence which pertained when
 * flush_scheduled_work() was called.
 */
struct cpu_workqueue_struct {

	spinlock_t lock;

	long remove_sequence;	/* Least-recently added (next to run) */
	long insert_sequence;	/* Next to add */

	struct list_head worklist;
	wait_queue_head_t more_work;
	wait_queue_head_t work_done;

	struct workqueue_struct *wq;
	struct task_struct *thread;

	int run_depth;		/* Detect run_workqueue() recursion depth */

	int freezeable;		/* Freeze the thread during suspend */
} ____cacheline_aligned;

/*
 * The externally visible workqueue abstraction is an array of
 * per-CPU workqueues:
 */
struct workqueue_struct {
	struct cpu_workqueue_struct *cpu_wq;
	const char *name;
	struct list_head list;	/* Empty if single thread */
};

/* All the per-cpu workqueues on the system, for hotplug cpu to add/remove
   threads to each one as cpus come/go. */
static DEFINE_MUTEX(workqueue_mutex);
static LIST_HEAD(workqueues);

static int singlethread_cpu;

/* If it's single threaded, it isn't in the list of workqueues. */
static inline int is_single_threaded(struct workqueue_struct *wq)
{
	return list_empty(&wq->list);
}

/*
 * Set the workqueue on which a work item is to be run
 * - Must *only* be called if the pending flag is set
 */
static inline void set_wq_data(struct work_struct *work, void *wq)
{
	unsigned long new;

	BUG_ON(!work_pending(work));

	new = (unsigned long) wq | (1UL << WORK_STRUCT_PENDING);
	new |= WORK_STRUCT_FLAG_MASK & *work_data_bits(work);
	atomic_long_set(&work->data, new);
}

static inline void *get_wq_data(struct work_struct *work)
{
	return (void *) (atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK);
}
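
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * work->data packs the flag bits from <linux/workqueue.h> into its low bits
 * and the cpu_workqueue_struct pointer into the rest.  The pointer's low
 * bits are guaranteed to be clear because the structure is
 * ____cacheline_aligned, which is what makes the OR in set_wq_data() and
 * the mask in get_wq_data() safe.
 */
#if 0	/* example only, never compiled */
static void wq_data_packing_example(struct cpu_workqueue_struct *cwq,
				    struct work_struct *work)
{
	unsigned long data;

	/* Store the queue pointer together with the PENDING flag bit. */
	data = (unsigned long)cwq | (1UL << WORK_STRUCT_PENDING);
	atomic_long_set(&work->data, data);

	/* Recover the queue pointer later by masking the flag bits off. */
	cwq = (void *)(atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK);
}
#endif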

static int __run_work(struct cpu_workqueue_struct *cwq, struct work_struct *work)
{
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&cwq->lock, flags);
	/*
	 * We need to re-validate the work info after we've gotten
	 * the cpu_workqueue lock.  We can run the work now iff:
	 *
	 *  - the wq_data still matches the cpu_workqueue_struct
	 *  - AND the work is still marked pending
	 *  - AND the work is still on a list (which will be this
	 *    workqueue_struct list)
	 *
	 * All these conditions are important, because we
	 * need to protect against the work being run right
	 * now on another CPU (all but the last one might be
	 * true if it's currently running and has not been
	 * released yet, for example).
	 */
	if (get_wq_data(work) == cwq
	    && work_pending(work)
	    && !list_empty(&work->entry)) {
		work_func_t f = work->func;
		list_del_init(&work->entry);
		spin_unlock_irqrestore(&cwq->lock, flags);

		if (!test_bit(WORK_STRUCT_NOAUTOREL, work_data_bits(work)))
			work_release(work);
		f(work);

		spin_lock_irqsave(&cwq->lock, flags);
		cwq->remove_sequence++;
		wake_up(&cwq->work_done);
		ret = 1;
	}
	spin_unlock_irqrestore(&cwq->lock, flags);
	return ret;
}

/**
 * run_scheduled_work - run scheduled work synchronously
 * @work: work to run
 *
 * This checks if the work was pending, and runs it
 * synchronously if so.  It returns a boolean to indicate
 * whether it had any scheduled work to run or not.
 *
 * NOTE! This _only_ works for normal work_structs. You
 * CANNOT use this for delayed work, because the wq data
 * for delayed work will not point properly to the per-
 * CPU workqueue struct, but will change!
 */
int fastcall run_scheduled_work(struct work_struct *work)
{
	for (;;) {
		struct cpu_workqueue_struct *cwq;

		if (!work_pending(work))
			return 0;
		if (list_empty(&work->entry))
			return 0;
		/* NOTE! This depends intimately on __queue_work! */
		cwq = get_wq_data(work);
		if (!cwq)
			return 0;
		if (__run_work(cwq, work))
			return 1;
	}
}
EXPORT_SYMBOL(run_scheduled_work);

/* Preempt must be disabled. */
static void __queue_work(struct cpu_workqueue_struct *cwq,
			 struct work_struct *work)
{
	unsigned long flags;

	spin_lock_irqsave(&cwq->lock, flags);
	set_wq_data(work, cwq);
	list_add_tail(&work->entry, &cwq->worklist);
	cwq->insert_sequence++;
	wake_up(&cwq->more_work);
	spin_unlock_irqrestore(&cwq->lock, flags);
}

/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to the CPU on which it was submitted, but there is no
 * guarantee that it will be processed by that CPU.
 */
int fastcall queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
	int ret = 0, cpu = get_cpu();

	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
		if (unlikely(is_single_threaded(wq)))
			cpu = singlethread_cpu;
		BUG_ON(!list_empty(&work->entry));
		__queue_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
		ret = 1;
	}
	put_cpu();
	return ret;
}
EXPORT_SYMBOL_GPL(queue_work);
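
/*
 * Typical usage sketch (editor's addition; my_wq, my_work and my_handler
 * are hypothetical).  The handler later runs in process context on the CPU
 * the work was queued on, provided that CPU stays online.
 */
#if 0	/* example only, never compiled */
static void my_handler(struct work_struct *work)
{
	/* runs in a workqueue thread, may sleep */
}

static DECLARE_WORK(my_work, my_handler);
static struct workqueue_struct *my_wq;	/* e.g. from create_workqueue() */

static void kick_my_work(void)
{
	/* Non-zero means newly queued, 0 means it was already pending. */
	if (!queue_work(my_wq, &my_work))
		printk(KERN_DEBUG "my_work was already pending\n");
}
#endif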

void delayed_work_timer_fn(unsigned long __data)
{
	struct delayed_work *dwork = (struct delayed_work *)__data;
	struct workqueue_struct *wq = get_wq_data(&dwork->work);
	int cpu = smp_processor_id();

	if (unlikely(is_single_threaded(wq)))
		cpu = singlethread_cpu;

	__queue_work(per_cpu_ptr(wq->cpu_wq, cpu), &dwork->work);
}

/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @dwork was already on a queue, non-zero otherwise.
 */
int fastcall queue_delayed_work(struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	int ret = 0;
	struct timer_list *timer = &dwork->timer;
	struct work_struct *work = &dwork->work;

	timer_stats_timer_set_start_info(timer);
	if (delay == 0)
		return queue_work(wq, work);

	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
		BUG_ON(timer_pending(timer));
		BUG_ON(!list_empty(&work->entry));

		/* This stores wq for the moment, for the timer_fn */
		set_wq_data(work, wq);
		timer->expires = jiffies + delay;
		timer->data = (unsigned long)dwork;
		timer->function = delayed_work_timer_fn;
		add_timer(timer);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_delayed_work);
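
/*
 * Delayed-work usage sketch (editor's addition; names are hypothetical).
 * queue_delayed_work() only stashes the workqueue pointer and starts the
 * timer; the work is queued for real in delayed_work_timer_fn() above once
 * @delay jiffies have passed.
 */
#if 0	/* example only, never compiled */
static void my_delayed_handler(struct work_struct *work)
{
	struct delayed_work *dwork =
		container_of(work, struct delayed_work, work);
	/* ... */
}

static DECLARE_DELAYED_WORK(my_dwork, my_delayed_handler);

static void kick_my_delayed_work(struct workqueue_struct *wq)
{
	queue_delayed_work(wq, &my_dwork, HZ);	/* run in about one second */
}
#endif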

/**
 * queue_delayed_work_on - queue work on specific CPU after delay
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @dwork was already on a queue, non-zero otherwise.
 */
int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	int ret = 0;
	struct timer_list *timer = &dwork->timer;
	struct work_struct *work = &dwork->work;

	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
		BUG_ON(timer_pending(timer));
		BUG_ON(!list_empty(&work->entry));

		/* This stores wq for the moment, for the timer_fn */
		set_wq_data(work, wq);
		timer->expires = jiffies + delay;
		timer->data = (unsigned long)dwork;
		timer->function = delayed_work_timer_fn;
		add_timer_on(timer, cpu);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_delayed_work_on);

static void run_workqueue(struct cpu_workqueue_struct *cwq)
{
	unsigned long flags;

	/*
	 * Keep taking off work from the queue until
	 * done.
	 */
	spin_lock_irqsave(&cwq->lock, flags);
	cwq->run_depth++;
	if (cwq->run_depth > 3) {
		/* morton gets to eat his hat */
		printk("%s: recursion depth exceeded: %d\n",
			__FUNCTION__, cwq->run_depth);
		dump_stack();
	}
	while (!list_empty(&cwq->worklist)) {
		struct work_struct *work = list_entry(cwq->worklist.next,
						struct work_struct, entry);
		work_func_t f = work->func;

		list_del_init(cwq->worklist.next);
		spin_unlock_irqrestore(&cwq->lock, flags);

		BUG_ON(get_wq_data(work) != cwq);
		if (!test_bit(WORK_STRUCT_NOAUTOREL, work_data_bits(work)))
			work_release(work);
		f(work);

		if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
			printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
					"%s/0x%08x/%d\n",
					current->comm, preempt_count(),
					current->pid);
			printk(KERN_ERR "    last function: ");
			print_symbol("%s\n", (unsigned long)f);
			debug_show_held_locks(current);
			dump_stack();
		}

		spin_lock_irqsave(&cwq->lock, flags);
		cwq->remove_sequence++;
		wake_up(&cwq->work_done);
	}
	cwq->run_depth--;
	spin_unlock_irqrestore(&cwq->lock, flags);
}

static int worker_thread(void *__cwq)
{
	struct cpu_workqueue_struct *cwq = __cwq;
	DECLARE_WAITQUEUE(wait, current);
	struct k_sigaction sa;
	sigset_t blocked;

	if (!cwq->freezeable)
		current->flags |= PF_NOFREEZE;

	set_user_nice(current, -5);

	/* Block and flush all signals */
	sigfillset(&blocked);
	sigprocmask(SIG_BLOCK, &blocked, NULL);
	flush_signals(current);

	/*
	 * We inherited MPOL_INTERLEAVE from the booting kernel.
	 * Set MPOL_DEFAULT to ensure node-local allocations.
	 */
	numa_default_policy();

	/* SIG_IGN makes children autoreap: see do_notify_parent(). */
	sa.sa.sa_handler = SIG_IGN;
	sa.sa.sa_flags = 0;
	siginitset(&sa.sa.sa_mask, sigmask(SIGCHLD));
	do_sigaction(SIGCHLD, &sa, (struct k_sigaction *)0);

	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		if (cwq->freezeable)
			try_to_freeze();

		add_wait_queue(&cwq->more_work, &wait);
		if (list_empty(&cwq->worklist))
			schedule();
		else
			__set_current_state(TASK_RUNNING);
		remove_wait_queue(&cwq->more_work, &wait);

		if (!list_empty(&cwq->worklist))
			run_workqueue(cwq);
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}

static void flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
{
	if (cwq->thread == current) {
		/*
		 * Probably keventd trying to flush its own queue. So simply run
		 * it by hand rather than deadlocking.
		 */
		run_workqueue(cwq);
	} else {
		DEFINE_WAIT(wait);
		long sequence_needed;

		spin_lock_irq(&cwq->lock);
		sequence_needed = cwq->insert_sequence;

		while (sequence_needed - cwq->remove_sequence > 0) {
			prepare_to_wait(&cwq->work_done, &wait,
					TASK_UNINTERRUPTIBLE);
			spin_unlock_irq(&cwq->lock);
			schedule();
			spin_lock_irq(&cwq->lock);
		}
		finish_wait(&cwq->work_done, &wait);
		spin_unlock_irq(&cwq->lock);
	}
}

/**
 * flush_workqueue - ensure that any scheduled work has run to completion.
 * @wq: workqueue to flush
 *
 * Forces execution of the workqueue and blocks until its completion.
 * This is typically used in driver shutdown handlers.
 *
 * This function will sample each workqueue's current insert_sequence number and
 * will sleep until the head sequence is greater than or equal to that.  This
 * means that we sleep until all works which were queued on entry have been
 * handled, but we are not livelocked by new incoming ones.
 *
 * This function used to run the workqueues itself.  Now we just wait for the
 * helper threads to do it.
 */
void fastcall flush_workqueue(struct workqueue_struct *wq)
{
	might_sleep();

	if (is_single_threaded(wq)) {
		/* Always use first cpu's area. */
		flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, singlethread_cpu));
	} else {
		int cpu;

		mutex_lock(&workqueue_mutex);
		for_each_online_cpu(cpu)
			flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
		mutex_unlock(&workqueue_mutex);
	}
}
EXPORT_SYMBOL_GPL(flush_workqueue);
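
/*
 * Flush usage sketch (editor's addition; names are hypothetical).  Because
 * flush_workqueue() waits until remove_sequence catches up with the
 * insert_sequence sampled on entry, everything queued before the call has
 * finished by the time it returns; it may sleep, so no atomic context.
 */
#if 0	/* example only, never compiled */
static void my_sync_point(struct workqueue_struct *wq, struct work_struct *work)
{
	queue_work(wq, work);
	flush_workqueue(wq);	/* *work and all earlier work have completed */
}
#endif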

static struct task_struct *create_workqueue_thread(struct workqueue_struct *wq,
						   int cpu, int freezeable)
{
	struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);
	struct task_struct *p;

	spin_lock_init(&cwq->lock);
	cwq->wq = wq;
	cwq->thread = NULL;
	cwq->insert_sequence = 0;
	cwq->remove_sequence = 0;
	cwq->freezeable = freezeable;
	INIT_LIST_HEAD(&cwq->worklist);
	init_waitqueue_head(&cwq->more_work);
	init_waitqueue_head(&cwq->work_done);

	if (is_single_threaded(wq))
		p = kthread_create(worker_thread, cwq, "%s", wq->name);
	else
		p = kthread_create(worker_thread, cwq, "%s/%d", wq->name, cpu);
	if (IS_ERR(p))
		return NULL;
	cwq->thread = p;
	return p;
}

struct workqueue_struct *__create_workqueue(const char *name,
					    int singlethread, int freezeable)
{
	int cpu, destroy = 0;
	struct workqueue_struct *wq;
	struct task_struct *p;

	wq = kzalloc(sizeof(*wq), GFP_KERNEL);
	if (!wq)
		return NULL;

	wq->cpu_wq = alloc_percpu(struct cpu_workqueue_struct);
	if (!wq->cpu_wq) {
		kfree(wq);
		return NULL;
	}

	wq->name = name;
	mutex_lock(&workqueue_mutex);
	if (singlethread) {
		INIT_LIST_HEAD(&wq->list);
		p = create_workqueue_thread(wq, singlethread_cpu, freezeable);
		if (!p)
			destroy = 1;
		else
			wake_up_process(p);
	} else {
		list_add(&wq->list, &workqueues);
		for_each_online_cpu(cpu) {
			p = create_workqueue_thread(wq, cpu, freezeable);
			if (p) {
				kthread_bind(p, cpu);
				wake_up_process(p);
			} else
				destroy = 1;
		}
	}
	mutex_unlock(&workqueue_mutex);

	/*
	 * Was there any error during startup? If yes then clean up:
	 */
	if (destroy) {
		destroy_workqueue(wq);
		wq = NULL;
	}
	return wq;
}
EXPORT_SYMBOL_GPL(__create_workqueue);
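
/*
 * Lifecycle sketch (editor's addition; names are hypothetical).  Callers
 * normally use the create_workqueue()/create_singlethread_workqueue()
 * wrappers from <linux/workqueue.h>, which expand to __create_workqueue()
 * above, and tear the queue down with destroy_workqueue() when done.
 */
#if 0	/* example only, never compiled */
static struct workqueue_struct *my_wq;

static int __init my_init(void)
{
	my_wq = create_singlethread_workqueue("my_wq");
	if (!my_wq)
		return -ENOMEM;
	return 0;
}

static void __exit my_exit(void)
{
	destroy_workqueue(my_wq);	/* flushes pending work first */
}
#endif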

static void cleanup_workqueue_thread(struct workqueue_struct *wq, int cpu)
{
	struct cpu_workqueue_struct *cwq;
	unsigned long flags;
	struct task_struct *p;

	cwq = per_cpu_ptr(wq->cpu_wq, cpu);
	spin_lock_irqsave(&cwq->lock, flags);
	p = cwq->thread;
	cwq->thread = NULL;
	spin_unlock_irqrestore(&cwq->lock, flags);
	if (p)
		kthread_stop(p);
}

/**
 * destroy_workqueue - safely terminate a workqueue
 * @wq: target workqueue
 *
 * Safely destroy a workqueue. All work currently pending will be done first.
 */
void destroy_workqueue(struct workqueue_struct *wq)
{
	int cpu;

	flush_workqueue(wq);

	/* We don't need the distraction of CPUs appearing and vanishing. */
	mutex_lock(&workqueue_mutex);
	if (is_single_threaded(wq))
		cleanup_workqueue_thread(wq, singlethread_cpu);
	else {
		for_each_online_cpu(cpu)
			cleanup_workqueue_thread(wq, cpu);
		list_del(&wq->list);
	}
	mutex_unlock(&workqueue_mutex);
	free_percpu(wq->cpu_wq);
	kfree(wq);
}
EXPORT_SYMBOL_GPL(destroy_workqueue);

static struct workqueue_struct *keventd_wq;

/**
 * schedule_work - put work task in global workqueue
 * @work: job to be done
 *
 * This puts a job in the kernel-global workqueue.
 */
int fastcall schedule_work(struct work_struct *work)
{
	return queue_work(keventd_wq, work);
}
EXPORT_SYMBOL(schedule_work);

/**
 * schedule_delayed_work - put work task in global workqueue after delay
 * @dwork: job to be done
 * @delay: number of jiffies to wait or 0 for immediate execution
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue.
 */
int fastcall schedule_delayed_work(struct delayed_work *dwork,
					unsigned long delay)
{
	timer_stats_timer_set_start_info(&dwork->timer);
	return queue_delayed_work(keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work);

/**
 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
 * @cpu: cpu to use
 * @dwork: job to be done
 * @delay: number of jiffies to wait
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue on the specified CPU.
 */
int schedule_delayed_work_on(int cpu,
			struct delayed_work *dwork, unsigned long delay)
{
	return queue_delayed_work_on(cpu, keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work_on);
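
/*
 * keventd usage sketch (editor's addition; names are hypothetical).  The
 * schedule_*() helpers are simply the queue_*() family applied to the
 * kernel-global "events" workqueue, so handlers share those threads with
 * the rest of the kernel and should not block for long.
 */
#if 0	/* example only, never compiled */
static void my_event(struct work_struct *work)
{
	/* may sleep briefly, but shares keventd with everybody else */
}

static DECLARE_WORK(my_event_work, my_event);
static DECLARE_DELAYED_WORK(my_poll_work, my_event);

static void my_trigger(void)
{
	schedule_work(&my_event_work);			/* run soon */
	schedule_delayed_work(&my_poll_work, 5 * HZ);	/* run in ~5 seconds */
}
#endif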

/**
 * schedule_on_each_cpu - call a function on each online CPU from keventd
 * @func: the function to call
 *
 * Returns zero on success.
 * Returns -ve errno on failure.
 *
 * Appears to be racy against CPU hotplug.
 *
 * schedule_on_each_cpu() is very slow.
 */
int schedule_on_each_cpu(work_func_t func)
{
	int cpu;
	struct work_struct *works;

	works = alloc_percpu(struct work_struct);
	if (!works)
		return -ENOMEM;

	preempt_disable();		/* CPU hotplug */
	for_each_online_cpu(cpu) {
		struct work_struct *work = per_cpu_ptr(works, cpu);

		INIT_WORK(work, func);
		set_bit(WORK_STRUCT_PENDING, work_data_bits(work));
		__queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu), work);
	}
	preempt_enable();
	flush_workqueue(keventd_wq);
	free_percpu(works);
	return 0;
}

void flush_scheduled_work(void)
{
	flush_workqueue(keventd_wq);
}
EXPORT_SYMBOL(flush_scheduled_work);

/**
 * cancel_rearming_delayed_workqueue - reliably kill off a delayed work whose handler rearms the delayed work.
 * @wq: the controlling workqueue structure
 * @dwork: the delayed work struct
 */
void cancel_rearming_delayed_workqueue(struct workqueue_struct *wq,
				       struct delayed_work *dwork)
{
	while (!cancel_delayed_work(dwork))
		flush_workqueue(wq);
}
EXPORT_SYMBOL(cancel_rearming_delayed_workqueue);

/**
 * cancel_rearming_delayed_work - reliably kill off a delayed keventd work whose handler rearms the delayed work.
 * @dwork: the delayed work struct
 */
void cancel_rearming_delayed_work(struct delayed_work *dwork)
{
	cancel_rearming_delayed_workqueue(keventd_wq, dwork);
}
EXPORT_SYMBOL(cancel_rearming_delayed_work);
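
/*
 * Rearming-work sketch (editor's addition; names are hypothetical).  The
 * cancel_rearming_*() helpers above exist for handlers that requeue
 * themselves: a bare cancel_delayed_work() can lose the race with a handler
 * that is just about to rearm, so the helpers keep retrying the cancel
 * around a flush until it sticks.
 */
#if 0	/* example only, never compiled */
static void my_poll(struct work_struct *work);
static DECLARE_DELAYED_WORK(my_poll_dwork, my_poll);

static void my_poll(struct work_struct *work)
{
	/* ... periodic work ... */
	schedule_delayed_work(&my_poll_dwork, HZ);	/* rearm */
}

static void my_stop_polling(void)
{
	cancel_rearming_delayed_work(&my_poll_dwork);	/* reliable stop */
}
#endif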

/**
 * execute_in_process_context - reliably execute the routine with user context
 * @fn:		the function to execute
 * @ew:		guaranteed storage for the execute work structure (must
 *		be available when the work executes)
 *
 * Executes the function immediately if process context is available,
 * otherwise schedules the function for delayed execution.
 *
 * Returns:	0 - function was executed
 *		1 - function was scheduled for execution
 */
int execute_in_process_context(work_func_t fn, struct execute_work *ew)
{
	if (!in_interrupt()) {
		fn(&ew->work);
		return 0;
	}

	INIT_WORK(&ew->work, fn);
	schedule_work(&ew->work);

	return 1;
}
EXPORT_SYMBOL_GPL(execute_in_process_context);
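
/*
 * Usage sketch (editor's addition; names are hypothetical).  The caller
 * embeds a struct execute_work in an object that outlives the call, so the
 * deferred path has storage available if we happen to be in interrupt
 * context.
 */
#if 0	/* example only, never compiled */
struct my_object {
	struct execute_work release_work;
	/* ... */
};

static void my_release(struct work_struct *work)
{
	struct my_object *obj =
		container_of(work, struct my_object, release_work.work);

	kfree(obj);
}

static void my_put_object(struct my_object *obj)
{
	/* Runs my_release() right away if possible, otherwise via keventd. */
	execute_in_process_context(my_release, &obj->release_work);
}
#endif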

int keventd_up(void)
{
	return keventd_wq != NULL;
}

int current_is_keventd(void)
{
	struct cpu_workqueue_struct *cwq;
	int cpu = smp_processor_id();	/* preempt-safe: keventd is per-cpu */
	int ret = 0;

	BUG_ON(!keventd_wq);

	cwq = per_cpu_ptr(keventd_wq->cpu_wq, cpu);
	if (current == cwq->thread)
		ret = 1;

	return ret;

}

/* Take the work from this (downed) CPU. */
static void take_over_work(struct workqueue_struct *wq, unsigned int cpu)
{
	struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);
	struct list_head list;
	struct work_struct *work;

	spin_lock_irq(&cwq->lock);
	list_replace_init(&cwq->worklist, &list);

	while (!list_empty(&list)) {
		printk("Taking work for %s\n", wq->name);
		work = list_entry(list.next, struct work_struct, entry);
		list_del(&work->entry);
		__queue_work(per_cpu_ptr(wq->cpu_wq, smp_processor_id()), work);
	}
	spin_unlock_irq(&cwq->lock);
}

/* We're holding the cpucontrol mutex here */
static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
					    unsigned long action,
					    void *hcpu)
{
	unsigned int hotcpu = (unsigned long)hcpu;
	struct workqueue_struct *wq;

	switch (action) {
	case CPU_UP_PREPARE:
		mutex_lock(&workqueue_mutex);
		/* Create a new workqueue thread for it. */
		list_for_each_entry(wq, &workqueues, list) {
			if (!create_workqueue_thread(wq, hotcpu, 0)) {
				printk("workqueue for %i failed\n", hotcpu);
				return NOTIFY_BAD;
			}
		}
		break;

	case CPU_ONLINE:
		/* Kick off worker threads. */
		list_for_each_entry(wq, &workqueues, list) {
			struct cpu_workqueue_struct *cwq;

			cwq = per_cpu_ptr(wq->cpu_wq, hotcpu);
			kthread_bind(cwq->thread, hotcpu);
			wake_up_process(cwq->thread);
		}
		mutex_unlock(&workqueue_mutex);
		break;

	case CPU_UP_CANCELED:
		list_for_each_entry(wq, &workqueues, list) {
			if (!per_cpu_ptr(wq->cpu_wq, hotcpu)->thread)
				continue;
			/* Unbind so it can run. */
			kthread_bind(per_cpu_ptr(wq->cpu_wq, hotcpu)->thread,
				     any_online_cpu(cpu_online_map));
			cleanup_workqueue_thread(wq, hotcpu);
		}
		mutex_unlock(&workqueue_mutex);
		break;

	case CPU_DOWN_PREPARE:
		mutex_lock(&workqueue_mutex);
		break;

	case CPU_DOWN_FAILED:
		mutex_unlock(&workqueue_mutex);
		break;

	case CPU_DEAD:
		list_for_each_entry(wq, &workqueues, list)
			cleanup_workqueue_thread(wq, hotcpu);
		list_for_each_entry(wq, &workqueues, list)
			take_over_work(wq, hotcpu);
		mutex_unlock(&workqueue_mutex);
		break;
	}

	return NOTIFY_OK;
}

void init_workqueues(void)
{
	singlethread_cpu = first_cpu(cpu_possible_map);
	hotcpu_notifier(workqueue_cpu_callback, 0);
	keventd_wq = create_workqueue("events");
	BUG_ON(!keventd_wq);
}