/*
 * linux/kernel/workqueue.c
 *
 * Generic mechanism for defining kernel helper threads for running
 * arbitrary tasks in process context.
 *
 * Started by Ingo Molnar, Copyright (C) 2002
 *
 * Derived from the taskqueue/keventd code by:
 *
 *   David Woodhouse <[email protected]>
 *   Andrew Morton <[email protected]>
 *   Kai Petzke <[email protected]>
 *   Theodore Ts'o <[email protected]>
 *
 * Made to use alloc_percpu by Christoph Lameter <[email protected]>.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/mempolicy.h>
#include <linux/freezer.h>
#include <linux/kallsyms.h>
#include <linux/debug_locks.h>

/*
 * The per-CPU workqueue (if single thread, we always use the first
 * possible cpu).
 */
struct cpu_workqueue_struct {

        spinlock_t lock;

        struct list_head worklist;
        wait_queue_head_t more_work;
        struct work_struct *current_work;

        struct workqueue_struct *wq;
        struct task_struct *thread;
        int should_stop;

        int run_depth;          /* Detect run_workqueue() recursion depth */
} ____cacheline_aligned;

/*
 * The externally visible workqueue abstraction is an array of
 * per-CPU workqueues:
 */
struct workqueue_struct {
        struct cpu_workqueue_struct *cpu_wq;
        const char *name;
        struct list_head list;  /* Empty if single thread */
        int freezeable;         /* Freeze threads during suspend */
};
/* All the per-cpu workqueues on the system, for hotplug cpu to add/remove
   threads to each one as cpus come/go. */
static DEFINE_MUTEX(workqueue_mutex);
static LIST_HEAD(workqueues);

static int singlethread_cpu __read_mostly;
/* optimization, we could use cpu_possible_map */
static cpumask_t cpu_populated_map __read_mostly;

/* If it's single threaded, it isn't in the list of workqueues. */
static inline int is_single_threaded(struct workqueue_struct *wq)
{
        return list_empty(&wq->list);
}

/*
 * Set the workqueue on which a work item is to be run
 * - Must *only* be called if the pending flag is set
 */
static inline void set_wq_data(struct work_struct *work, void *wq)
{
        unsigned long new;

        BUG_ON(!work_pending(work));

        new = (unsigned long) wq | (1UL << WORK_STRUCT_PENDING);
        new |= WORK_STRUCT_FLAG_MASK & *work_data_bits(work);
        atomic_long_set(&work->data, new);
}

static inline void *get_wq_data(struct work_struct *work)
{
        return (void *) (atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK);
}

static void insert_work(struct cpu_workqueue_struct *cwq,
                        struct work_struct *work, int tail)
{
        set_wq_data(work, cwq);
        if (tail)
                list_add_tail(&work->entry, &cwq->worklist);
        else
                list_add(&work->entry, &cwq->worklist);
        wake_up(&cwq->more_work);
}

/* Preempt must be disabled. */
static void __queue_work(struct cpu_workqueue_struct *cwq,
                         struct work_struct *work)
{
        unsigned long flags;

        spin_lock_irqsave(&cwq->lock, flags);
        insert_work(cwq, work, 1);
        spin_unlock_irqrestore(&cwq->lock, flags);
}
/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to the CPU on which it was submitted, but there is no
 * guarantee that it will be processed by that CPU.
 */
int fastcall queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
        int ret = 0, cpu = get_cpu();

        if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
                if (unlikely(is_single_threaded(wq)))
                        cpu = singlethread_cpu;
                BUG_ON(!list_empty(&work->entry));
                __queue_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
                ret = 1;
        }
        put_cpu();
        return ret;
}
EXPORT_SYMBOL_GPL(queue_work);
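/*
 * Illustrative sketch (not part of the original file): how a driver might
 * defer its bottom-half processing to a private workqueue with queue_work().
 * The my_dev/my_dev_work names are hypothetical.
 *
 *      struct my_dev {
 *              struct workqueue_struct *wq;
 *              struct work_struct work;
 *      };
 *
 *      static void my_dev_work(struct work_struct *work)
 *      {
 *              struct my_dev *dev = container_of(work, struct my_dev, work);
 *              // runs in process context on the workqueue thread, may sleep
 *      }
 *
 *      // at init time:            INIT_WORK(&dev->work, my_dev_work);
 *      // from the irq handler:    queue_work(dev->wq, &dev->work);
 */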
void delayed_work_timer_fn(unsigned long __data)
{
        struct delayed_work *dwork = (struct delayed_work *)__data;
        struct workqueue_struct *wq = get_wq_data(&dwork->work);
        int cpu = smp_processor_id();

        if (unlikely(is_single_threaded(wq)))
                cpu = singlethread_cpu;

        __queue_work(per_cpu_ptr(wq->cpu_wq, cpu), &dwork->work);
}

/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 */
int fastcall queue_delayed_work(struct workqueue_struct *wq,
                        struct delayed_work *dwork, unsigned long delay)
{
        int ret = 0;
        struct timer_list *timer = &dwork->timer;
        struct work_struct *work = &dwork->work;

        timer_stats_timer_set_start_info(timer);
        if (delay == 0)
                return queue_work(wq, work);

        if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
                BUG_ON(timer_pending(timer));
                BUG_ON(!list_empty(&work->entry));

                /* This stores wq for the moment, for the timer_fn */
                set_wq_data(work, wq);
                timer->expires = jiffies + delay;
                timer->data = (unsigned long)dwork;
                timer->function = delayed_work_timer_fn;
                add_timer(timer);
                ret = 1;
        }
        return ret;
}
EXPORT_SYMBOL_GPL(queue_delayed_work);
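/*
 * Illustrative sketch (not part of the original file): arming a delayed work
 * item with queue_delayed_work().  The dev/my_dev_poll names are hypothetical.
 *
 *      struct delayed_work poll_work;
 *
 *      INIT_DELAYED_WORK(&poll_work, my_dev_poll);
 *      queue_delayed_work(dev->wq, &poll_work, msecs_to_jiffies(100));
 *
 * If the handler re-queues itself to run periodically, teardown should use
 * cancel_rearming_delayed_workqueue() (see further below).
 */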
/**
 * queue_delayed_work_on - queue work on specific CPU after delay
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 */
int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
                        struct delayed_work *dwork, unsigned long delay)
{
        int ret = 0;
        struct timer_list *timer = &dwork->timer;
        struct work_struct *work = &dwork->work;

        if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
                BUG_ON(timer_pending(timer));
                BUG_ON(!list_empty(&work->entry));

                /* This stores wq for the moment, for the timer_fn */
                set_wq_data(work, wq);
                timer->expires = jiffies + delay;
                timer->data = (unsigned long)dwork;
                timer->function = delayed_work_timer_fn;
                add_timer_on(timer, cpu);
                ret = 1;
        }
        return ret;
}
EXPORT_SYMBOL_GPL(queue_delayed_work_on);

static void run_workqueue(struct cpu_workqueue_struct *cwq)
{
        spin_lock_irq(&cwq->lock);
        cwq->run_depth++;
        if (cwq->run_depth > 3) {
                /* morton gets to eat his hat */
                printk("%s: recursion depth exceeded: %d\n",
                        __FUNCTION__, cwq->run_depth);
                dump_stack();
        }
        while (!list_empty(&cwq->worklist)) {
                struct work_struct *work = list_entry(cwq->worklist.next,
                                                struct work_struct, entry);
                work_func_t f = work->func;

                cwq->current_work = work;
                list_del_init(cwq->worklist.next);
                spin_unlock_irq(&cwq->lock);

                BUG_ON(get_wq_data(work) != cwq);
                if (!test_bit(WORK_STRUCT_NOAUTOREL, work_data_bits(work)))
                        work_release(work);
                f(work);

                if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
                        printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
                                        "%s/0x%08x/%d\n",
                                        current->comm, preempt_count(),
                                        current->pid);
                        printk(KERN_ERR "    last function: ");
                        print_symbol("%s\n", (unsigned long)f);
                        debug_show_held_locks(current);
                        dump_stack();
                }

                spin_lock_irq(&cwq->lock);
                cwq->current_work = NULL;
        }
        cwq->run_depth--;
        spin_unlock_irq(&cwq->lock);
}
/*
 * NOTE: the caller must not touch *cwq if this func returns true
 */
static int cwq_should_stop(struct cpu_workqueue_struct *cwq)
{
        int should_stop = cwq->should_stop;

        if (unlikely(should_stop)) {
                spin_lock_irq(&cwq->lock);
                should_stop = cwq->should_stop && list_empty(&cwq->worklist);
                if (should_stop)
                        cwq->thread = NULL;
                spin_unlock_irq(&cwq->lock);
        }

        return should_stop;
}

static int worker_thread(void *__cwq)
{
        struct cpu_workqueue_struct *cwq = __cwq;
        DEFINE_WAIT(wait);
        struct k_sigaction sa;
        sigset_t blocked;

        if (!cwq->wq->freezeable)
                current->flags |= PF_NOFREEZE;

        set_user_nice(current, -5);

        /* Block and flush all signals */
        sigfillset(&blocked);
        sigprocmask(SIG_BLOCK, &blocked, NULL);
        flush_signals(current);

        /*
         * We inherited MPOL_INTERLEAVE from the booting kernel.
         * Set MPOL_DEFAULT to ensure node local allocations.
         */
        numa_default_policy();

        /* SIG_IGN makes children autoreap: see do_notify_parent(). */
        sa.sa.sa_handler = SIG_IGN;
        sa.sa.sa_flags = 0;
        siginitset(&sa.sa.sa_mask, sigmask(SIGCHLD));
        do_sigaction(SIGCHLD, &sa, (struct k_sigaction *)0);

        for (;;) {
                if (cwq->wq->freezeable)
                        try_to_freeze();

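                /*
                 * Sleep until new work is queued or we are asked to stop.
                 * The condition is re-checked after prepare_to_wait() so a
                 * wake-up between the check and schedule() is not lost.
                 */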
                prepare_to_wait(&cwq->more_work, &wait, TASK_INTERRUPTIBLE);
                if (!cwq->should_stop && list_empty(&cwq->worklist))
                        schedule();
                finish_wait(&cwq->more_work, &wait);

                if (cwq_should_stop(cwq))
                        break;

                run_workqueue(cwq);
        }

        return 0;
}

struct wq_barrier {
        struct work_struct      work;
        struct completion       done;
};

static void wq_barrier_func(struct work_struct *work)
{
        struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
        complete(&barr->done);
}

static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
                              struct wq_barrier *barr, int tail)
{
        INIT_WORK(&barr->work, wq_barrier_func);
        __set_bit(WORK_STRUCT_PENDING, work_data_bits(&barr->work));

        init_completion(&barr->done);

        insert_work(cwq, &barr->work, tail);
}

static void flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
{
        if (cwq->thread == current) {
                /*
                 * Probably keventd trying to flush its own queue. So simply run
                 * it by hand rather than deadlocking.
                 */
                run_workqueue(cwq);
        } else {
                struct wq_barrier barr;
                int active = 0;

                spin_lock_irq(&cwq->lock);
                if (!list_empty(&cwq->worklist) || cwq->current_work != NULL) {
                        insert_wq_barrier(cwq, &barr, 1);
                        active = 1;
                }
                spin_unlock_irq(&cwq->lock);

                if (active)
                        wait_for_completion(&barr.done);
        }
}
/**
 * flush_workqueue - ensure that any scheduled work has run to completion.
 * @wq: workqueue to flush
 *
 * Forces execution of the workqueue and blocks until its completion.
 * This is typically used in driver shutdown handlers.
 *
 * We sleep until all works which were queued on entry have been handled,
 * but we are not livelocked by new incoming ones.
 *
 * This function used to run the workqueues itself.  Now we just wait for the
 * helper threads to do it.
 */
void fastcall flush_workqueue(struct workqueue_struct *wq)
{
        might_sleep();

        if (is_single_threaded(wq))
                flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, singlethread_cpu));
        else {
                int cpu;

                for_each_cpu_mask(cpu, cpu_populated_map)
                        flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
        }
}
EXPORT_SYMBOL_GPL(flush_workqueue);
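/*
 * Illustrative sketch (not part of the original file): a shutdown path that
 * drains all previously queued work before freeing the structures it touches.
 * The my_dev names are hypothetical.
 *
 *      static void my_dev_remove(struct my_dev *dev)
 *      {
 *              // stop new submissions first, then drain and tear down
 *              flush_workqueue(dev->wq);
 *              destroy_workqueue(dev->wq);
 *              kfree(dev);
 *      }
 */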
static void wait_on_work(struct cpu_workqueue_struct *cwq,
                         struct work_struct *work)
{
        struct wq_barrier barr;
        int running = 0;

        spin_lock_irq(&cwq->lock);
        if (unlikely(cwq->current_work == work)) {
                insert_wq_barrier(cwq, &barr, 0);
                running = 1;
        }
        spin_unlock_irq(&cwq->lock);

        if (unlikely(running))
                wait_for_completion(&barr.done);
}

/**
 * flush_work - block until a work_struct's callback has terminated
 * @wq: the workqueue on which the work is queued
 * @work: the work which is to be flushed
 *
 * flush_work() will attempt to cancel the work if it is queued.  If the work's
 * callback appears to be running, flush_work() will block until it has
 * completed.
 *
 * flush_work() is designed to be used when the caller is tearing down data
 * structures which the callback function operates upon.  It is expected that,
 * prior to calling flush_work(), the caller has arranged for the work to not
 * be requeued.
 */
void flush_work(struct workqueue_struct *wq, struct work_struct *work)
{
        struct cpu_workqueue_struct *cwq;

        might_sleep();

        cwq = get_wq_data(work);
        /* Was it ever queued ? */
        if (!cwq)
                return;

        /*
         * This work can't be re-queued, no need to re-check that
         * get_wq_data() is still the same when we take cwq->lock.
         */
        spin_lock_irq(&cwq->lock);
        list_del_init(&work->entry);
        work_release(work);
        spin_unlock_irq(&cwq->lock);

        if (is_single_threaded(wq))
                wait_on_work(per_cpu_ptr(wq->cpu_wq, singlethread_cpu), work);
        else {
                int cpu;

                for_each_cpu_mask(cpu, cpu_populated_map)
                        wait_on_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
        }
}
EXPORT_SYMBOL_GPL(flush_work);


static struct workqueue_struct *keventd_wq;

/**
 * schedule_work - put work task in global workqueue
 * @work: job to be done
 *
 * This puts a job in the kernel-global workqueue.
 */
int fastcall schedule_work(struct work_struct *work)
{
        return queue_work(keventd_wq, work);
}
EXPORT_SYMBOL(schedule_work);

/**
 * schedule_delayed_work - put work task in global workqueue after delay
 * @dwork: job to be done
 * @delay: number of jiffies to wait or 0 for immediate execution
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue.
 */
int fastcall schedule_delayed_work(struct delayed_work *dwork,
                                   unsigned long delay)
{
        timer_stats_timer_set_start_info(&dwork->timer);
        return queue_delayed_work(keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work);

/**
 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
 * @cpu: cpu to use
 * @dwork: job to be done
 * @delay: number of jiffies to wait
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue on the specified CPU.
 */
int schedule_delayed_work_on(int cpu,
                        struct delayed_work *dwork, unsigned long delay)
{
        return queue_delayed_work_on(cpu, keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work_on);
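/*
 * Illustrative sketch (not part of the original file): using the shared
 * keventd workqueue instead of creating a private one.  The my_* names are
 * hypothetical.
 *
 *      static DECLARE_WORK(my_work, my_work_fn);
 *      static DECLARE_DELAYED_WORK(my_retry, my_retry_fn);
 *
 *      schedule_work(&my_work);
 *      schedule_delayed_work(&my_retry, HZ);
 *
 * Handlers queued on keventd must not block for long, since they delay every
 * other user of the shared queue.
 */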
/**
 * schedule_on_each_cpu - call a function on each online CPU from keventd
 * @func: the function to call
 *
 * Returns zero on success.
 * Returns -ve errno on failure.
 *
 * Appears to be racy against CPU hotplug.
 *
 * schedule_on_each_cpu() is very slow.
 */
int schedule_on_each_cpu(work_func_t func)
{
        int cpu;
        struct work_struct *works;

        works = alloc_percpu(struct work_struct);
        if (!works)
                return -ENOMEM;

        preempt_disable();              /* CPU hotplug */
        for_each_online_cpu(cpu) {
                struct work_struct *work = per_cpu_ptr(works, cpu);

                INIT_WORK(work, func);
                set_bit(WORK_STRUCT_PENDING, work_data_bits(work));
                __queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu), work);
        }
        preempt_enable();
        flush_workqueue(keventd_wq);
        free_percpu(works);
        return 0;
}

void flush_scheduled_work(void)
{
        flush_workqueue(keventd_wq);
}
EXPORT_SYMBOL(flush_scheduled_work);

void flush_work_keventd(struct work_struct *work)
{
        flush_work(keventd_wq, work);
}
EXPORT_SYMBOL(flush_work_keventd);

/**
 * cancel_rearming_delayed_workqueue - reliably kill off a delayed work whose handler rearms the delayed work.
 * @wq:    the controlling workqueue structure
 * @dwork: the delayed work struct
 */
void cancel_rearming_delayed_workqueue(struct workqueue_struct *wq,
                                       struct delayed_work *dwork)
{
        while (!cancel_delayed_work(dwork))
                flush_workqueue(wq);
}
EXPORT_SYMBOL(cancel_rearming_delayed_workqueue);
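/*
 * Illustrative sketch (not part of the original file): tearing down a delayed
 * work whose handler rearms itself.  The my_dev names are hypothetical.
 *
 *      static void my_dev_poll(struct work_struct *work)
 *      {
 *              struct my_dev *dev = container_of(work, struct my_dev,
 *                                                poll_work.work);
 *              // ... do the periodic poll ...
 *              queue_delayed_work(dev->wq, &dev->poll_work, HZ);
 *      }
 *
 *      // on unload: guarantees the handler is neither pending nor rearming
 *      cancel_rearming_delayed_workqueue(dev->wq, &dev->poll_work);
 */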
/**
 * cancel_rearming_delayed_work - reliably kill off a delayed keventd work whose handler rearms the delayed work.
 * @dwork: the delayed work struct
 */
void cancel_rearming_delayed_work(struct delayed_work *dwork)
{
        cancel_rearming_delayed_workqueue(keventd_wq, dwork);
}
EXPORT_SYMBOL(cancel_rearming_delayed_work);

/**
 * execute_in_process_context - reliably execute the routine with user context
 * @fn:         the function to execute
 * @ew:         guaranteed storage for the execute work structure (must
 *              be available when the work executes)
 *
 * Executes the function immediately if process context is available,
 * otherwise schedules the function for delayed execution.
 *
 * Returns:     0 - function was executed
 *              1 - function was scheduled for execution
 */
int execute_in_process_context(work_func_t fn, struct execute_work *ew)
{
        if (!in_interrupt()) {
                fn(&ew->work);
                return 0;
        }

        INIT_WORK(&ew->work, fn);
        schedule_work(&ew->work);

        return 1;
}
EXPORT_SYMBOL_GPL(execute_in_process_context);

int keventd_up(void)
{
        return keventd_wq != NULL;
}

int current_is_keventd(void)
{
        struct cpu_workqueue_struct *cwq;
        int cpu = smp_processor_id();   /* preempt-safe: keventd is per-cpu */
        int ret = 0;

        BUG_ON(!keventd_wq);

        cwq = per_cpu_ptr(keventd_wq->cpu_wq, cpu);
        if (current == cwq->thread)
                ret = 1;

        return ret;

}
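/*
 * Illustrative sketch (not part of the original file): execute_in_process_context(),
 * defined above, lets a release path that may run in interrupt context either
 * call the cleanup routine directly or defer it to keventd.  The my_obj names
 * are hypothetical; the handler takes a struct work_struct * and recovers the
 * object with container_of().
 *
 *      struct my_obj {
 *              struct execute_work ew;
 *              // ...
 *      };
 *
 *      static void my_obj_free(struct work_struct *work)
 *      {
 *              struct my_obj *obj = container_of(work, struct my_obj, ew.work);
 *              kfree(obj);
 *      }
 *
 *      // may be called from irq context:
 *      execute_in_process_context(my_obj_free, &obj->ew);
 */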
static struct cpu_workqueue_struct *
init_cpu_workqueue(struct workqueue_struct *wq, int cpu)
{
        struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);

        cwq->wq = wq;
        spin_lock_init(&cwq->lock);
        INIT_LIST_HEAD(&cwq->worklist);
        init_waitqueue_head(&cwq->more_work);

        return cwq;
}

static int create_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
        struct workqueue_struct *wq = cwq->wq;
        const char *fmt = is_single_threaded(wq) ? "%s" : "%s/%d";
        struct task_struct *p;

        p = kthread_create(worker_thread, cwq, fmt, wq->name, cpu);
        /*
         * Nobody can add the work_struct to this cwq,
         *      if (caller is __create_workqueue)
         *              nobody should see this wq
         *      else // caller is CPU_UP_PREPARE
         *              cpu is not on cpu_online_map
         * so we can abort safely.
         */
        if (IS_ERR(p))
                return PTR_ERR(p);

        cwq->thread = p;
        cwq->should_stop = 0;
        if (!is_single_threaded(wq))
                kthread_bind(p, cpu);

        if (is_single_threaded(wq) || cpu_online(cpu))
                wake_up_process(p);

        return 0;
}

struct workqueue_struct *__create_workqueue(const char *name,
                                            int singlethread, int freezeable)
{
        struct workqueue_struct *wq;
        struct cpu_workqueue_struct *cwq;
        int err = 0, cpu;

        wq = kzalloc(sizeof(*wq), GFP_KERNEL);
        if (!wq)
                return NULL;

        wq->cpu_wq = alloc_percpu(struct cpu_workqueue_struct);
        if (!wq->cpu_wq) {
                kfree(wq);
                return NULL;
        }

        wq->name = name;
        wq->freezeable = freezeable;

        if (singlethread) {
                INIT_LIST_HEAD(&wq->list);
                cwq = init_cpu_workqueue(wq, singlethread_cpu);
                err = create_workqueue_thread(cwq, singlethread_cpu);
        } else {
                mutex_lock(&workqueue_mutex);
                list_add(&wq->list, &workqueues);

                for_each_possible_cpu(cpu) {
                        cwq = init_cpu_workqueue(wq, cpu);
                        if (err || !cpu_online(cpu))
                                continue;
                        err = create_workqueue_thread(cwq, cpu);
                }
                mutex_unlock(&workqueue_mutex);
        }

        if (err) {
                destroy_workqueue(wq);
                wq = NULL;
        }
        return wq;
}
EXPORT_SYMBOL_GPL(__create_workqueue);
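/*
 * Illustrative sketch (not part of the original file): the typical lifecycle
 * of a private workqueue, using the create_workqueue()/
 * create_singlethread_workqueue() wrappers around __create_workqueue().
 * The my_dev names are hypothetical.
 *
 *      dev->wq = create_singlethread_workqueue("my_dev");
 *      if (!dev->wq)
 *              return -ENOMEM;
 *
 *      queue_work(dev->wq, &dev->work);
 *      // ...
 *
 *      // on removal: drains pending work, stops the thread(s), frees wq
 *      destroy_workqueue(dev->wq);
 */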
static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
        struct wq_barrier barr;
        int alive = 0;

        spin_lock_irq(&cwq->lock);
        if (cwq->thread != NULL) {
                insert_wq_barrier(cwq, &barr, 1);
                cwq->should_stop = 1;
                alive = 1;
        }
        spin_unlock_irq(&cwq->lock);

        if (alive) {
                wait_for_completion(&barr.done);

                while (unlikely(cwq->thread != NULL))
                        cpu_relax();
                /*
                 * Wait until cwq->thread unlocks cwq->lock,
                 * it won't touch *cwq after that.
                 */
                smp_rmb();
                spin_unlock_wait(&cwq->lock);
        }
}

/**
 * destroy_workqueue - safely terminate a workqueue
 * @wq: target workqueue
 *
 * Safely destroy a workqueue. All work currently pending will be done first.
 */
void destroy_workqueue(struct workqueue_struct *wq)
{
        struct cpu_workqueue_struct *cwq;

        if (is_single_threaded(wq)) {
                cwq = per_cpu_ptr(wq->cpu_wq, singlethread_cpu);
                cleanup_workqueue_thread(cwq, singlethread_cpu);
        } else {
                int cpu;

                mutex_lock(&workqueue_mutex);
                list_del(&wq->list);
                mutex_unlock(&workqueue_mutex);

                for_each_cpu_mask(cpu, cpu_populated_map) {
                        cwq = per_cpu_ptr(wq->cpu_wq, cpu);
                        cleanup_workqueue_thread(cwq, cpu);
                }
        }

        free_percpu(wq->cpu_wq);
        kfree(wq);
}
EXPORT_SYMBOL_GPL(destroy_workqueue);

static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
                                            unsigned long action,
                                            void *hcpu)
{
        unsigned int cpu = (unsigned long)hcpu;
        struct cpu_workqueue_struct *cwq;
        struct workqueue_struct *wq;

        switch (action) {
        case CPU_LOCK_ACQUIRE:
                mutex_lock(&workqueue_mutex);
                return NOTIFY_OK;

        case CPU_LOCK_RELEASE:
                mutex_unlock(&workqueue_mutex);
                return NOTIFY_OK;

        case CPU_UP_PREPARE:
                cpu_set(cpu, cpu_populated_map);
        }

        list_for_each_entry(wq, &workqueues, list) {
                cwq = per_cpu_ptr(wq->cpu_wq, cpu);

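                /*
                 * Bring this workqueue's per-cpu thread in line with the
                 * hotplug event: create it, wake it, or tear it down.
                 */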
                switch (action) {
                case CPU_UP_PREPARE:
                        if (!create_workqueue_thread(cwq, cpu))
                                break;
                        printk(KERN_ERR "workqueue for %i failed\n", cpu);
                        return NOTIFY_BAD;

                case CPU_ONLINE:
                        wake_up_process(cwq->thread);
                        break;

                case CPU_UP_CANCELED:
                        if (cwq->thread)
                                wake_up_process(cwq->thread);
                case CPU_DEAD:
                        cleanup_workqueue_thread(cwq, cpu);
                        break;
                }
        }

        return NOTIFY_OK;
}

void init_workqueues(void)
{
        cpu_populated_map = cpu_online_map;
        singlethread_cpu = first_cpu(cpu_possible_map);
        hotcpu_notifier(workqueue_cpu_callback, 0);
        keventd_wq = create_workqueue("events");
        BUG_ON(!keventd_wq);
}