/*
 * linux/kernel/workqueue.c
 *
 * Generic mechanism for defining kernel helper threads for running
 * arbitrary tasks in process context.
 *
 * Started by Ingo Molnar, Copyright (C) 2002
 *
 * Derived from the taskqueue/keventd code by:
 *
 *   David Woodhouse <[email protected]>
 *   Andrew Morton <[email protected]>
 *   Kai Petzke <[email protected]>
 *   Theodore Ts'o <[email protected]>
 *
 * Made to use alloc_percpu by Christoph Lameter <[email protected]>.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/mempolicy.h>
#include <linux/freezer.h>
#include <linux/kallsyms.h>
#include <linux/debug_locks.h>

/*
 * The per-CPU workqueue (if single thread, we always use the first
 * possible cpu).
 */
struct cpu_workqueue_struct {

	spinlock_t lock;

	struct list_head worklist;
	wait_queue_head_t more_work;
	struct work_struct *current_work;

	struct workqueue_struct *wq;
	struct task_struct *thread;
	int should_stop;

	int run_depth;		/* Detect run_workqueue() recursion depth */
} ____cacheline_aligned;

/*
 * The externally visible workqueue abstraction is an array of
 * per-CPU workqueues:
 */
struct workqueue_struct {
	struct cpu_workqueue_struct *cpu_wq;
	struct list_head list;
	const char *name;
	int singlethread;
	int freezeable;		/* Freeze threads during suspend */
};

/* All the per-cpu workqueues on the system, for hotplug cpu to add/remove
   threads to each one as cpus come/go. */
static DEFINE_MUTEX(workqueue_mutex);
static LIST_HEAD(workqueues);

static int singlethread_cpu __read_mostly;
static cpumask_t cpu_singlethread_map __read_mostly;
/* optimization, we could use cpu_possible_map */
static cpumask_t cpu_populated_map __read_mostly;

/* If it's single threaded, it isn't in the list of workqueues. */
static inline int is_single_threaded(struct workqueue_struct *wq)
{
	return wq->singlethread;
}

static const cpumask_t *wq_cpu_map(struct workqueue_struct *wq)
{
	return is_single_threaded(wq)
		? &cpu_singlethread_map : &cpu_populated_map;
}

static
struct cpu_workqueue_struct *wq_per_cpu(struct workqueue_struct *wq, int cpu)
{
	if (unlikely(is_single_threaded(wq)))
		cpu = singlethread_cpu;
	return per_cpu_ptr(wq->cpu_wq, cpu);
}

/*
 * Set the workqueue on which a work item is to be run
 * - Must *only* be called if the pending flag is set
 */
static inline void set_wq_data(struct work_struct *work,
				struct cpu_workqueue_struct *cwq)
{
	unsigned long new;

	BUG_ON(!work_pending(work));

	new = (unsigned long) cwq | (1UL << WORK_STRUCT_PENDING);
	new |= WORK_STRUCT_FLAG_MASK & *work_data_bits(work);
	atomic_long_set(&work->data, new);
}

static inline
struct cpu_workqueue_struct *get_wq_data(struct work_struct *work)
{
	return (void *) (atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK);
}

static void insert_work(struct cpu_workqueue_struct *cwq,
				struct work_struct *work, int tail)
{
	set_wq_data(work, cwq);
	if (tail)
		list_add_tail(&work->entry, &cwq->worklist);
	else
		list_add(&work->entry, &cwq->worklist);
	wake_up(&cwq->more_work);
}
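/*
 * Illustrative sketch, not part of the original file: work->data carries both
 * the flag bits and the cpu_workqueue_struct pointer (the structure is
 * cacheline aligned, so its low bits are free for the flags).  The local
 * variables below are hypothetical.
 *
 *	struct cpu_workqueue_struct *cwq = wq_per_cpu(wq, raw_smp_processor_id());
 *
 *	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
 *		set_wq_data(work, cwq);
 *		BUG_ON(get_wq_data(work) != cwq);	pointer survives the flags
 *		BUG_ON(!work_pending(work));		flags survive the pointer
 *	}
 */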
/* Preempt must be disabled. */
static void __queue_work(struct cpu_workqueue_struct *cwq,
			 struct work_struct *work)
{
	unsigned long flags;

	spin_lock_irqsave(&cwq->lock, flags);
	insert_work(cwq, work, 1);
	spin_unlock_irqrestore(&cwq->lock, flags);
}

/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to the CPU on which it was submitted, but there is no
 * guarantee that it will be processed by that CPU.
 */
int fastcall queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
	int ret = 0;

	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
		BUG_ON(!list_empty(&work->entry));
		__queue_work(wq_per_cpu(wq, get_cpu()), work);
		put_cpu();
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_work);

void delayed_work_timer_fn(unsigned long __data)
{
	struct delayed_work *dwork = (struct delayed_work *)__data;
	struct cpu_workqueue_struct *cwq = get_wq_data(&dwork->work);
	struct workqueue_struct *wq = cwq->wq;

	__queue_work(wq_per_cpu(wq, smp_processor_id()), &dwork->work);
}
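/*
 * Usage sketch, not part of the original file: a minimal caller of
 * queue_work().  The names my_dev, my_wq and my_work_handler are
 * hypothetical.
 *
 *	struct my_dev {
 *		struct workqueue_struct *my_wq;
 *		struct work_struct work;
 *	};
 *
 *	static void my_work_handler(struct work_struct *work)
 *	{
 *		struct my_dev *dev = container_of(work, struct my_dev, work);
 *		...runs in process context on a workqueue thread, may sleep...
 *	}
 *
 *	INIT_WORK(&dev->work, my_work_handler);
 *	if (!queue_work(dev->my_wq, &dev->work))
 *		...was already pending, it will still run only once...
 */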
/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @dwork was already on a queue, non-zero otherwise.
 */
int fastcall queue_delayed_work(struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	timer_stats_timer_set_start_info(&dwork->timer);
	if (delay == 0)
		return queue_work(wq, &dwork->work);

	return queue_delayed_work_on(-1, wq, dwork, delay);
}
EXPORT_SYMBOL_GPL(queue_delayed_work);

/**
 * queue_delayed_work_on - queue work on specific CPU after delay
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @dwork was already on a queue, non-zero otherwise.
 */
int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	int ret = 0;
	struct timer_list *timer = &dwork->timer;
	struct work_struct *work = &dwork->work;

	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
		BUG_ON(timer_pending(timer));
		BUG_ON(!list_empty(&work->entry));

		/* This stores cwq for the moment, for the timer_fn */
		set_wq_data(work, wq_per_cpu(wq, raw_smp_processor_id()));
		timer->expires = jiffies + delay;
		timer->data = (unsigned long)dwork;
		timer->function = delayed_work_timer_fn;

		if (unlikely(cpu >= 0))
			add_timer_on(timer, cpu);
		else
			add_timer(timer);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_delayed_work_on);
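/*
 * Usage sketch, not part of the original file: delayed queueing.  The names
 * my_wq, my_dwork and my_timeout_fn are hypothetical.
 *
 *	static void my_timeout_fn(struct work_struct *work);
 *	static DECLARE_DELAYED_WORK(my_dwork, my_timeout_fn);
 *
 *	queue_delayed_work(my_wq, &my_dwork, HZ / 2);
 *
 * or, to run the handler on a particular CPU:
 *
 *	queue_delayed_work_on(1, my_wq, &my_dwork, HZ / 2);
 */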
static void run_workqueue(struct cpu_workqueue_struct *cwq)
{
	spin_lock_irq(&cwq->lock);
	cwq->run_depth++;
	if (cwq->run_depth > 3) {
		/* morton gets to eat his hat */
		printk("%s: recursion depth exceeded: %d\n",
			__FUNCTION__, cwq->run_depth);
		dump_stack();
	}
	while (!list_empty(&cwq->worklist)) {
		struct work_struct *work = list_entry(cwq->worklist.next,
						struct work_struct, entry);
		work_func_t f = work->func;

		cwq->current_work = work;
		list_del_init(cwq->worklist.next);
		spin_unlock_irq(&cwq->lock);

		BUG_ON(get_wq_data(work) != cwq);
		work_clear_pending(work);
		f(work);

		if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
			printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
					"%s/0x%08x/%d\n",
					current->comm, preempt_count(),
					current->pid);
			printk(KERN_ERR "    last function: ");
			print_symbol("%s\n", (unsigned long)f);
			debug_show_held_locks(current);
			dump_stack();
		}

		spin_lock_irq(&cwq->lock);
		cwq->current_work = NULL;
	}
	cwq->run_depth--;
	spin_unlock_irq(&cwq->lock);
}

/*
 * NOTE: the caller must not touch *cwq if this func returns true
 */
static int cwq_should_stop(struct cpu_workqueue_struct *cwq)
{
	int should_stop = cwq->should_stop;

	if (unlikely(should_stop)) {
		spin_lock_irq(&cwq->lock);
		should_stop = cwq->should_stop && list_empty(&cwq->worklist);
		if (should_stop)
			cwq->thread = NULL;
		spin_unlock_irq(&cwq->lock);
	}

	return should_stop;
}

static int worker_thread(void *__cwq)
{
	struct cpu_workqueue_struct *cwq = __cwq;
	DEFINE_WAIT(wait);
	struct k_sigaction sa;
	sigset_t blocked;

	if (!cwq->wq->freezeable)
		current->flags |= PF_NOFREEZE;

	set_user_nice(current, -5);

	/* Block and flush all signals */
	sigfillset(&blocked);
	sigprocmask(SIG_BLOCK, &blocked, NULL);
	flush_signals(current);

	/*
	 * We inherited MPOL_INTERLEAVE from the booting kernel.
	 * Set MPOL_DEFAULT to ensure node local allocations.
	 */
	numa_default_policy();
	/* SIG_IGN makes children autoreap: see do_notify_parent(). */
	sa.sa.sa_handler = SIG_IGN;
	sa.sa.sa_flags = 0;
	siginitset(&sa.sa.sa_mask, sigmask(SIGCHLD));
	do_sigaction(SIGCHLD, &sa, (struct k_sigaction *)0);

	for (;;) {
		if (cwq->wq->freezeable)
			try_to_freeze();

		prepare_to_wait(&cwq->more_work, &wait, TASK_INTERRUPTIBLE);
		if (!cwq->should_stop && list_empty(&cwq->worklist))
			schedule();
		finish_wait(&cwq->more_work, &wait);

		if (cwq_should_stop(cwq))
			break;

		run_workqueue(cwq);
	}

	return 0;
}

struct wq_barrier {
	struct work_struct	work;
	struct completion	done;
};

static void wq_barrier_func(struct work_struct *work)
{
	struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
	complete(&barr->done);
}

static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
					struct wq_barrier *barr, int tail)
{
	INIT_WORK(&barr->work, wq_barrier_func);
	__set_bit(WORK_STRUCT_PENDING, work_data_bits(&barr->work));

	init_completion(&barr->done);

	insert_work(cwq, &barr->work, tail);
}

static void flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
{
	if (cwq->thread == current) {
		/*
		 * Probably keventd trying to flush its own queue. So simply run
		 * it by hand rather than deadlocking.
		 */
		run_workqueue(cwq);
	} else {
		struct wq_barrier barr;
		int active = 0;

		spin_lock_irq(&cwq->lock);
		if (!list_empty(&cwq->worklist) || cwq->current_work != NULL) {
			insert_wq_barrier(cwq, &barr, 1);
			active = 1;
		}
		spin_unlock_irq(&cwq->lock);

		if (active)
			wait_for_completion(&barr.done);
	}
}
/**
 * flush_workqueue - ensure that any scheduled work has run to completion.
 * @wq: workqueue to flush
 *
 * Forces execution of the workqueue and blocks until its completion.
 * This is typically used in driver shutdown handlers.
 *
 * We sleep until all work items which were queued on entry have been handled,
 * but we are not livelocked by new incoming ones.
 *
 * This function used to run the workqueues itself.  Now we just wait for the
 * helper threads to do it.
 */
void fastcall flush_workqueue(struct workqueue_struct *wq)
{
	const cpumask_t *cpu_map = wq_cpu_map(wq);
	int cpu;

	might_sleep();
	for_each_cpu_mask(cpu, *cpu_map)
		flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
}
EXPORT_SYMBOL_GPL(flush_workqueue);
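/*
 * Usage sketch, not part of the original file: a typical driver shutdown
 * path.  my_dev_shutdown and the dev fields are hypothetical; the caller is
 * assumed to have already stopped everything that could queue new work.
 *
 *	static void my_dev_shutdown(struct my_dev *dev)
 *	{
 *		dev->stopping = 1;
 *		flush_workqueue(dev->my_wq);
 *		destroy_workqueue(dev->my_wq);	(defined later in this file)
 *	}
 */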
static void wait_on_work(struct cpu_workqueue_struct *cwq,
				struct work_struct *work)
{
	struct wq_barrier barr;
	int running = 0;

	spin_lock_irq(&cwq->lock);
	if (unlikely(cwq->current_work == work)) {
		insert_wq_barrier(cwq, &barr, 0);
		running = 1;
	}
	spin_unlock_irq(&cwq->lock);

	if (unlikely(running))
		wait_for_completion(&barr.done);
}

/**
 * flush_work - block until a work_struct's callback has terminated
 * @wq: the workqueue on which the work is queued
 * @work: the work which is to be flushed
 *
 * flush_work() will attempt to cancel the work if it is queued.  If the work's
 * callback appears to be running, flush_work() will block until it has
 * completed.
 *
 * flush_work() is designed to be used when the caller is tearing down data
 * structures which the callback function operates upon.  It is expected that,
 * prior to calling flush_work(), the caller has arranged for the work to not
 * be requeued.
 */
void flush_work(struct workqueue_struct *wq, struct work_struct *work)
{
	const cpumask_t *cpu_map = wq_cpu_map(wq);
	struct cpu_workqueue_struct *cwq;
	int cpu;

	might_sleep();

	cwq = get_wq_data(work);
	/* Was it ever queued ? */
	if (!cwq)
		return;

	/*
	 * This work can't be re-queued, no need to re-check that
	 * get_wq_data() is still the same when we take cwq->lock.
	 */
	spin_lock_irq(&cwq->lock);
	list_del_init(&work->entry);
	work_clear_pending(work);
	spin_unlock_irq(&cwq->lock);

	for_each_cpu_mask(cpu, *cpu_map)
		wait_on_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
}
EXPORT_SYMBOL_GPL(flush_work);


static struct workqueue_struct *keventd_wq;

/**
 * schedule_work - put work task in global workqueue
 * @work: job to be done
 *
 * This puts a job in the kernel-global workqueue.
 */
int fastcall schedule_work(struct work_struct *work)
{
	return queue_work(keventd_wq, work);
}
EXPORT_SYMBOL(schedule_work);
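/*
 * Usage sketch, not part of the original file: deferring work from an
 * interrupt handler to process context via the kernel-global queue.
 * my_irq_handler and my_bh_work are hypothetical names.
 *
 *	static void my_bh_work(struct work_struct *work);
 *	static DECLARE_WORK(my_work, my_bh_work);
 *
 *	static irqreturn_t my_irq_handler(int irq, void *dev_id)
 *	{
 *		...ack the hardware...
 *		schedule_work(&my_work);	cannot sleep here, so defer
 *		return IRQ_HANDLED;
 *	}
 */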
/**
 * schedule_delayed_work - put work task in global workqueue after delay
 * @dwork: job to be done
 * @delay: number of jiffies to wait or 0 for immediate execution
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue.
 */
int fastcall schedule_delayed_work(struct delayed_work *dwork,
					unsigned long delay)
{
	timer_stats_timer_set_start_info(&dwork->timer);
	return queue_delayed_work(keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work);

/**
 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
 * @cpu: cpu to use
 * @dwork: job to be done
 * @delay: number of jiffies to wait
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue on the specified CPU.
 */
int schedule_delayed_work_on(int cpu,
			struct delayed_work *dwork, unsigned long delay)
{
	return queue_delayed_work_on(cpu, keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work_on);

/**
 * schedule_on_each_cpu - call a function on each online CPU from keventd
 * @func: the function to call
 *
 * Returns zero on success.
 * Returns -ve errno on failure.
 *
 * Appears to be racy against CPU hotplug.
 *
 * schedule_on_each_cpu() is very slow.
 */
int schedule_on_each_cpu(work_func_t func)
{
	int cpu;
	struct work_struct *works;

	works = alloc_percpu(struct work_struct);
	if (!works)
		return -ENOMEM;

	preempt_disable();		/* CPU hotplug */
	for_each_online_cpu(cpu) {
		struct work_struct *work = per_cpu_ptr(works, cpu);

		INIT_WORK(work, func);
		set_bit(WORK_STRUCT_PENDING, work_data_bits(work));
		__queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu), work);
	}
	preempt_enable();
	flush_workqueue(keventd_wq);
	free_percpu(works);
	return 0;
}

void flush_scheduled_work(void)
{
	flush_workqueue(keventd_wq);
}
EXPORT_SYMBOL(flush_scheduled_work);

void flush_work_keventd(struct work_struct *work)
{
	flush_work(keventd_wq, work);
}
EXPORT_SYMBOL(flush_work_keventd);
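/*
 * Usage sketch, not part of the original file: periodic polling on the
 * kernel-global keventd queue.  my_poll and my_poll_work are hypothetical.
 *
 *	static void my_poll(struct work_struct *work);
 *	static DECLARE_DELAYED_WORK(my_poll_work, my_poll);
 *
 *	static void my_poll(struct work_struct *work)
 *	{
 *		...do the periodic check...
 *		schedule_delayed_work(&my_poll_work, HZ);
 *	}
 *
 * Start it with schedule_delayed_work(&my_poll_work, HZ); stop it with
 * cancel_rearming_delayed_work(&my_poll_work), defined below.
 */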
/**
 * cancel_rearming_delayed_work - kill off a delayed work whose handler rearms the delayed work.
 * @dwork: the delayed work struct
 *
 * Note that the work callback function may still be running on return from
 * cancel_delayed_work(). Run flush_workqueue() or flush_work() to wait on it.
 */
void cancel_rearming_delayed_work(struct delayed_work *dwork)
{
	struct cpu_workqueue_struct *cwq = get_wq_data(&dwork->work);

	/* Was it ever queued ? */
	if (cwq != NULL) {
		struct workqueue_struct *wq = cwq->wq;

		while (!cancel_delayed_work(dwork))
			flush_workqueue(wq);
	}
}
EXPORT_SYMBOL(cancel_rearming_delayed_work);

/**
 * execute_in_process_context - reliably execute the routine with user context
 * @fn:		the function to execute
 * @ew:		guaranteed storage for the execute work structure (must
 *		be available when the work executes)
 *
 * Executes the function immediately if process context is available,
 * otherwise schedules the function for delayed execution.
 *
 * Returns:	0 - function was executed
 *		1 - function was scheduled for execution
 */
int execute_in_process_context(work_func_t fn, struct execute_work *ew)
{
	if (!in_interrupt()) {
		fn(&ew->work);
		return 0;
	}

	INIT_WORK(&ew->work, fn);
	schedule_work(&ew->work);

	return 1;
}
EXPORT_SYMBOL_GPL(execute_in_process_context);

int keventd_up(void)
{
	return keventd_wq != NULL;
}

int current_is_keventd(void)
{
	struct cpu_workqueue_struct *cwq;
	int cpu = smp_processor_id();	/* preempt-safe: keventd is per-cpu */
	int ret = 0;

	BUG_ON(!keventd_wq);

	cwq = per_cpu_ptr(keventd_wq->cpu_wq, cpu);
	if (current == cwq->thread)
		ret = 1;

	return ret;
}
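/*
 * Usage sketch, not part of the original file: releasing an object from a
 * path that may run in either interrupt or process context.  my_obj and
 * my_obj_release are hypothetical; the execute_work storage must remain
 * valid until the work has executed.
 *
 *	struct my_obj {
 *		struct execute_work ew;
 *		...
 *	};
 *
 *	static void my_obj_release(struct work_struct *work)
 *	{
 *		struct my_obj *obj = container_of(work, struct my_obj, ew.work);
 *		kfree(obj);
 *	}
 *
 *	execute_in_process_context(my_obj_release, &obj->ew);
 */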
static struct cpu_workqueue_struct *
init_cpu_workqueue(struct workqueue_struct *wq, int cpu)
{
	struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);

	cwq->wq = wq;
	spin_lock_init(&cwq->lock);
	INIT_LIST_HEAD(&cwq->worklist);
	init_waitqueue_head(&cwq->more_work);

	return cwq;
}

static int create_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
	struct workqueue_struct *wq = cwq->wq;
	const char *fmt = is_single_threaded(wq) ? "%s" : "%s/%d";
	struct task_struct *p;

	p = kthread_create(worker_thread, cwq, fmt, wq->name, cpu);
	/*
	 * Nobody can add the work_struct to this cwq,
	 *	if (caller is __create_workqueue)
	 *		nobody should see this wq
	 *	else // caller is CPU_UP_PREPARE
	 *		cpu is not on cpu_online_map
	 * so we can abort safely.
	 */
	if (IS_ERR(p))
		return PTR_ERR(p);

	cwq->thread = p;
	cwq->should_stop = 0;

	return 0;
}

static void start_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
	struct task_struct *p = cwq->thread;

	if (p != NULL) {
		if (cpu >= 0)
			kthread_bind(p, cpu);
		wake_up_process(p);
	}
}

struct workqueue_struct *__create_workqueue(const char *name,
					    int singlethread, int freezeable)
{
	struct workqueue_struct *wq;
	struct cpu_workqueue_struct *cwq;
	int err = 0, cpu;

	wq = kzalloc(sizeof(*wq), GFP_KERNEL);
	if (!wq)
		return NULL;

	wq->cpu_wq = alloc_percpu(struct cpu_workqueue_struct);
	if (!wq->cpu_wq) {
		kfree(wq);
		return NULL;
	}

	wq->name = name;
	wq->singlethread = singlethread;
	wq->freezeable = freezeable;
	INIT_LIST_HEAD(&wq->list);

	if (singlethread) {
		cwq = init_cpu_workqueue(wq, singlethread_cpu);
		err = create_workqueue_thread(cwq, singlethread_cpu);
		start_workqueue_thread(cwq, -1);
	} else {
		mutex_lock(&workqueue_mutex);
		list_add(&wq->list, &workqueues);

		for_each_possible_cpu(cpu) {
			cwq = init_cpu_workqueue(wq, cpu);
			if (err || !cpu_online(cpu))
				continue;
			err = create_workqueue_thread(cwq, cpu);
			start_workqueue_thread(cwq, cpu);
		}
		mutex_unlock(&workqueue_mutex);
	}

	if (err) {
		destroy_workqueue(wq);
		wq = NULL;
	}
	return wq;
}
EXPORT_SYMBOL_GPL(__create_workqueue);
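/*
 * Usage sketch, not part of the original file: callers normally use the
 * wrapper macros from <linux/workqueue.h> rather than calling
 * __create_workqueue() directly.  "my_events" is a hypothetical name.
 *
 *	struct workqueue_struct *wq;
 *
 *	wq = create_workqueue("my_events");			one thread per CPU
 *	wq = create_singlethread_workqueue("my_events");	a single thread
 *	wq = create_freezeable_workqueue("my_events");		frozen on suspend
 *	if (!wq)
 *		return -ENOMEM;
 *	...
 *	destroy_workqueue(wq);
 */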
static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
	struct wq_barrier barr;
	int alive = 0;

	spin_lock_irq(&cwq->lock);
	if (cwq->thread != NULL) {
		insert_wq_barrier(cwq, &barr, 1);
		cwq->should_stop = 1;
		alive = 1;
	}
	spin_unlock_irq(&cwq->lock);

	if (alive) {
		wait_for_completion(&barr.done);

		while (unlikely(cwq->thread != NULL))
			cpu_relax();
		/*
		 * Wait until cwq->thread unlocks cwq->lock,
		 * it won't touch *cwq after that.
		 */
		smp_rmb();
		spin_unlock_wait(&cwq->lock);
	}
}

/**
 * destroy_workqueue - safely terminate a workqueue
 * @wq: target workqueue
 *
 * Safely destroy a workqueue. All work currently pending will be done first.
 */
void destroy_workqueue(struct workqueue_struct *wq)
{
	const cpumask_t *cpu_map = wq_cpu_map(wq);
	struct cpu_workqueue_struct *cwq;
	int cpu;

	mutex_lock(&workqueue_mutex);
	list_del(&wq->list);
	mutex_unlock(&workqueue_mutex);

	for_each_cpu_mask(cpu, *cpu_map) {
		cwq = per_cpu_ptr(wq->cpu_wq, cpu);
		cleanup_workqueue_thread(cwq, cpu);
	}

	free_percpu(wq->cpu_wq);
	kfree(wq);
}
EXPORT_SYMBOL_GPL(destroy_workqueue);
static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
						unsigned long action,
						void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct cpu_workqueue_struct *cwq;
	struct workqueue_struct *wq;

	switch (action) {
	case CPU_LOCK_ACQUIRE:
		mutex_lock(&workqueue_mutex);
		return NOTIFY_OK;

	case CPU_LOCK_RELEASE:
		mutex_unlock(&workqueue_mutex);
		return NOTIFY_OK;

	case CPU_UP_PREPARE:
		cpu_set(cpu, cpu_populated_map);
	}

	list_for_each_entry(wq, &workqueues, list) {
		cwq = per_cpu_ptr(wq->cpu_wq, cpu);

		switch (action) {
		case CPU_UP_PREPARE:
			if (!create_workqueue_thread(cwq, cpu))
				break;
			printk(KERN_ERR "workqueue for %i failed\n", cpu);
			return NOTIFY_BAD;

		case CPU_ONLINE:
			start_workqueue_thread(cwq, cpu);
			break;

		case CPU_UP_CANCELED:
			start_workqueue_thread(cwq, -1);
		case CPU_DEAD:
			cleanup_workqueue_thread(cwq, cpu);
			break;
		}
	}

	return NOTIFY_OK;
}

void __init init_workqueues(void)
{
	cpu_populated_map = cpu_online_map;
	singlethread_cpu = first_cpu(cpu_possible_map);
	cpu_singlethread_map = cpumask_of_cpu(singlethread_cpu);
	hotcpu_notifier(workqueue_cpu_callback, 0);
	keventd_wq = create_workqueue("events");
	BUG_ON(!keventd_wq);
}