/*
 * linux/kernel/workqueue.c
 *
 * Generic mechanism for defining kernel helper threads for running
 * arbitrary tasks in process context.
 *
 * Started by Ingo Molnar, Copyright (C) 2002
 *
 * Derived from the taskqueue/keventd code by:
 *
 *   David Woodhouse <[email protected]>
 *   Andrew Morton
 *   Kai Petzke <[email protected]>
 *   Theodore Ts'o <[email protected]>
 *
 * Made to use alloc_percpu by Christoph Lameter.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/mempolicy.h>
#include <linux/freezer.h>
#include <linux/kallsyms.h>
#include <linux/debug_locks.h>
#include <linux/lockdep.h>

/*
 * The per-CPU workqueue (if single thread, we always use the first
 * possible cpu).
 */
struct cpu_workqueue_struct {

        spinlock_t lock;

        struct list_head worklist;
        wait_queue_head_t more_work;
        struct work_struct *current_work;

        struct workqueue_struct *wq;
        struct task_struct *thread;

        int run_depth;          /* Detect run_workqueue() recursion depth */
} ____cacheline_aligned;

/*
 * The externally visible workqueue abstraction is an array of
 * per-CPU workqueues:
 */
struct workqueue_struct {
        struct cpu_workqueue_struct *cpu_wq;
        struct list_head list;
        const char *name;
        int singlethread;
        int freezeable;         /* Freeze threads during suspend */
        int rt;
#ifdef CONFIG_LOCKDEP
        struct lockdep_map lockdep_map;
#endif
};

/* Serializes the accesses to the list of workqueues. */
static DEFINE_SPINLOCK(workqueue_lock);
static LIST_HEAD(workqueues);

static int singlethread_cpu __read_mostly;
static const struct cpumask *cpu_singlethread_map __read_mostly;
/*
 * _cpu_down() first removes the CPU from cpu_online_map, then CPU_DEAD
 * flushes cwq->worklist. This means that a flush_workqueue()/wait_on_work()
 * which runs in between cannot use for_each_online_cpu(). We could use
 * cpu_possible_map instead; the cpumask below is more documentation
 * than optimization.
 */
static cpumask_var_t cpu_populated_map __read_mostly;

/* If it's single threaded, it isn't in the list of workqueues. */
static inline int is_wq_single_threaded(struct workqueue_struct *wq)
{
        return wq->singlethread;
}

static const struct cpumask *wq_cpu_map(struct workqueue_struct *wq)
{
        return is_wq_single_threaded(wq)
                ? cpu_singlethread_map : cpu_populated_map;
}

static
struct cpu_workqueue_struct *wq_per_cpu(struct workqueue_struct *wq, int cpu)
{
        if (unlikely(is_wq_single_threaded(wq)))
                cpu = singlethread_cpu;
        return per_cpu_ptr(wq->cpu_wq, cpu);
}

/*
 * Set the workqueue on which a work item is to be run
 * - Must *only* be called if the pending flag is set
 */
static inline void set_wq_data(struct work_struct *work,
                               struct cpu_workqueue_struct *cwq)
{
        unsigned long new;

        BUG_ON(!work_pending(work));

        new = (unsigned long) cwq | (1UL << WORK_STRUCT_PENDING);
        new |= WORK_STRUCT_FLAG_MASK & *work_data_bits(work);
        atomic_long_set(&work->data, new);
}

static inline
struct cpu_workqueue_struct *get_wq_data(struct work_struct *work)
{
        return (void *) (atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK);
}
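/*
 * Illustrative sketch (not built): set_wq_data()/get_wq_data() above work
 * because a ____cacheline_aligned cwq address always has zero low bits,
 * so those bits are free to carry WORK_STRUCT_PENDING and friends
 * alongside the pointer.  The helper names below are hypothetical; the
 * masks are the real ones from <linux/workqueue.h>.
 */
#if 0
static unsigned long pack_wq_data(struct cpu_workqueue_struct *cwq,
                                  unsigned long flags)
{
        /* flags must stay within WORK_STRUCT_FLAG_MASK */
        return (unsigned long)cwq | flags;
}

static struct cpu_workqueue_struct *unpack_wq_data(unsigned long data)
{
        /* masking off the flag bits recovers the aligned pointer */
        return (void *)(data & WORK_STRUCT_WQ_DATA_MASK);
}
#endif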
static void insert_work(struct cpu_workqueue_struct *cwq,
                        struct work_struct *work, struct list_head *head)
{
        set_wq_data(work, cwq);
        /*
         * Ensure that we get the right work->data if we see the
         * result of list_add() below, see try_to_grab_pending().
         */
        smp_wmb();
        list_add_tail(&work->entry, head);
        wake_up(&cwq->more_work);
}

static void __queue_work(struct cpu_workqueue_struct *cwq,
                         struct work_struct *work)
{
        unsigned long flags;

        spin_lock_irqsave(&cwq->lock, flags);
        insert_work(cwq, work, &cwq->worklist);
        spin_unlock_irqrestore(&cwq->lock, flags);
}

/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to the CPU on which it was submitted, but if the CPU dies
 * it can be processed by another CPU.
 */
int queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
        int ret;

        ret = queue_work_on(get_cpu(), wq, work);
        put_cpu();

        return ret;
}
EXPORT_SYMBOL_GPL(queue_work);
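/*
 * Illustrative sketch (not built): typical driver-side use of
 * queue_work().  "struct my_dev", my_dev_reset() and the wq member are
 * hypothetical names, not part of this file.
 */
#if 0
struct my_dev {
        struct workqueue_struct *wq;
        struct work_struct reset_work;
};

static void my_dev_reset(struct work_struct *work)
{
        struct my_dev *dev = container_of(work, struct my_dev, reset_work);

        /* runs in the workqueue thread: process context, may sleep */
}

static void my_dev_init_work(struct my_dev *dev)
{
        INIT_WORK(&dev->reset_work, my_dev_reset); /* once, at init time */
}

static void my_dev_kick_reset(struct my_dev *dev)
{
        /* returns 0 if the work was already pending on a queue */
        queue_work(dev->wq, &dev->reset_work);
}
#endif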
/**
 * queue_work_on - queue work on specific cpu
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to a specific CPU, the caller must ensure it
 * can't go away.
 */
int
queue_work_on(int cpu, struct workqueue_struct *wq, struct work_struct *work)
{
        int ret = 0;

        if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
                BUG_ON(!list_empty(&work->entry));
                __queue_work(wq_per_cpu(wq, cpu), work);
                ret = 1;
        }
        return ret;
}
EXPORT_SYMBOL_GPL(queue_work_on);

static void delayed_work_timer_fn(unsigned long __data)
{
        struct delayed_work *dwork = (struct delayed_work *)__data;
        struct cpu_workqueue_struct *cwq = get_wq_data(&dwork->work);
        struct workqueue_struct *wq = cwq->wq;

        __queue_work(wq_per_cpu(wq, smp_processor_id()), &dwork->work);
}

/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @dwork was already on a queue, non-zero otherwise.
 */
int queue_delayed_work(struct workqueue_struct *wq,
                        struct delayed_work *dwork, unsigned long delay)
{
        if (delay == 0)
                return queue_work(wq, &dwork->work);

        return queue_delayed_work_on(-1, wq, dwork, delay);
}
EXPORT_SYMBOL_GPL(queue_delayed_work);
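/*
 * Illustrative sketch (not built): a self-rearming poll built on
 * queue_delayed_work().  "struct my_poller", my_poll_fn() and
 * MY_POLL_INTERVAL are hypothetical.
 */
#if 0
#define MY_POLL_INTERVAL (HZ / 2)

struct my_poller {
        struct workqueue_struct *wq;
        struct delayed_work dwork;
};

static void my_poll_fn(struct work_struct *work)
{
        struct my_poller *p = container_of(work, struct my_poller,
                                           dwork.work);

        /* ... sample the hardware ... then rearm ourselves: */
        queue_delayed_work(p->wq, &p->dwork, MY_POLL_INTERVAL);
}

static void my_poller_start(struct my_poller *p)
{
        INIT_DELAYED_WORK(&p->dwork, my_poll_fn);
        queue_delayed_work(p->wq, &p->dwork, MY_POLL_INTERVAL);
}
#endif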
/**
 * queue_delayed_work_on - queue work on specific CPU after delay
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @dwork was already on a queue, non-zero otherwise.
 */
int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
                        struct delayed_work *dwork, unsigned long delay)
{
        int ret = 0;
        struct timer_list *timer = &dwork->timer;
        struct work_struct *work = &dwork->work;

        if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
                BUG_ON(timer_pending(timer));
                BUG_ON(!list_empty(&work->entry));

                timer_stats_timer_set_start_info(&dwork->timer);

                /* This stores cwq for the moment, for the timer_fn */
                set_wq_data(work, wq_per_cpu(wq, raw_smp_processor_id()));
                timer->expires = jiffies + delay;
                timer->data = (unsigned long)dwork;
                timer->function = delayed_work_timer_fn;

                if (unlikely(cpu >= 0))
                        add_timer_on(timer, cpu);
                else
                        add_timer(timer);
                ret = 1;
        }
        return ret;
}
EXPORT_SYMBOL_GPL(queue_delayed_work_on);
static void run_workqueue(struct cpu_workqueue_struct *cwq)
{
        spin_lock_irq(&cwq->lock);
        cwq->run_depth++;
        if (cwq->run_depth > 3) {
                /* morton gets to eat his hat */
                printk("%s: recursion depth exceeded: %d\n",
                        __func__, cwq->run_depth);
                dump_stack();
        }
        while (!list_empty(&cwq->worklist)) {
                struct work_struct *work = list_entry(cwq->worklist.next,
                                                struct work_struct, entry);
                work_func_t f = work->func;
#ifdef CONFIG_LOCKDEP
                /*
                 * It is permissible to free the struct work_struct
                 * from inside the function that is called from it,
                 * this we need to take into account for lockdep too.
                 * To avoid bogus "held lock freed" warnings as well
                 * as problems when looking into work->lockdep_map,
                 * make a copy and use that here.
                 */
                struct lockdep_map lockdep_map = work->lockdep_map;
#endif

                cwq->current_work = work;
                list_del_init(cwq->worklist.next);
                spin_unlock_irq(&cwq->lock);

                BUG_ON(get_wq_data(work) != cwq);
                work_clear_pending(work);
                lock_map_acquire(&cwq->wq->lockdep_map);
                lock_map_acquire(&lockdep_map);
                f(work);
                lock_map_release(&lockdep_map);
                lock_map_release(&cwq->wq->lockdep_map);

                if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
                        printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
                                        "%s/0x%08x/%d\n",
                                        current->comm, preempt_count(),
                                        task_pid_nr(current));
                        printk(KERN_ERR "    last function: ");
                        print_symbol("%s\n", (unsigned long)f);
                        debug_show_held_locks(current);
                        dump_stack();
                }

                spin_lock_irq(&cwq->lock);
                cwq->current_work = NULL;
        }
        cwq->run_depth--;
        spin_unlock_irq(&cwq->lock);
}
static int worker_thread(void *__cwq)
{
        struct cpu_workqueue_struct *cwq = __cwq;
        DEFINE_WAIT(wait);

        if (cwq->wq->freezeable)
                set_freezable();

        set_user_nice(current, -5);

        for (;;) {
                prepare_to_wait(&cwq->more_work, &wait, TASK_INTERRUPTIBLE);
                if (!freezing(current) &&
                    !kthread_should_stop() &&
                    list_empty(&cwq->worklist))
                        schedule();
                finish_wait(&cwq->more_work, &wait);

                try_to_freeze();

                if (kthread_should_stop())
                        break;

                run_workqueue(cwq);
        }

        return 0;
}

struct wq_barrier {
        struct work_struct work;
        struct completion done;
};

static void wq_barrier_func(struct work_struct *work)
{
        struct wq_barrier *barr = container_of(work, struct wq_barrier, work);

        complete(&barr->done);
}

static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
                        struct wq_barrier *barr, struct list_head *head)
{
        INIT_WORK(&barr->work, wq_barrier_func);
        __set_bit(WORK_STRUCT_PENDING, work_data_bits(&barr->work));

        init_completion(&barr->done);

        insert_work(cwq, &barr->work, head);
}

static int flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
{
        int active;

        if (cwq->thread == current) {
                /*
                 * Probably keventd trying to flush its own queue. So simply run
                 * it by hand rather than deadlocking.
                 */
                run_workqueue(cwq);
                active = 1;
        } else {
                struct wq_barrier barr;

                active = 0;
                spin_lock_irq(&cwq->lock);
                if (!list_empty(&cwq->worklist) || cwq->current_work != NULL) {
                        insert_wq_barrier(cwq, &barr, &cwq->worklist);
                        active = 1;
                }
                spin_unlock_irq(&cwq->lock);

                if (active)
                        wait_for_completion(&barr.done);
        }

        return active;
}
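/*
 * Illustrative sketch (not built): the wq_barrier trick above is just
 * "queue a work whose only job is complete(), then wait".  The same
 * pattern works from driver code; the names here are hypothetical.
 */
#if 0
struct my_barrier {
        struct work_struct work;
        struct completion done;
};

static void my_barrier_fn(struct work_struct *work)
{
        struct my_barrier *b = container_of(work, struct my_barrier, work);

        complete(&b->done);
}

static void my_wait_for_earlier_work(struct workqueue_struct *wq)
{
        struct my_barrier b;

        INIT_WORK(&b.work, my_barrier_fn);
        init_completion(&b.done);
        queue_work(wq, &b.work);
        /* work queued before us on this CPU has run once this returns */
        wait_for_completion(&b.done);
}
#endif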
/**
 * flush_workqueue - ensure that any scheduled work has run to completion.
 * @wq: workqueue to flush
 *
 * Forces execution of the workqueue and blocks until its completion.
 * This is typically used in driver shutdown handlers.
 *
 * We sleep until all works which were queued on entry have been handled,
 * but we are not livelocked by new incoming ones.
 *
 * This function used to run the workqueues itself.  Now we just wait for the
 * helper threads to do it.
 */
void flush_workqueue(struct workqueue_struct *wq)
{
        const struct cpumask *cpu_map = wq_cpu_map(wq);
        int cpu;

        might_sleep();
        lock_map_acquire(&wq->lockdep_map);
        lock_map_release(&wq->lockdep_map);
        for_each_cpu(cpu, cpu_map)
                flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
}
EXPORT_SYMBOL_GPL(flush_workqueue);
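/*
 * Illustrative sketch (not built): the usual shutdown ordering.  Stop
 * new submissions first, then flush; "struct my_dev" and its "stopping"
 * flag are hypothetical.
 */
#if 0
static void my_dev_shutdown(struct my_dev *dev)
{
        dev->stopping = 1;        /* work callbacks check this, won't requeue */
        flush_workqueue(dev->wq); /* waits for everything queued so far */
        destroy_workqueue(dev->wq);
}
#endif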
/**
 * flush_work - block until a work_struct's callback has terminated
 * @work: the work which is to be flushed
 *
 * Returns false if @work has already terminated.
 *
 * It is expected that, prior to calling flush_work(), the caller has
 * arranged for the work to not be requeued, otherwise it doesn't make
 * sense to use this function.
 */
int flush_work(struct work_struct *work)
{
        struct cpu_workqueue_struct *cwq;
        struct list_head *prev;
        struct wq_barrier barr;

        might_sleep();
        cwq = get_wq_data(work);
        if (!cwq)
                return 0;

        lock_map_acquire(&cwq->wq->lockdep_map);
        lock_map_release(&cwq->wq->lockdep_map);

        prev = NULL;
        spin_lock_irq(&cwq->lock);
        if (!list_empty(&work->entry)) {
                /*
                 * See the comment near try_to_grab_pending()->smp_rmb().
                 * If it was re-queued under us we are not going to wait.
                 */
                smp_rmb();
                if (unlikely(cwq != get_wq_data(work)))
                        goto out;
                prev = &work->entry;
        } else {
                if (cwq->current_work != work)
                        goto out;
                prev = &cwq->worklist;
        }
        insert_wq_barrier(cwq, &barr, prev->next);
out:
        spin_unlock_irq(&cwq->lock);
        if (!prev)
                return 0;

        wait_for_completion(&barr.done);
        return 1;
}
EXPORT_SYMBOL_GPL(flush_work);
/*
 * Upon a successful return (>= 0), the caller "owns" WORK_STRUCT_PENDING bit,
 * so this work can't be re-armed in any way.
 */
static int try_to_grab_pending(struct work_struct *work)
{
        struct cpu_workqueue_struct *cwq;
        int ret = -1;

        if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work)))
                return 0;

        /*
         * The queueing is in progress, or it is already queued. Try to
         * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
         */

        cwq = get_wq_data(work);
        if (!cwq)
                return ret;

        spin_lock_irq(&cwq->lock);
        if (!list_empty(&work->entry)) {
                /*
                 * This work is queued, but perhaps we locked the wrong cwq.
                 * In that case we must see the new value after rmb(), see
                 * insert_work()->wmb().
                 */
                smp_rmb();
                if (cwq == get_wq_data(work)) {
                        list_del_init(&work->entry);
                        ret = 1;
                }
        }
        spin_unlock_irq(&cwq->lock);

        return ret;
}

static void wait_on_cpu_work(struct cpu_workqueue_struct *cwq,
                                struct work_struct *work)
{
        struct wq_barrier barr;
        int running = 0;

        spin_lock_irq(&cwq->lock);
        if (unlikely(cwq->current_work == work)) {
                insert_wq_barrier(cwq, &barr, cwq->worklist.next);
                running = 1;
        }
        spin_unlock_irq(&cwq->lock);

        if (unlikely(running))
                wait_for_completion(&barr.done);
}

static void wait_on_work(struct work_struct *work)
{
        struct cpu_workqueue_struct *cwq;
        struct workqueue_struct *wq;
        const struct cpumask *cpu_map;
        int cpu;

        might_sleep();

        lock_map_acquire(&work->lockdep_map);
        lock_map_release(&work->lockdep_map);

        cwq = get_wq_data(work);
        if (!cwq)
                return;

        wq = cwq->wq;
        cpu_map = wq_cpu_map(wq);

        for_each_cpu(cpu, cpu_map)
                wait_on_cpu_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
}

static int __cancel_work_timer(struct work_struct *work,
                                struct timer_list *timer)
{
        int ret;

        do {
                ret = (timer && likely(del_timer(timer)));
                if (!ret)
                        ret = try_to_grab_pending(work);
                wait_on_work(work);
        } while (unlikely(ret < 0));

        work_clear_pending(work);
        return ret;
}
/**
 * cancel_work_sync - block until a work_struct's callback has terminated
 * @work: the work which is to be flushed
 *
 * Returns true if @work was pending.
 *
 * cancel_work_sync() will cancel the work if it is queued. If the work's
 * callback appears to be running, cancel_work_sync() will block until it
 * has completed.
 *
 * It is possible to use this function if the work re-queues itself. It can
 * cancel the work even if it migrates to another workqueue, however in that
 * case it only guarantees that work->func() has completed on the last queued
 * workqueue.
 *
 * cancel_work_sync(&delayed_work->work) should be used only if ->timer is not
 * pending, otherwise it goes into a busy-wait loop until the timer expires.
 *
 * The caller must ensure that the workqueue_struct on which this work was
 * last queued can't be destroyed before this function returns.
 */
int cancel_work_sync(struct work_struct *work)
{
        return __cancel_work_timer(work, NULL);
}
EXPORT_SYMBOL_GPL(cancel_work_sync);

/**
 * cancel_delayed_work_sync - reliably kill off a delayed work.
 * @dwork: the delayed work struct
 *
 * Returns true if @dwork was pending.
 *
 * It is possible to use this function if @dwork rearms itself via queue_work()
 * or queue_delayed_work(). See also the comment for cancel_work_sync().
 */
int cancel_delayed_work_sync(struct delayed_work *dwork)
{
        return __cancel_work_timer(&dwork->work, &dwork->timer);
}
EXPORT_SYMBOL(cancel_delayed_work_sync);
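/*
 * Illustrative sketch (not built): tearing down a self-rearming delayed
 * work such as the hypothetical poller from the queue_delayed_work()
 * example above.  cancel_delayed_work_sync() copes with the rearming
 * itself, so no extra "stop" flag is strictly needed.
 */
#if 0
static void my_poller_stop(struct my_poller *p)
{
        /* kills the timer, unqueues, and waits if the callback is running */
        cancel_delayed_work_sync(&p->dwork);
        /* safe to free p now, provided nobody else requeues the work */
}
#endif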
static struct workqueue_struct *keventd_wq __read_mostly;

/**
 * schedule_work - put work task in global workqueue
 * @work: job to be done
 *
 * This puts a job in the kernel-global workqueue.
 */
int schedule_work(struct work_struct *work)
{
        return queue_work(keventd_wq, work);
}
EXPORT_SYMBOL(schedule_work);

/**
 * schedule_work_on - put work task on a specific cpu
 * @cpu: cpu to put the work task on
 * @work: job to be done
 *
 * This puts a job on a specific cpu.
 */
int schedule_work_on(int cpu, struct work_struct *work)
{
        return queue_work_on(cpu, keventd_wq, work);
}
EXPORT_SYMBOL(schedule_work_on);

/**
 * schedule_delayed_work - put work task in global workqueue after delay
 * @dwork: job to be done
 * @delay: number of jiffies to wait or 0 for immediate execution
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue.
 */
int schedule_delayed_work(struct delayed_work *dwork,
                                        unsigned long delay)
{
        return queue_delayed_work(keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work);

/**
 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
 * @cpu: cpu to use
 * @dwork: job to be done
 * @delay: number of jiffies to wait
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue on the specified CPU.
 */
int schedule_delayed_work_on(int cpu,
                        struct delayed_work *dwork, unsigned long delay)
{
        return queue_delayed_work_on(cpu, keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work_on);
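/*
 * Illustrative sketch (not built): the classic split between an
 * interrupt handler and keventd via schedule_work().  The handler and
 * work names are hypothetical; <linux/interrupt.h> provides irqreturn_t.
 */
#if 0
static void my_irq_bh(struct work_struct *work)
{
        /* process context: may sleep, take mutexes, allocate, etc. */
}

static DECLARE_WORK(my_irq_work, my_irq_bh);

static irqreturn_t my_irq_handler(int irq, void *dev_id)
{
        /* atomic context: just ack the hardware and defer the rest */
        schedule_work(&my_irq_work);
        return IRQ_HANDLED;
}
#endif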
/**
 * schedule_on_each_cpu - call a function on each online CPU from keventd
 * @func: the function to call
 *
 * Returns zero on success.
 * Returns -ve errno on failure.
 *
 * schedule_on_each_cpu() is very slow.
 */
int schedule_on_each_cpu(work_func_t func)
{
        int cpu;
        struct work_struct *works;

        works = alloc_percpu(struct work_struct);
        if (!works)
                return -ENOMEM;

        get_online_cpus();
        for_each_online_cpu(cpu) {
                struct work_struct *work = per_cpu_ptr(works, cpu);

                INIT_WORK(work, func);
                schedule_work_on(cpu, work);
        }
        for_each_online_cpu(cpu)
                flush_work(per_cpu_ptr(works, cpu));
        put_online_cpus();
        free_percpu(works);
        return 0;
}
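/*
 * Illustrative sketch (not built): schedule_on_each_cpu() runs @func
 * once on every online CPU and only returns after all instances have
 * finished; a hypothetical per-cpu cache drain is a typical caller.
 */
#if 0
static void my_drain_local_cache(struct work_struct *unused)
{
        /* keventd context on one particular CPU */
}

static int my_drain_all_cpus(void)
{
        return schedule_on_each_cpu(my_drain_local_cache); /* 0 or -ENOMEM */
}
#endif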
void flush_scheduled_work(void)
{
        flush_workqueue(keventd_wq);
}
EXPORT_SYMBOL(flush_scheduled_work);

/**
 * execute_in_process_context - reliably execute the routine with user context
 * @fn:         the function to execute
 * @ew:         guaranteed storage for the execute work structure (must
 *              be available when the work executes)
 *
 * Executes the function immediately if process context is available,
 * otherwise schedules the function for delayed execution.
 *
 * Returns:     0 - function was executed
 *              1 - function was scheduled for execution
 */
int execute_in_process_context(work_func_t fn, struct execute_work *ew)
{
        if (!in_interrupt()) {
                fn(&ew->work);
                return 0;
        }

        INIT_WORK(&ew->work, fn);
        schedule_work(&ew->work);

        return 1;
}
EXPORT_SYMBOL_GPL(execute_in_process_context);
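/*
 * Illustrative sketch (not built): a release path that must not run in
 * interrupt context.  The execute_work storage is embedded in the
 * (hypothetical) object itself so it survives until the work runs.
 */
#if 0
struct my_obj {
        struct execute_work ew;
        /* ... */
};

static void my_obj_release(struct work_struct *work)
{
        struct my_obj *obj = container_of(work, struct my_obj, ew.work);

        kfree(obj);
}

static void my_obj_destroy(struct my_obj *obj)
{
        /* frees immediately when possible, otherwise via keventd */
        execute_in_process_context(my_obj_release, &obj->ew);
}
#endif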
int keventd_up(void)
{
        return keventd_wq != NULL;
}

int current_is_keventd(void)
{
        struct cpu_workqueue_struct *cwq;
        int cpu = raw_smp_processor_id(); /* preempt-safe: keventd is per-cpu */
        int ret = 0;

        BUG_ON(!keventd_wq);

        cwq = per_cpu_ptr(keventd_wq->cpu_wq, cpu);
        if (current == cwq->thread)
                ret = 1;

        return ret;
}

static struct cpu_workqueue_struct *
init_cpu_workqueue(struct workqueue_struct *wq, int cpu)
{
        struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);

        cwq->wq = wq;
        spin_lock_init(&cwq->lock);
        INIT_LIST_HEAD(&cwq->worklist);
        init_waitqueue_head(&cwq->more_work);

        return cwq;
}

static int create_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
        struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
        struct workqueue_struct *wq = cwq->wq;
        const char *fmt = is_wq_single_threaded(wq) ? "%s" : "%s/%d";
        struct task_struct *p;

        p = kthread_create(worker_thread, cwq, fmt, wq->name, cpu);
        /*
         * Nobody can add the work_struct to this cwq,
         *      if (caller is __create_workqueue)
         *              nobody should see this wq
         *      else // caller is CPU_UP_PREPARE
         *              cpu is not on cpu_online_map
         * so we can abort safely.
         */
        if (IS_ERR(p))
                return PTR_ERR(p);
        if (cwq->wq->rt)
                sched_setscheduler_nocheck(p, SCHED_FIFO, &param);
        cwq->thread = p;

        return 0;
}

static void start_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
        struct task_struct *p = cwq->thread;

        if (p != NULL) {
                if (cpu >= 0)
                        kthread_bind(p, cpu);
                wake_up_process(p);
        }
}
struct workqueue_struct *__create_workqueue_key(const char *name,
                                                int singlethread,
                                                int freezeable,
                                                int rt,
                                                struct lock_class_key *key,
                                                const char *lock_name)
{
        struct workqueue_struct *wq;
        struct cpu_workqueue_struct *cwq;
        int err = 0, cpu;

        wq = kzalloc(sizeof(*wq), GFP_KERNEL);
        if (!wq)
                return NULL;

        wq->cpu_wq = alloc_percpu(struct cpu_workqueue_struct);
        if (!wq->cpu_wq) {
                kfree(wq);
                return NULL;
        }

        wq->name = name;
        lockdep_init_map(&wq->lockdep_map, lock_name, key, 0);
        wq->singlethread = singlethread;
        wq->freezeable = freezeable;
        wq->rt = rt;
        INIT_LIST_HEAD(&wq->list);

        if (singlethread) {
                cwq = init_cpu_workqueue(wq, singlethread_cpu);
                err = create_workqueue_thread(cwq, singlethread_cpu);
                start_workqueue_thread(cwq, -1);
        } else {
                cpu_maps_update_begin();
                /*
                 * We must place this wq on list even if the code below fails.
                 * cpu_down(cpu) can remove cpu from cpu_populated_map before
                 * destroy_workqueue() takes the lock, in that case we leak
                 * cwq[cpu]->thread.
                 */
                spin_lock(&workqueue_lock);
                list_add(&wq->list, &workqueues);
                spin_unlock(&workqueue_lock);
                /*
                 * We must initialize cwqs for each possible cpu even if we
                 * are going to call destroy_workqueue() finally. Otherwise
                 * cpu_up() can hit the uninitialized cwq once we drop the
                 * lock.
                 */
                for_each_possible_cpu(cpu) {
                        cwq = init_cpu_workqueue(wq, cpu);
                        if (err || !cpu_online(cpu))
                                continue;
                        err = create_workqueue_thread(cwq, cpu);
                        start_workqueue_thread(cwq, cpu);
                }
                cpu_maps_update_done();
        }

        if (err) {
                destroy_workqueue(wq);
                wq = NULL;
        }
        return wq;
}
EXPORT_SYMBOL_GPL(__create_workqueue_key);

static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq)
{
        /*
         * Our caller is either destroy_workqueue() or CPU_POST_DEAD,
         * cpu_add_remove_lock protects cwq->thread.
         */
        if (cwq->thread == NULL)
                return;

        lock_map_acquire(&cwq->wq->lockdep_map);
        lock_map_release(&cwq->wq->lockdep_map);

        flush_cpu_workqueue(cwq);
        /*
         * If the caller is CPU_POST_DEAD and cwq->worklist was not empty,
         * a concurrent flush_workqueue() can insert a barrier after us.
         * However, in that case run_workqueue() won't return and check
         * kthread_should_stop() until it flushes all work_struct's.
         * When ->worklist becomes empty it is safe to exit because no
         * more work_structs can be queued on this cwq: flush_workqueue
         * checks list_empty(), and a "normal" queue_work() can't use
         * a dead CPU.
         */
        kthread_stop(cwq->thread);
        cwq->thread = NULL;
}
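/*
 * Illustrative sketch (not built): the usual lifecycle around this
 * create/destroy pair.  create_workqueue() and
 * create_singlethread_workqueue() are the <linux/workqueue.h> wrappers
 * that end up in __create_workqueue_key(); "my_wq" is hypothetical.
 */
#if 0
static struct workqueue_struct *my_wq;

static int __init my_module_init(void)
{
        my_wq = create_singlethread_workqueue("my_wq");
        if (!my_wq)
                return -ENOMEM;
        return 0;
}

static void __exit my_module_exit(void)
{
        destroy_workqueue(my_wq); /* runs all pending work first */
}
#endif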
/**
 * destroy_workqueue - safely terminate a workqueue
 * @wq: target workqueue
 *
 * Safely destroy a workqueue. All work currently pending will be done first.
 */
void destroy_workqueue(struct workqueue_struct *wq)
{
        const struct cpumask *cpu_map = wq_cpu_map(wq);
        int cpu;

        cpu_maps_update_begin();
        spin_lock(&workqueue_lock);
        list_del(&wq->list);
        spin_unlock(&workqueue_lock);

        for_each_cpu(cpu, cpu_map)
                cleanup_workqueue_thread(per_cpu_ptr(wq->cpu_wq, cpu));
        cpu_maps_update_done();

        free_percpu(wq->cpu_wq);
        kfree(wq);
}
EXPORT_SYMBOL_GPL(destroy_workqueue);

static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
                                                unsigned long action,
                                                void *hcpu)
{
        unsigned int cpu = (unsigned long)hcpu;
        struct cpu_workqueue_struct *cwq;
        struct workqueue_struct *wq;
        int ret = NOTIFY_OK;

        action &= ~CPU_TASKS_FROZEN;

        switch (action) {
        case CPU_UP_PREPARE:
                cpumask_set_cpu(cpu, cpu_populated_map);
        }
undo:
        list_for_each_entry(wq, &workqueues, list) {
                cwq = per_cpu_ptr(wq->cpu_wq, cpu);

                switch (action) {
                case CPU_UP_PREPARE:
                        if (!create_workqueue_thread(cwq, cpu))
                                break;
                        printk(KERN_ERR "workqueue [%s] for %i failed\n",
                                wq->name, cpu);
                        action = CPU_UP_CANCELED;
                        ret = NOTIFY_BAD;
                        goto undo;

                case CPU_ONLINE:
                        start_workqueue_thread(cwq, cpu);
                        break;

                case CPU_UP_CANCELED:
                        start_workqueue_thread(cwq, -1);
                case CPU_POST_DEAD:
                        cleanup_workqueue_thread(cwq);
                        break;
                }
        }

        switch (action) {
        case CPU_UP_CANCELED:
        case CPU_POST_DEAD:
                cpumask_clear_cpu(cpu, cpu_populated_map);
        }

        return ret;
}
#ifdef CONFIG_SMP
static struct workqueue_struct *work_on_cpu_wq __read_mostly;

struct work_for_cpu {
        struct work_struct work;
        long (*fn)(void *);
        void *arg;
        long ret;
};

static void do_work_for_cpu(struct work_struct *w)
{
        struct work_for_cpu *wfc = container_of(w, struct work_for_cpu, work);

        wfc->ret = wfc->fn(wfc->arg);
}

/**
 * work_on_cpu - run a function in user context on a particular cpu
 * @cpu: the cpu to run on
 * @fn: the function to run
 * @arg: the function arg
 *
 * This will return the value @fn returns.
 * It is up to the caller to ensure that the cpu doesn't go offline.
 */
long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
{
        struct work_for_cpu wfc;

        INIT_WORK(&wfc.work, do_work_for_cpu);
        wfc.fn = fn;
        wfc.arg = arg;
        queue_work_on(cpu, work_on_cpu_wq, &wfc.work);
        flush_work(&wfc.work);

        return wfc.ret;
}
EXPORT_SYMBOL_GPL(work_on_cpu);
#endif /* CONFIG_SMP */

void __init init_workqueues(void)
{
        alloc_cpumask_var(&cpu_populated_map, GFP_KERNEL);

        cpumask_copy(cpu_populated_map, cpu_online_mask);
        singlethread_cpu = cpumask_first(cpu_possible_mask);
        cpu_singlethread_map = cpumask_of(singlethread_cpu);
        hotcpu_notifier(workqueue_cpu_callback, 0);
        keventd_wq = create_workqueue("events");
        BUG_ON(!keventd_wq);
#ifdef CONFIG_SMP
        work_on_cpu_wq = create_workqueue("work_on_cpu");
        BUG_ON(!work_on_cpu_wq);
#endif
}
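/*
 * Illustrative sketch (not built): using work_on_cpu() above.  The
 * caller is responsible for keeping the CPU online, e.g. by holding
 * get_online_cpus() across the call; my_read_reg() is hypothetical.
 */
#if 0
static long my_read_reg(void *arg)
{
        /* executes in process context on the CPU given to work_on_cpu() */
        return 0;
}

static long my_query_cpu(unsigned int cpu)
{
        long ret;

        get_online_cpus();
        ret = work_on_cpu(cpu, my_read_reg, NULL);
        put_online_cpus();

        return ret;
}
#endif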