/*
 * linux/kernel/workqueue.c
 *
 * Generic mechanism for defining kernel helper threads for running
 * arbitrary tasks in process context.
 *
 * Started by Ingo Molnar, Copyright (C) 2002
 *
 * Derived from the taskqueue/keventd code by:
 *
 *   David Woodhouse <[email protected]>
 *   Andrew Morton <[email protected]>
 *   Kai Petzke <[email protected]>
 *   Theodore Ts'o <[email protected]>
 *
 * Made to use alloc_percpu by Christoph Lameter <[email protected]>.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/mempolicy.h>
#include <linux/freezer.h>
#include <linux/kallsyms.h>
#include <linux/debug_locks.h>

/*
 * The per-CPU workqueue (if single thread, we always use the first
 * possible cpu).
 *
 * The sequence counters are for flush_scheduled_work().  It wants to wait
 * until all currently-scheduled works are completed, but it doesn't
 * want to be livelocked by new, incoming ones.  So it waits until
 * remove_sequence is >= the insert_sequence which pertained when
 * flush_scheduled_work() was called.
 */
struct cpu_workqueue_struct {

	spinlock_t lock;

	long remove_sequence;	/* Least-recently added (next to run) */
	long insert_sequence;	/* Next to add */

	struct list_head worklist;
	wait_queue_head_t more_work;
	wait_queue_head_t work_done;

	struct workqueue_struct *wq;
	struct task_struct *thread;

	int run_depth;		/* Detect run_workqueue() recursion depth */

	int freezeable;		/* Freeze the thread during suspend */
} ____cacheline_aligned;

/*
 * The externally visible workqueue abstraction is an array of
 * per-CPU workqueues:
 */
struct workqueue_struct {
	struct cpu_workqueue_struct *cpu_wq;
	const char *name;
	struct list_head list;	/* Empty if single thread */
};

/* All the per-cpu workqueues on the system, for hotplug cpu to add/remove
   threads to each one as cpus come/go. */
static DEFINE_MUTEX(workqueue_mutex);
static LIST_HEAD(workqueues);

static int singlethread_cpu;

/* If it's single threaded, it isn't in the list of workqueues. */
static inline int is_single_threaded(struct workqueue_struct *wq)
{
	return list_empty(&wq->list);
}

static inline void set_wq_data(struct work_struct *work, void *wq)
{
	unsigned long new, old, res;

	/* assume the pending flag is already set and that the task has already
	 * been queued on this workqueue */
	new = (unsigned long) wq | (1UL << WORK_STRUCT_PENDING);
	res = work->management;
	if (res != new) {
		do {
			old = res;
			new = (unsigned long) wq;
			new |= (old & WORK_STRUCT_FLAG_MASK);
			res = cmpxchg(&work->management, old, new);
		} while (res != old);
	}
}

static inline void *get_wq_data(struct work_struct *work)
{
	return (void *) (work->management & WORK_STRUCT_WQ_DATA_MASK);
}

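/*
 * Rough sketch of what the two helpers above encode (illustrative only, not
 * a definition from this file): the single work->management word carries
 * both the owning queue pointer and the WORK_STRUCT_* flag bits,
 *
 *	management = ((unsigned long)cwq & WORK_STRUCT_WQ_DATA_MASK) |
 *		     (management & WORK_STRUCT_FLAG_MASK);
 *
 * which relies on cpu_workqueue_struct being aligned so that its low bits
 * are free to hold the flags.  get_wq_data() simply masks the flags back out.
 */
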
/* Preempt must be disabled. */
static void __queue_work(struct cpu_workqueue_struct *cwq,
			 struct work_struct *work)
{
	unsigned long flags;

	spin_lock_irqsave(&cwq->lock, flags);
	set_wq_data(work, cwq);
	list_add_tail(&work->entry, &cwq->worklist);
	cwq->insert_sequence++;
	wake_up(&cwq->more_work);
	spin_unlock_irqrestore(&cwq->lock, flags);
}

/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to the CPU it was submitted, but there is no
 * guarantee that it will be processed by that CPU.
 */
int fastcall queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
	int ret = 0, cpu = get_cpu();

	if (!test_and_set_bit(WORK_STRUCT_PENDING, &work->management)) {
		if (unlikely(is_single_threaded(wq)))
			cpu = singlethread_cpu;
		BUG_ON(!list_empty(&work->entry));
		__queue_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
		ret = 1;
	}
	put_cpu();
	return ret;
}
EXPORT_SYMBOL_GPL(queue_work);

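/*
 * Example usage (an illustrative sketch only; my_dev and my_work_fn are
 * made-up names, not part of this file or the rest of the tree):
 *
 *	struct my_dev {
 *		struct workqueue_struct *wq;
 *		struct work_struct work;
 *	};
 *
 *	static void my_work_fn(struct work_struct *work)
 *	{
 *		struct my_dev *dev = container_of(work, struct my_dev, work);
 *		... runs in the worker thread, in process context, may sleep ...
 *	}
 *
 *	INIT_WORK(&dev->work, my_work_fn);
 *	queue_work(dev->wq, &dev->work);
 *
 * queue_work() returns 0 (and queues nothing) if the work was still pending
 * from an earlier call.
 */
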
static void delayed_work_timer_fn(unsigned long __data)
{
	struct delayed_work *dwork = (struct delayed_work *)__data;
	struct workqueue_struct *wq = get_wq_data(&dwork->work);
	int cpu = smp_processor_id();

	if (unlikely(is_single_threaded(wq)))
		cpu = singlethread_cpu;

	__queue_work(per_cpu_ptr(wq->cpu_wq, cpu), &dwork->work);
}

/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @dwork was already on a queue, non-zero otherwise.
 */
int fastcall queue_delayed_work(struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	int ret = 0;
	struct timer_list *timer = &dwork->timer;
	struct work_struct *work = &dwork->work;

	if (delay == 0)
		return queue_work(wq, work);

	if (!test_and_set_bit(WORK_STRUCT_PENDING, &work->management)) {
		BUG_ON(timer_pending(timer));
		BUG_ON(!list_empty(&work->entry));

		/* This stores wq for the moment, for the timer_fn */
		set_wq_data(work, wq);
		timer->expires = jiffies + delay;
		timer->data = (unsigned long)dwork;
		timer->function = delayed_work_timer_fn;
		add_timer(timer);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_delayed_work);

/**
 * queue_delayed_work_on - queue work on specific CPU after delay
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @dwork was already on a queue, non-zero otherwise.
 */
int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	int ret = 0;
	struct timer_list *timer = &dwork->timer;
	struct work_struct *work = &dwork->work;

	if (!test_and_set_bit(WORK_STRUCT_PENDING, &work->management)) {
		BUG_ON(timer_pending(timer));
		BUG_ON(!list_empty(&work->entry));

		/* This stores wq for the moment, for the timer_fn */
		set_wq_data(work, wq);
		timer->expires = jiffies + delay;
		timer->data = (unsigned long)dwork;
		timer->function = delayed_work_timer_fn;
		add_timer_on(timer, cpu);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_delayed_work_on);

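/*
 * Example usage (an illustrative sketch only; my_dev and my_timeout_fn are
 * made-up names, and wq stands for some workqueue created elsewhere):
 *
 *	struct my_dev {
 *		struct delayed_work dwork;
 *	};
 *
 *	static void my_timeout_fn(struct work_struct *work)
 *	{
 *		struct my_dev *dev =
 *			container_of(work, struct my_dev, dwork.work);
 *		...
 *	}
 *
 *	INIT_DELAYED_WORK(&dev->dwork, my_timeout_fn);
 *	queue_delayed_work(wq, &dev->dwork, HZ);	runs ~1 second later
 *
 * A delay of 0 queues immediately; cancel_delayed_work(&dev->dwork) removes
 * the work again if its timer has not fired yet.
 */
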
static void run_workqueue(struct cpu_workqueue_struct *cwq)
{
	unsigned long flags;

	/*
	 * Keep taking off work from the queue until
	 * done.
	 */
	spin_lock_irqsave(&cwq->lock, flags);
	cwq->run_depth++;
	if (cwq->run_depth > 3) {
		/* morton gets to eat his hat */
		printk("%s: recursion depth exceeded: %d\n",
			__FUNCTION__, cwq->run_depth);
		dump_stack();
	}
	while (!list_empty(&cwq->worklist)) {
		struct work_struct *work = list_entry(cwq->worklist.next,
						struct work_struct, entry);
		work_func_t f = work->func;

		list_del_init(cwq->worklist.next);
		spin_unlock_irqrestore(&cwq->lock, flags);

		BUG_ON(get_wq_data(work) != cwq);
		if (!test_bit(WORK_STRUCT_NOAUTOREL, &work->management))
			work_release(work);
		f(work);

		if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
			printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
					"%s/0x%08x/%d\n",
					current->comm, preempt_count(),
					current->pid);
			printk(KERN_ERR "    last function: ");
			print_symbol("%s\n", (unsigned long)f);
			debug_show_held_locks(current);
			dump_stack();
		}

		spin_lock_irqsave(&cwq->lock, flags);
		cwq->remove_sequence++;
		wake_up(&cwq->work_done);
	}
	cwq->run_depth--;
	spin_unlock_irqrestore(&cwq->lock, flags);
}

static int worker_thread(void *__cwq)
{
	struct cpu_workqueue_struct *cwq = __cwq;
	DECLARE_WAITQUEUE(wait, current);
	struct k_sigaction sa;
	sigset_t blocked;

	if (!cwq->freezeable)
		current->flags |= PF_NOFREEZE;

	set_user_nice(current, -5);

	/* Block and flush all signals */
	sigfillset(&blocked);
	sigprocmask(SIG_BLOCK, &blocked, NULL);
	flush_signals(current);

	/*
	 * We inherited MPOL_INTERLEAVE from the booting kernel.
	 * Set MPOL_DEFAULT to insure node local allocations.
	 */
	numa_default_policy();

	/* SIG_IGN makes children autoreap: see do_notify_parent(). */
	sa.sa.sa_handler = SIG_IGN;
	sa.sa.sa_flags = 0;
	siginitset(&sa.sa.sa_mask, sigmask(SIGCHLD));
	do_sigaction(SIGCHLD, &sa, (struct k_sigaction *)0);

	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		if (cwq->freezeable)
			try_to_freeze();

		add_wait_queue(&cwq->more_work, &wait);
		if (list_empty(&cwq->worklist))
			schedule();
		else
			__set_current_state(TASK_RUNNING);
		remove_wait_queue(&cwq->more_work, &wait);

		if (!list_empty(&cwq->worklist))
			run_workqueue(cwq);
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}

static void flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
{
	if (cwq->thread == current) {
		/*
		 * Probably keventd trying to flush its own queue. So simply run
		 * it by hand rather than deadlocking.
		 */
		run_workqueue(cwq);
	} else {
		DEFINE_WAIT(wait);
		long sequence_needed;

		spin_lock_irq(&cwq->lock);
		sequence_needed = cwq->insert_sequence;

		while (sequence_needed - cwq->remove_sequence > 0) {
			prepare_to_wait(&cwq->work_done, &wait,
					TASK_UNINTERRUPTIBLE);
			spin_unlock_irq(&cwq->lock);
			schedule();
			spin_lock_irq(&cwq->lock);
		}
		finish_wait(&cwq->work_done, &wait);
		spin_unlock_irq(&cwq->lock);
	}
}

/**
 * flush_workqueue - ensure that any scheduled work has run to completion.
 * @wq: workqueue to flush
 *
 * Forces execution of the workqueue and blocks until its completion.
 * This is typically used in driver shutdown handlers.
 *
 * This function will sample each workqueue's current insert_sequence number and
 * will sleep until the head sequence is greater than or equal to that.  This
 * means that we sleep until all works which were queued on entry have been
 * handled, but we are not livelocked by new incoming ones.
 *
 * This function used to run the workqueues itself.  Now we just wait for the
 * helper threads to do it.
 */
void fastcall flush_workqueue(struct workqueue_struct *wq)
{
	might_sleep();

	if (is_single_threaded(wq)) {
		/* Always use first cpu's area. */
		flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, singlethread_cpu));
	} else {
		int cpu;

		mutex_lock(&workqueue_mutex);
		for_each_online_cpu(cpu)
			flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
		mutex_unlock(&workqueue_mutex);
	}
}
EXPORT_SYMBOL_GPL(flush_workqueue);

static struct task_struct *create_workqueue_thread(struct workqueue_struct *wq,
						   int cpu, int freezeable)
{
	struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);
	struct task_struct *p;

	spin_lock_init(&cwq->lock);
	cwq->wq = wq;
	cwq->thread = NULL;
	cwq->insert_sequence = 0;
	cwq->remove_sequence = 0;
	cwq->freezeable = freezeable;
	INIT_LIST_HEAD(&cwq->worklist);
	init_waitqueue_head(&cwq->more_work);
	init_waitqueue_head(&cwq->work_done);

	if (is_single_threaded(wq))
		p = kthread_create(worker_thread, cwq, "%s", wq->name);
	else
		p = kthread_create(worker_thread, cwq, "%s/%d", wq->name, cpu);
	if (IS_ERR(p))
		return NULL;
	cwq->thread = p;
	return p;
}

struct workqueue_struct *__create_workqueue(const char *name,
					    int singlethread, int freezeable)
{
	int cpu, destroy = 0;
	struct workqueue_struct *wq;
	struct task_struct *p;

	wq = kzalloc(sizeof(*wq), GFP_KERNEL);
	if (!wq)
		return NULL;

	wq->cpu_wq = alloc_percpu(struct cpu_workqueue_struct);
	if (!wq->cpu_wq) {
		kfree(wq);
		return NULL;
	}

	wq->name = name;
	mutex_lock(&workqueue_mutex);
	if (singlethread) {
		INIT_LIST_HEAD(&wq->list);
		p = create_workqueue_thread(wq, singlethread_cpu, freezeable);
		if (!p)
			destroy = 1;
		else
			wake_up_process(p);
	} else {
		list_add(&wq->list, &workqueues);
		for_each_online_cpu(cpu) {
			p = create_workqueue_thread(wq, cpu, freezeable);
			if (p) {
				kthread_bind(p, cpu);
				wake_up_process(p);
			} else
				destroy = 1;
		}
	}
	mutex_unlock(&workqueue_mutex);

	/*
	 * Was there any error during startup? If yes then clean up:
	 */
	if (destroy) {
		destroy_workqueue(wq);
		wq = NULL;
	}
	return wq;
}
EXPORT_SYMBOL_GPL(__create_workqueue);

static void cleanup_workqueue_thread(struct workqueue_struct *wq, int cpu)
{
	struct cpu_workqueue_struct *cwq;
	unsigned long flags;
	struct task_struct *p;

	cwq = per_cpu_ptr(wq->cpu_wq, cpu);
	spin_lock_irqsave(&cwq->lock, flags);
	p = cwq->thread;
	cwq->thread = NULL;
	spin_unlock_irqrestore(&cwq->lock, flags);
	if (p)
		kthread_stop(p);
}

/**
 * destroy_workqueue - safely terminate a workqueue
 * @wq: target workqueue
 *
 * Safely destroy a workqueue. All work currently pending will be done first.
 */
void destroy_workqueue(struct workqueue_struct *wq)
{
	int cpu;

	flush_workqueue(wq);

	/* We don't need the distraction of CPUs appearing and vanishing. */
	mutex_lock(&workqueue_mutex);
	if (is_single_threaded(wq))
		cleanup_workqueue_thread(wq, singlethread_cpu);
	else {
		for_each_online_cpu(cpu)
			cleanup_workqueue_thread(wq, cpu);
		list_del(&wq->list);
	}
	mutex_unlock(&workqueue_mutex);
	free_percpu(wq->cpu_wq);
	kfree(wq);
}
EXPORT_SYMBOL_GPL(destroy_workqueue);

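/*
 * Example lifecycle (an illustrative sketch only; "mydrv" and some_work are
 * made-up names):
 *
 *	struct workqueue_struct *wq;
 *
 *	wq = create_workqueue("mydrv");		one worker thread per CPU
 *	if (!wq)
 *		return -ENOMEM;
 *	...
 *	queue_work(wq, &some_work);
 *	...
 *	flush_workqueue(wq);			wait for queued work to finish
 *	destroy_workqueue(wq);			flushes, then stops the threads
 *
 * create_singlethread_workqueue("mydrv") creates a single thread instead,
 * bound to the first possible CPU.
 */
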
static struct workqueue_struct *keventd_wq;

/**
 * schedule_work - put work task in global workqueue
 * @work: job to be done
 *
 * This puts a job in the kernel-global workqueue.
 */
int fastcall schedule_work(struct work_struct *work)
{
	return queue_work(keventd_wq, work);
}
EXPORT_SYMBOL(schedule_work);

/**
 * schedule_delayed_work - put work task in global workqueue after delay
 * @dwork: job to be done
 * @delay: number of jiffies to wait or 0 for immediate execution
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue.
 */
int fastcall schedule_delayed_work(struct delayed_work *dwork, unsigned long delay)
{
	return queue_delayed_work(keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work);

/**
 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
 * @cpu: cpu to use
 * @dwork: job to be done
 * @delay: number of jiffies to wait
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue on the specified CPU.
 */
int schedule_delayed_work_on(int cpu,
			struct delayed_work *dwork, unsigned long delay)
{
	return queue_delayed_work_on(cpu, keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work_on);

/**
 * schedule_on_each_cpu - call a function on each online CPU from keventd
 * @func: the function to call
 *
 * Returns zero on success.
 * Returns -ve errno on failure.
 *
 * Appears to be racy against CPU hotplug.
 *
 * schedule_on_each_cpu() is very slow.
 */
int schedule_on_each_cpu(work_func_t func)
{
	int cpu;
	struct work_struct *works;

	works = alloc_percpu(struct work_struct);
	if (!works)
		return -ENOMEM;

	mutex_lock(&workqueue_mutex);
	for_each_online_cpu(cpu) {
		INIT_WORK(per_cpu_ptr(works, cpu), func);
		__queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu),
			     per_cpu_ptr(works, cpu));
	}
	mutex_unlock(&workqueue_mutex);
	flush_workqueue(keventd_wq);
	free_percpu(works);
	return 0;
}

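/*
 * Example usage (an illustrative sketch only; my_percpu_init is a made-up
 * name):
 *
 *	static void my_percpu_init(struct work_struct *unused)
 *	{
 *		... runs once on every online CPU, in keventd context ...
 *	}
 *
 *	int err = schedule_on_each_cpu(my_percpu_init);
 *
 * The call blocks until the function has run everywhere; the per-cpu
 * work_structs it allocates are freed before it returns.
 */
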
void flush_scheduled_work(void)
{
	flush_workqueue(keventd_wq);
}
EXPORT_SYMBOL(flush_scheduled_work);

/**
 * cancel_rearming_delayed_workqueue - reliably kill off a delayed
 *			work whose handler rearms the delayed work.
 * @wq:   the controlling workqueue structure
 * @dwork: the delayed work struct
 */
void cancel_rearming_delayed_workqueue(struct workqueue_struct *wq,
				       struct delayed_work *dwork)
{
	while (!cancel_delayed_work(dwork))
		flush_workqueue(wq);
}
EXPORT_SYMBOL(cancel_rearming_delayed_workqueue);

/**
 * cancel_rearming_delayed_work - reliably kill off a delayed keventd
 *			work whose handler rearms the delayed work.
 * @dwork: the delayed work struct
 */
void cancel_rearming_delayed_work(struct delayed_work *dwork)
{
	cancel_rearming_delayed_workqueue(keventd_wq, dwork);
}
EXPORT_SYMBOL(cancel_rearming_delayed_work);

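/*
 * Example of the self-rearming pattern these helpers exist for (an
 * illustrative sketch only; my_poll_fn and my_dwork are made-up names):
 *
 *	static void my_poll_fn(struct work_struct *work)
 *	{
 *		... do periodic work ...
 *		schedule_delayed_work(&my_dwork, HZ);	rearm ourselves
 *	}
 *
 * A plain cancel_delayed_work() can lose the race against the handler
 * rearming itself, so teardown uses
 *
 *	cancel_rearming_delayed_work(&my_dwork);
 *
 * which keeps cancelling and flushing until the work is really gone.
 */
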
/**
 * execute_in_process_context - reliably execute the routine with user context
 * @fn:		the function to execute
 * @ew:		guaranteed storage for the execute work structure (must
 *		be available when the work executes)
 *
 * Executes the function immediately if process context is available,
 * otherwise schedules the function for delayed execution.
 *
 * Returns:	0 - function was executed
 *		1 - function was scheduled for execution
 */
int execute_in_process_context(work_func_t fn, struct execute_work *ew)
{
	if (!in_interrupt()) {
		fn(&ew->work);
		return 0;
	}

	INIT_WORK(&ew->work, fn);
	schedule_work(&ew->work);

	return 1;
}
EXPORT_SYMBOL_GPL(execute_in_process_context);

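/*
 * Example usage (an illustrative sketch only; my_dev and my_release are
 * made-up names):
 *
 *	struct my_dev {
 *		struct execute_work ew;
 *	};
 *
 *	static void my_release(struct work_struct *work)
 *	{
 *		struct my_dev *dev = container_of(work, struct my_dev, ew.work);
 *		... final cleanup, may sleep ...
 *	}
 *
 *	execute_in_process_context(my_release, &dev->ew);
 *
 * From process context my_release() runs immediately; from interrupt context
 * it is deferred to keventd, so @ew (here the whole my_dev) must stay valid
 * until the work has run.
 */
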
int keventd_up(void)
{
	return keventd_wq != NULL;
}

int current_is_keventd(void)
{
	struct cpu_workqueue_struct *cwq;
	int cpu = smp_processor_id();	/* preempt-safe: keventd is per-cpu */
	int ret = 0;

	BUG_ON(!keventd_wq);

	cwq = per_cpu_ptr(keventd_wq->cpu_wq, cpu);
	if (current == cwq->thread)
		ret = 1;

	return ret;

}

#ifdef CONFIG_HOTPLUG_CPU
/* Take the work from this (downed) CPU. */
static void take_over_work(struct workqueue_struct *wq, unsigned int cpu)
{
	struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);
	struct list_head list;
	struct work_struct *work;

	spin_lock_irq(&cwq->lock);
	list_replace_init(&cwq->worklist, &list);

	while (!list_empty(&list)) {
		printk("Taking work for %s\n", wq->name);
		work = list_entry(list.next,struct work_struct,entry);
		list_del(&work->entry);
		__queue_work(per_cpu_ptr(wq->cpu_wq, smp_processor_id()), work);
	}
	spin_unlock_irq(&cwq->lock);
}

/* We're holding the cpucontrol mutex here */
static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
						  unsigned long action,
						  void *hcpu)
{
	unsigned int hotcpu = (unsigned long)hcpu;
	struct workqueue_struct *wq;

	switch (action) {
	case CPU_UP_PREPARE:
		mutex_lock(&workqueue_mutex);
		/* Create a new workqueue thread for it. */
		list_for_each_entry(wq, &workqueues, list) {
			if (!create_workqueue_thread(wq, hotcpu, 0)) {
				printk("workqueue for %i failed\n", hotcpu);
				return NOTIFY_BAD;
			}
		}
		break;

	case CPU_ONLINE:
		/* Kick off worker threads. */
		list_for_each_entry(wq, &workqueues, list) {
			struct cpu_workqueue_struct *cwq;

			cwq = per_cpu_ptr(wq->cpu_wq, hotcpu);
			kthread_bind(cwq->thread, hotcpu);
			wake_up_process(cwq->thread);
		}
		mutex_unlock(&workqueue_mutex);
		break;

	case CPU_UP_CANCELED:
		list_for_each_entry(wq, &workqueues, list) {
			if (!per_cpu_ptr(wq->cpu_wq, hotcpu)->thread)
				continue;
			/* Unbind so it can run. */
			kthread_bind(per_cpu_ptr(wq->cpu_wq, hotcpu)->thread,
				     any_online_cpu(cpu_online_map));
			cleanup_workqueue_thread(wq, hotcpu);
		}
		mutex_unlock(&workqueue_mutex);
		break;

	case CPU_DOWN_PREPARE:
		mutex_lock(&workqueue_mutex);
		break;

	case CPU_DOWN_FAILED:
		mutex_unlock(&workqueue_mutex);
		break;

	case CPU_DEAD:
		list_for_each_entry(wq, &workqueues, list)
			cleanup_workqueue_thread(wq, hotcpu);
		list_for_each_entry(wq, &workqueues, list)
			take_over_work(wq, hotcpu);
		mutex_unlock(&workqueue_mutex);
		break;
	}

	return NOTIFY_OK;
}
#endif

void init_workqueues(void)
{
	singlethread_cpu = first_cpu(cpu_possible_map);
	hotcpu_notifier(workqueue_cpu_callback, 0);
	keventd_wq = create_workqueue("events");
	BUG_ON(!keventd_wq);
}