/*
 * linux/kernel/workqueue.c
 *
 * Generic mechanism for defining kernel helper threads for running
 * arbitrary tasks in process context.
 *
 * Started by Ingo Molnar, Copyright (C) 2002
 *
 * Derived from the taskqueue/keventd code by:
 *
 *   David Woodhouse <[email protected]>
 *   Andrew Morton <[email protected]>
 *   Kai Petzke <[email protected]>
 *   Theodore Ts'o <[email protected]>
 *
 * Made to use alloc_percpu by Christoph Lameter <[email protected]>.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>

/*
 * The per-CPU workqueue (if single thread, we always use the first
 * possible cpu).
 *
 * The sequence counters are for flush_scheduled_work().  It wants to wait
 * until all currently-scheduled works are completed, but it doesn't
 * want to be livelocked by new, incoming ones.  So it waits until
 * remove_sequence is >= the insert_sequence which pertained when
 * flush_scheduled_work() was called.
 */
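/*
 * Worked example (illustration added by the editor, not part of the
 * original code): suppose a flush begins while insert_sequence == 7 and
 * remove_sequence == 4.  The flusher samples insert_sequence once
 * (sequence_needed = 7) and sleeps on work_done until remove_sequence
 * reaches 7.  Works queued after the flush started push insert_sequence
 * to 8, 9, ... but never move that goal, so a steady stream of new
 * submissions cannot livelock the flusher.
 */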
struct cpu_workqueue_struct {

	spinlock_t lock;

	long remove_sequence;	/* Least-recently added (next to run) */
	long insert_sequence;	/* Next to add */

	struct list_head worklist;
	wait_queue_head_t more_work;
	wait_queue_head_t work_done;

	struct workqueue_struct *wq;
	task_t *thread;

	int run_depth;		/* Detect run_workqueue() recursion depth */
} ____cacheline_aligned;

/*
 * The externally visible workqueue abstraction is an array of
 * per-CPU workqueues:
 */
struct workqueue_struct {
	struct cpu_workqueue_struct *cpu_wq;
	const char *name;
	struct list_head list;	/* Empty if single thread */
};

/* All the per-cpu workqueues on the system, for hotplug cpu to add/remove
   threads to each one as cpus come/go. */
static DEFINE_SPINLOCK(workqueue_lock);
static LIST_HEAD(workqueues);

static int singlethread_cpu;

/* If it's single threaded, it isn't in the list of workqueues. */
static inline int is_single_threaded(struct workqueue_struct *wq)
{
	return list_empty(&wq->list);
}

/* Preempt must be disabled. */
static void __queue_work(struct cpu_workqueue_struct *cwq,
			 struct work_struct *work)
{
	unsigned long flags;

	spin_lock_irqsave(&cwq->lock, flags);
	work->wq_data = cwq;
	list_add_tail(&work->entry, &cwq->worklist);
	cwq->insert_sequence++;
	wake_up(&cwq->more_work);
	spin_unlock_irqrestore(&cwq->lock, flags);
}

/*
 * Queue work on a workqueue. Return non-zero if it was successfully
 * added.
 *
 * We queue the work to the CPU on which it was submitted, but there is
 * no guarantee that it will be processed by that CPU.
 */
int fastcall queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
	int ret = 0, cpu = get_cpu();

	if (!test_and_set_bit(0, &work->pending)) {
		if (unlikely(is_single_threaded(wq)))
			cpu = singlethread_cpu;
		BUG_ON(!list_empty(&work->entry));
		__queue_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
		ret = 1;
	}
	put_cpu();
	return ret;
}
EXPORT_SYMBOL_GPL(queue_work);

static void delayed_work_timer_fn(unsigned long __data)
{
	struct work_struct *work = (struct work_struct *)__data;
	struct workqueue_struct *wq = work->wq_data;
	int cpu = smp_processor_id();

	if (unlikely(is_single_threaded(wq)))
		cpu = singlethread_cpu;

	__queue_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
}

int fastcall queue_delayed_work(struct workqueue_struct *wq,
			struct work_struct *work, unsigned long delay)
{
	int ret = 0;
	struct timer_list *timer = &work->timer;

	if (!test_and_set_bit(0, &work->pending)) {
		BUG_ON(timer_pending(timer));
		BUG_ON(!list_empty(&work->entry));

		/* This stores wq for the moment, for the timer_fn */
		work->wq_data = wq;
		timer->expires = jiffies + delay;
		timer->data = (unsigned long)work;
		timer->function = delayed_work_timer_fn;
		add_timer(timer);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_delayed_work);
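/*
 * Usage sketch (added for illustration; my_wq, my_work and my_handler
 * are hypothetical names, not part of this file).  A driver typically
 * declares a handler and a work item, then queues the item on its own
 * workqueue; queue_delayed_work() does the same via a timer:
 *
 *	static void my_handler(void *data)
 *	{
 *		... runs in process context, may sleep ...
 *	}
 *	static DECLARE_WORK(my_work, my_handler, NULL);
 *	static struct workqueue_struct *my_wq;
 *
 *	my_wq = create_workqueue("my_wq");
 *	queue_work(my_wq, &my_work);		run as soon as possible
 *	...
 *	queue_delayed_work(my_wq, &my_work, HZ);	about a second later,
 *						once the first run finished
 */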
int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct work_struct *work, unsigned long delay)
{
	int ret = 0;
	struct timer_list *timer = &work->timer;

	if (!test_and_set_bit(0, &work->pending)) {
		BUG_ON(timer_pending(timer));
		BUG_ON(!list_empty(&work->entry));

		/* This stores wq for the moment, for the timer_fn */
		work->wq_data = wq;
		timer->expires = jiffies + delay;
		timer->data = (unsigned long)work;
		timer->function = delayed_work_timer_fn;
		add_timer_on(timer, cpu);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_delayed_work_on);

static void run_workqueue(struct cpu_workqueue_struct *cwq)
{
	unsigned long flags;

	/*
	 * Keep taking off work from the queue until
	 * done.
	 */
	spin_lock_irqsave(&cwq->lock, flags);
	cwq->run_depth++;
	if (cwq->run_depth > 3) {
		/* morton gets to eat his hat */
		printk("%s: recursion depth exceeded: %d\n",
			__FUNCTION__, cwq->run_depth);
		dump_stack();
	}
	while (!list_empty(&cwq->worklist)) {
		struct work_struct *work = list_entry(cwq->worklist.next,
						struct work_struct, entry);
		void (*f) (void *) = work->func;
		void *data = work->data;

		list_del_init(cwq->worklist.next);
		spin_unlock_irqrestore(&cwq->lock, flags);

		BUG_ON(work->wq_data != cwq);
		clear_bit(0, &work->pending);
		f(data);

		spin_lock_irqsave(&cwq->lock, flags);
		cwq->remove_sequence++;
		wake_up(&cwq->work_done);
	}
	cwq->run_depth--;
	spin_unlock_irqrestore(&cwq->lock, flags);
}

static int worker_thread(void *__cwq)
{
	struct cpu_workqueue_struct *cwq = __cwq;
	DECLARE_WAITQUEUE(wait, current);
	struct k_sigaction sa;
	sigset_t blocked;

	current->flags |= PF_NOFREEZE;

	set_user_nice(current, -5);

	/* Block and flush all signals */
	sigfillset(&blocked);
	sigprocmask(SIG_BLOCK, &blocked, NULL);
	flush_signals(current);

	/* SIG_IGN makes children autoreap: see do_notify_parent(). */
	sa.sa.sa_handler = SIG_IGN;
	sa.sa.sa_flags = 0;
	siginitset(&sa.sa.sa_mask, sigmask(SIGCHLD));
	do_sigaction(SIGCHLD, &sa, (struct k_sigaction *)0);

	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		add_wait_queue(&cwq->more_work, &wait);
		if (list_empty(&cwq->worklist))
			schedule();
		else
			__set_current_state(TASK_RUNNING);
		remove_wait_queue(&cwq->more_work, &wait);

		if (!list_empty(&cwq->worklist))
			run_workqueue(cwq);
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}

static void flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
{
	if (cwq->thread == current) {
		/*
		 * Probably keventd trying to flush its own queue. So simply run
		 * it by hand rather than deadlocking.
		 */
		run_workqueue(cwq);
	} else {
		DEFINE_WAIT(wait);
		long sequence_needed;

		spin_lock_irq(&cwq->lock);
		sequence_needed = cwq->insert_sequence;

		while (sequence_needed - cwq->remove_sequence > 0) {
			prepare_to_wait(&cwq->work_done, &wait,
					TASK_UNINTERRUPTIBLE);
			spin_unlock_irq(&cwq->lock);
			schedule();
			spin_lock_irq(&cwq->lock);
		}
		finish_wait(&cwq->work_done, &wait);
		spin_unlock_irq(&cwq->lock);
	}
}
/*
 * flush_workqueue - ensure that any scheduled work has run to completion.
 *
 * Forces execution of the workqueue and blocks until its completion.
 * This is typically used in driver shutdown handlers.
 *
 * This function will sample each CPU workqueue's current insert_sequence
 * number and will sleep until remove_sequence is greater than or equal to
 * that.  This means that we sleep until all works which were queued on entry
 * have been handled, but we are not livelocked by new incoming ones.
 *
 * This function used to run the workqueues itself.  Now we just wait for the
 * helper threads to do it.
 */
void fastcall flush_workqueue(struct workqueue_struct *wq)
{
	might_sleep();

	if (is_single_threaded(wq)) {
		/* Always use first cpu's area. */
		flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, singlethread_cpu));
	} else {
		int cpu;

		lock_cpu_hotplug();
		for_each_online_cpu(cpu)
			flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
		unlock_cpu_hotplug();
	}
}
EXPORT_SYMBOL_GPL(flush_workqueue);
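/*
 * Shutdown sketch (added for illustration; my_wq and my_work are
 * hypothetical names, not part of this file).  A driver's teardown path
 * typically makes sure nothing is pending before freeing the queue:
 *
 *	cancel_delayed_work(&my_work);	stop a pending timer, if any
 *	flush_workqueue(my_wq);		wait for queued works to finish
 *	destroy_workqueue(my_wq);
 */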
static struct task_struct *create_workqueue_thread(struct workqueue_struct *wq,
						   int cpu)
{
	struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);
	struct task_struct *p;

	spin_lock_init(&cwq->lock);
	cwq->wq = wq;
	cwq->thread = NULL;
	cwq->insert_sequence = 0;
	cwq->remove_sequence = 0;
	INIT_LIST_HEAD(&cwq->worklist);
	init_waitqueue_head(&cwq->more_work);
	init_waitqueue_head(&cwq->work_done);

	if (is_single_threaded(wq))
		p = kthread_create(worker_thread, cwq, "%s", wq->name);
	else
		p = kthread_create(worker_thread, cwq, "%s/%d", wq->name, cpu);
	if (IS_ERR(p))
		return NULL;
	cwq->thread = p;
	return p;
}

struct workqueue_struct *__create_workqueue(const char *name,
					    int singlethread)
{
	int cpu, destroy = 0;
	struct workqueue_struct *wq;
	struct task_struct *p;

	wq = kzalloc(sizeof(*wq), GFP_KERNEL);
	if (!wq)
		return NULL;

	wq->cpu_wq = alloc_percpu(struct cpu_workqueue_struct);
	if (!wq->cpu_wq) {
		kfree(wq);
		return NULL;
	}

	wq->name = name;
	/* We don't need the distraction of CPUs appearing and vanishing. */
	lock_cpu_hotplug();
	if (singlethread) {
		INIT_LIST_HEAD(&wq->list);
		p = create_workqueue_thread(wq, singlethread_cpu);
		if (!p)
			destroy = 1;
		else
			wake_up_process(p);
	} else {
		spin_lock(&workqueue_lock);
		list_add(&wq->list, &workqueues);
		spin_unlock(&workqueue_lock);
		for_each_online_cpu(cpu) {
			p = create_workqueue_thread(wq, cpu);
			if (p) {
				kthread_bind(p, cpu);
				wake_up_process(p);
			} else
				destroy = 1;
		}
	}
	unlock_cpu_hotplug();

	/*
	 * Was there any error during startup? If yes then clean up:
	 */
	if (destroy) {
		destroy_workqueue(wq);
		wq = NULL;
	}
	return wq;
}
EXPORT_SYMBOL_GPL(__create_workqueue);

static void cleanup_workqueue_thread(struct workqueue_struct *wq, int cpu)
{
	struct cpu_workqueue_struct *cwq;
	unsigned long flags;
	struct task_struct *p;

	cwq = per_cpu_ptr(wq->cpu_wq, cpu);
	spin_lock_irqsave(&cwq->lock, flags);
	p = cwq->thread;
	cwq->thread = NULL;
	spin_unlock_irqrestore(&cwq->lock, flags);
	if (p)
		kthread_stop(p);
}
void destroy_workqueue(struct workqueue_struct *wq)
{
	int cpu;

	flush_workqueue(wq);

	/* We don't need the distraction of CPUs appearing and vanishing. */
	lock_cpu_hotplug();
	if (is_single_threaded(wq))
		cleanup_workqueue_thread(wq, singlethread_cpu);
	else {
		for_each_online_cpu(cpu)
			cleanup_workqueue_thread(wq, cpu);
		spin_lock(&workqueue_lock);
		list_del(&wq->list);
		spin_unlock(&workqueue_lock);
	}
	unlock_cpu_hotplug();
	free_percpu(wq->cpu_wq);
	kfree(wq);
}
EXPORT_SYMBOL_GPL(destroy_workqueue);

static struct workqueue_struct *keventd_wq;

int fastcall schedule_work(struct work_struct *work)
{
	return queue_work(keventd_wq, work);
}
EXPORT_SYMBOL(schedule_work);

int fastcall schedule_delayed_work(struct work_struct *work, unsigned long delay)
{
	return queue_delayed_work(keventd_wq, work, delay);
}
EXPORT_SYMBOL(schedule_delayed_work);

int schedule_delayed_work_on(int cpu,
			struct work_struct *work, unsigned long delay)
{
	return queue_delayed_work_on(cpu, keventd_wq, work, delay);
}
EXPORT_SYMBOL(schedule_delayed_work_on);
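/*
 * Sketch of using the shared keventd queue (added for illustration;
 * my_work and my_handler are hypothetical names, not part of this
 * file).  Most callers do not need their own workqueue and simply defer
 * to the "events" threads; any one of the following defers my_work:
 *
 *	static DECLARE_WORK(my_work, my_handler, NULL);
 *
 *	schedule_work(&my_work);			run soon, on this CPU
 * or
 *	schedule_delayed_work(&my_work, HZ);		about a second later
 * or
 *	schedule_delayed_work_on(2, &my_work, HZ);	ditto, but on CPU 2
 */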
/**
 * schedule_on_each_cpu - call a function on each online CPU from keventd
 * @func: the function to call
 * @info: a pointer to pass to func()
 *
 * Returns zero on success.
 * Returns -ve errno on failure.
 *
 * Appears to be racy against CPU hotplug.
 *
 * schedule_on_each_cpu() is very slow.
 */
int schedule_on_each_cpu(void (*func)(void *info), void *info)
{
	int cpu;
	struct work_struct *works;

	works = alloc_percpu(struct work_struct);
	if (!works)
		return -ENOMEM;

	for_each_online_cpu(cpu) {
		INIT_WORK(per_cpu_ptr(works, cpu), func, info);
		__queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu),
				per_cpu_ptr(works, cpu));
	}
	flush_workqueue(keventd_wq);
	free_percpu(works);
	return 0;
}

void flush_scheduled_work(void)
{
	flush_workqueue(keventd_wq);
}
EXPORT_SYMBOL(flush_scheduled_work);

/**
 * cancel_rearming_delayed_workqueue - reliably kill off a delayed
 *			work whose handler rearms the delayed work.
 * @wq:   the controlling workqueue structure
 * @work: the delayed work struct
 */
void cancel_rearming_delayed_workqueue(struct workqueue_struct *wq,
				       struct work_struct *work)
{
	while (!cancel_delayed_work(work))
		flush_workqueue(wq);
}
EXPORT_SYMBOL(cancel_rearming_delayed_workqueue);

/**
 * cancel_rearming_delayed_work - reliably kill off a delayed keventd
 *			work whose handler rearms the delayed work.
 * @work: the delayed work struct
 */
void cancel_rearming_delayed_work(struct work_struct *work)
{
	cancel_rearming_delayed_workqueue(keventd_wq, work);
}
EXPORT_SYMBOL(cancel_rearming_delayed_work);
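/*
 * Rearming sketch (added for illustration; my_poll and my_poll_work are
 * hypothetical names, not part of this file).  A handler that requeues
 * itself must be stopped with cancel_rearming_delayed_work(), because a
 * plain cancel_delayed_work() can race with the requeue:
 *
 *	static void my_poll(void *data);
 *	static DECLARE_WORK(my_poll_work, my_poll, NULL);
 *
 *	static void my_poll(void *data)
 *	{
 *		... do the periodic work ...
 *		schedule_delayed_work(&my_poll_work, HZ);
 *	}
 *
 *	schedule_delayed_work(&my_poll_work, HZ);	start polling
 *	...
 *	cancel_rearming_delayed_work(&my_poll_work);	stop it reliably
 */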
/**
 * execute_in_process_context - reliably execute the routine with user context
 * @fn:		the function to execute
 * @data:	data to pass to the function
 * @ew:		guaranteed storage for the execute work structure (must
 *		be available when the work executes)
 *
 * Executes the function immediately if process context is available,
 * otherwise schedules the function for delayed execution.
 *
 * Returns:	0 - function was executed
 *		1 - function was scheduled for execution
 */
int execute_in_process_context(void (*fn)(void *data), void *data,
			       struct execute_work *ew)
{
	if (!in_interrupt()) {
		fn(data);
		return 0;
	}

	INIT_WORK(&ew->work, fn, data);
	schedule_work(&ew->work);

	return 1;
}
EXPORT_SYMBOL_GPL(execute_in_process_context);
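/*
 * Sketch (added for illustration; my_release, my_dev and my_ew are
 * hypothetical names, not part of this file).  A release path that may
 * be entered from interrupt context can use execute_in_process_context()
 * so the callback always runs with user context, either directly or via
 * keventd:
 *
 *	static struct execute_work my_ew;	must outlive the call
 *
 *	execute_in_process_context(my_release, my_dev, &my_ew);
 */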
int keventd_up(void)
{
	return keventd_wq != NULL;
}

int current_is_keventd(void)
{
	struct cpu_workqueue_struct *cwq;
	int cpu = smp_processor_id();	/* preempt-safe: keventd is per-cpu */
	int ret = 0;

	BUG_ON(!keventd_wq);

	cwq = per_cpu_ptr(keventd_wq->cpu_wq, cpu);
	if (current == cwq->thread)
		ret = 1;

	return ret;

}

#ifdef CONFIG_HOTPLUG_CPU
/* Take the work from this (downed) CPU. */
static void take_over_work(struct workqueue_struct *wq, unsigned int cpu)
{
	struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);
	struct list_head list;
	struct work_struct *work;

	spin_lock_irq(&cwq->lock);
	list_replace_init(&cwq->worklist, &list);

	while (!list_empty(&list)) {
		printk("Taking work for %s\n", wq->name);
		work = list_entry(list.next, struct work_struct, entry);
		list_del(&work->entry);
		__queue_work(per_cpu_ptr(wq->cpu_wq, smp_processor_id()), work);
	}
	spin_unlock_irq(&cwq->lock);
}

/* We're holding the cpucontrol mutex here */
static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
				  unsigned long action,
				  void *hcpu)
{
	unsigned int hotcpu = (unsigned long)hcpu;
	struct workqueue_struct *wq;

	switch (action) {
	case CPU_UP_PREPARE:
		/* Create a new workqueue thread for it. */
		list_for_each_entry(wq, &workqueues, list) {
			if (!create_workqueue_thread(wq, hotcpu)) {
				printk("workqueue for %i failed\n", hotcpu);
				return NOTIFY_BAD;
			}
		}
		break;

	case CPU_ONLINE:
		/* Kick off worker threads. */
		list_for_each_entry(wq, &workqueues, list) {
			struct cpu_workqueue_struct *cwq;

			cwq = per_cpu_ptr(wq->cpu_wq, hotcpu);
			kthread_bind(cwq->thread, hotcpu);
			wake_up_process(cwq->thread);
		}
		break;

	case CPU_UP_CANCELED:
		list_for_each_entry(wq, &workqueues, list) {
			if (!per_cpu_ptr(wq->cpu_wq, hotcpu)->thread)
				continue;
			/* Unbind so it can run. */
			kthread_bind(per_cpu_ptr(wq->cpu_wq, hotcpu)->thread,
				     any_online_cpu(cpu_online_map));
			cleanup_workqueue_thread(wq, hotcpu);
		}
		break;

	case CPU_DEAD:
		list_for_each_entry(wq, &workqueues, list)
			cleanup_workqueue_thread(wq, hotcpu);
		list_for_each_entry(wq, &workqueues, list)
			take_over_work(wq, hotcpu);
		break;
	}

	return NOTIFY_OK;
}
#endif

void init_workqueues(void)
{
	singlethread_cpu = first_cpu(cpu_possible_map);
	hotcpu_notifier(workqueue_cpu_callback, 0);
	keventd_wq = create_workqueue("events");
	BUG_ON(!keventd_wq);
}