/*
 * linux/kernel/workqueue.c
 *
 * Generic mechanism for defining kernel helper threads for running
 * arbitrary tasks in process context.
 *
 * Started by Ingo Molnar, Copyright (C) 2002
 *
 * Derived from the taskqueue/keventd code by:
 *
 *   David Woodhouse <[email protected]>
 *   Andrew Morton <[email protected]>
 *   Kai Petzke <[email protected]>
 *   Theodore Ts'o <[email protected]>
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kthread.h>

/*
 * The per-CPU workqueue (if single thread, we always use cpu 0's).
 *
 * The sequence counters are for flush_scheduled_work().  It wants to wait
 * until all currently-scheduled works are completed, but it doesn't
 * want to be livelocked by new, incoming ones.  So it waits until
 * remove_sequence is >= the insert_sequence which pertained when
 * flush_scheduled_work() was called.
 */
struct cpu_workqueue_struct {

	spinlock_t lock;

	long remove_sequence;	/* Least-recently added (next to run) */
	long insert_sequence;	/* Next to add */

	struct list_head worklist;
	wait_queue_head_t more_work;
	wait_queue_head_t work_done;

	struct workqueue_struct *wq;
	task_t *thread;

	int run_depth;		/* Detect run_workqueue() recursion depth */
} ____cacheline_aligned;

/*
 * The externally visible workqueue abstraction is an array of
 * per-CPU workqueues:
 */
struct workqueue_struct {
	struct cpu_workqueue_struct cpu_wq[NR_CPUS];
	const char *name;
	struct list_head list;	/* Empty if single thread */
};

/* All the per-cpu workqueues on the system, for hotplug cpu to add/remove
   threads to each one as cpus come/go. */
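/*
 * A worked example of the sequence-counter invariant described above
 * (illustrative numbers only): if insert_sequence is 42 when a flush
 * begins, the flusher sleeps until remove_sequence >= 42.  Works queued
 * after that point push insert_sequence past 42 but cannot delay the
 * flusher, which is what prevents the livelock.
 */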
static DEFINE_SPINLOCK(workqueue_lock);
static LIST_HEAD(workqueues);

/* If it's single threaded, it isn't in the list of workqueues. */
static inline int is_single_threaded(struct workqueue_struct *wq)
{
	return list_empty(&wq->list);
}

/* Preempt must be disabled. */
static void __queue_work(struct cpu_workqueue_struct *cwq,
			 struct work_struct *work)
{
	unsigned long flags;

	spin_lock_irqsave(&cwq->lock, flags);
	work->wq_data = cwq;
	list_add_tail(&work->entry, &cwq->worklist);
	cwq->insert_sequence++;
	wake_up(&cwq->more_work);
	spin_unlock_irqrestore(&cwq->lock, flags);
}

/*
 * Queue work on a workqueue. Return non-zero if it was successfully
 * added.
 *
 * We queue the work on the CPU it was submitted from, but there is no
 * guarantee that it will be processed by that CPU.
 */
int fastcall queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
	int ret = 0, cpu = get_cpu();

	if (!test_and_set_bit(0, &work->pending)) {
		if (unlikely(is_single_threaded(wq)))
			cpu = 0;
		BUG_ON(!list_empty(&work->entry));
		__queue_work(wq->cpu_wq + cpu, work);
		ret = 1;
	}
	put_cpu();
	return ret;
}

static void delayed_work_timer_fn(unsigned long __data)
{
	struct work_struct *work = (struct work_struct *)__data;
	struct workqueue_struct *wq = work->wq_data;
	int cpu = smp_processor_id();

	if (unlikely(is_single_threaded(wq)))
		cpu = 0;

	__queue_work(wq->cpu_wq + cpu, work);
}
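/*
 * Illustrative usage sketch (hypothetical caller-side names: my_wq,
 * my_dev, my_handler; DECLARE_WORK comes from <linux/workqueue.h>).
 * The handler later runs in process context in one of my_wq's worker
 * threads:
 *
 *	static struct my_dev_struct my_dev;
 *
 *	static void my_handler(void *data)
 *	{
 *		struct my_dev_struct *dev = data;
 *		...
 *	}
 *	static DECLARE_WORK(my_work, my_handler, &my_dev);
 *
 *	queue_work(my_wq, &my_work);	returns 0 if already pending
 */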
int fastcall queue_delayed_work(struct workqueue_struct *wq,
			struct work_struct *work, unsigned long delay)
{
	int ret = 0;
	struct timer_list *timer = &work->timer;

	if (!test_and_set_bit(0, &work->pending)) {
		BUG_ON(timer_pending(timer));
		BUG_ON(!list_empty(&work->entry));

		/* This stores wq for the moment, for the timer_fn */
		work->wq_data = wq;
		timer->expires = jiffies + delay;
		timer->data = (unsigned long)work;
		timer->function = delayed_work_timer_fn;
		add_timer(timer);
		ret = 1;
	}
	return ret;
}
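/*
 * Illustrative sketch of the delayed variant (same hypothetical names
 * as above): the timer requeues the work on whichever CPU it fires on,
 * roughly @delay jiffies from now:
 *
 *	queue_delayed_work(my_wq, &my_work, HZ / 2);	in about 500ms
 */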
static inline void run_workqueue(struct cpu_workqueue_struct *cwq)
{
	unsigned long flags;

	/*
	 * Keep taking off work from the queue until
	 * done.
	 */
	spin_lock_irqsave(&cwq->lock, flags);
	cwq->run_depth++;
	if (cwq->run_depth > 3) {
		/* morton gets to eat his hat */
		printk("%s: recursion depth exceeded: %d\n",
			__FUNCTION__, cwq->run_depth);
		dump_stack();
	}
	while (!list_empty(&cwq->worklist)) {
		struct work_struct *work = list_entry(cwq->worklist.next,
						struct work_struct, entry);
		void (*f) (void *) = work->func;
		void *data = work->data;

		list_del_init(cwq->worklist.next);
		spin_unlock_irqrestore(&cwq->lock, flags);

		BUG_ON(work->wq_data != cwq);
		clear_bit(0, &work->pending);
		f(data);

		spin_lock_irqsave(&cwq->lock, flags);
		cwq->remove_sequence++;
		wake_up(&cwq->work_done);
	}
	cwq->run_depth--;
	spin_unlock_irqrestore(&cwq->lock, flags);
}

static int worker_thread(void *__cwq)
{
	struct cpu_workqueue_struct *cwq = __cwq;
	DECLARE_WAITQUEUE(wait, current);
	struct k_sigaction sa;
	sigset_t blocked;

	current->flags |= PF_NOFREEZE;

	set_user_nice(current, -5);

	/* Block and flush all signals */
	sigfillset(&blocked);
	sigprocmask(SIG_BLOCK, &blocked, NULL);
	flush_signals(current);

	/* SIG_IGN makes children autoreap: see do_notify_parent(). */
	sa.sa.sa_handler = SIG_IGN;
	sa.sa.sa_flags = 0;
	siginitset(&sa.sa.sa_mask, sigmask(SIGCHLD));
	do_sigaction(SIGCHLD, &sa, (struct k_sigaction *)0);

	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		add_wait_queue(&cwq->more_work, &wait);
		if (list_empty(&cwq->worklist))
			schedule();
		else
			__set_current_state(TASK_RUNNING);
		remove_wait_queue(&cwq->more_work, &wait);

		if (!list_empty(&cwq->worklist))
			run_workqueue(cwq);
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}

static void flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
{
	if (cwq->thread == current) {
		/*
		 * Probably keventd trying to flush its own queue. So simply run
		 * it by hand rather than deadlocking.
		 */
		run_workqueue(cwq);
	} else {
		DEFINE_WAIT(wait);
		long sequence_needed;

		spin_lock_irq(&cwq->lock);
		sequence_needed = cwq->insert_sequence;

		while (sequence_needed - cwq->remove_sequence > 0) {
			prepare_to_wait(&cwq->work_done, &wait,
					TASK_UNINTERRUPTIBLE);
			spin_unlock_irq(&cwq->lock);
			schedule();
			spin_lock_irq(&cwq->lock);
		}
		finish_wait(&cwq->work_done, &wait);
		spin_unlock_irq(&cwq->lock);
	}
}
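/*
 * Note on the cwq->thread == current test above: if a work handler
 * flushed the very queue it is running from, sleeping on work_done
 * would deadlock -- the only thread that could make progress is the
 * one sleeping.  Running the queue by hand instead preserves the
 * flush semantics without the deadlock.
 */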
/*
 * flush_workqueue - ensure that any scheduled work has run to completion.
 *
 * Forces execution of the workqueue and blocks until its completion.
 * This is typically used in driver shutdown handlers.
 *
 * This function will sample each CPU workqueue's current insert_sequence
 * number and will sleep until remove_sequence is greater than or equal to
 * that.  This means that we sleep until all works which were queued on
 * entry have been handled, but we are not livelocked by new incoming ones.
 *
 * This function used to run the workqueues itself.  Now we just wait for the
 * helper threads to do it.
 */
void fastcall flush_workqueue(struct workqueue_struct *wq)
{
	might_sleep();

	if (is_single_threaded(wq)) {
		/* Always use cpu 0's area. */
		flush_cpu_workqueue(wq->cpu_wq + 0);
	} else {
		int cpu;

		lock_cpu_hotplug();
		for_each_online_cpu(cpu)
			flush_cpu_workqueue(wq->cpu_wq + cpu);
		unlock_cpu_hotplug();
	}
}

static struct task_struct *create_workqueue_thread(struct workqueue_struct *wq,
						   int cpu)
{
	struct cpu_workqueue_struct *cwq = wq->cpu_wq + cpu;
	struct task_struct *p;

	spin_lock_init(&cwq->lock);
	cwq->wq = wq;
	cwq->thread = NULL;
	cwq->insert_sequence = 0;
	cwq->remove_sequence = 0;
	INIT_LIST_HEAD(&cwq->worklist);
	init_waitqueue_head(&cwq->more_work);
	init_waitqueue_head(&cwq->work_done);

	if (is_single_threaded(wq))
		p = kthread_create(worker_thread, cwq, "%s", wq->name);
	else
		p = kthread_create(worker_thread, cwq, "%s/%d", wq->name, cpu);
	if (IS_ERR(p))
		return NULL;
	cwq->thread = p;
	return p;
}
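/*
 * The format strings above determine the thread names seen in ps(1):
 * a multi-threaded workqueue called "events" yields one thread per
 * online CPU named events/0, events/1, ..., while a single-threaded
 * one yields a single thread named just "events".
 */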
struct workqueue_struct *__create_workqueue(const char *name,
					    int singlethread)
{
	int cpu, destroy = 0;
	struct workqueue_struct *wq;
	struct task_struct *p;

	BUG_ON(strlen(name) > 10);

	wq = kmalloc(sizeof(*wq), GFP_KERNEL);
	if (!wq)
		return NULL;
	memset(wq, 0, sizeof(*wq));

	wq->name = name;
	/* We don't need the distraction of CPUs appearing and vanishing. */
	lock_cpu_hotplug();
	if (singlethread) {
		INIT_LIST_HEAD(&wq->list);
		p = create_workqueue_thread(wq, 0);
		if (!p)
			destroy = 1;
		else
			wake_up_process(p);
	} else {
		spin_lock(&workqueue_lock);
		list_add(&wq->list, &workqueues);
		spin_unlock(&workqueue_lock);
		for_each_online_cpu(cpu) {
			p = create_workqueue_thread(wq, cpu);
			if (p) {
				kthread_bind(p, cpu);
				wake_up_process(p);
			} else
				destroy = 1;
		}
	}
	unlock_cpu_hotplug();

	/*
	 * Was there any error during startup? If yes then clean up:
	 */
	if (destroy) {
		destroy_workqueue(wq);
		wq = NULL;
	}
	return wq;
}

static void cleanup_workqueue_thread(struct workqueue_struct *wq, int cpu)
{
	struct cpu_workqueue_struct *cwq;
	unsigned long flags;
	struct task_struct *p;

	cwq = wq->cpu_wq + cpu;
	spin_lock_irqsave(&cwq->lock, flags);
	p = cwq->thread;
	cwq->thread = NULL;
	spin_unlock_irqrestore(&cwq->lock, flags);
	if (p)
		kthread_stop(p);
}
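/*
 * Illustrative lifecycle sketch using the wrappers from
 * <linux/workqueue.h> (my_wq and my_work are hypothetical; note the
 * BUG_ON above limits names to 10 characters):
 *
 *	struct workqueue_struct *my_wq;
 *
 *	my_wq = create_singlethread_workqueue("mydrv");
 *	if (!my_wq)
 *		return -ENOMEM;
 *	queue_work(my_wq, &my_work);
 *	...
 *	flush_workqueue(my_wq);		wait for queued works
 *	destroy_workqueue(my_wq);	flushes again, then frees
 */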
void destroy_workqueue(struct workqueue_struct *wq)
{
	int cpu;

	flush_workqueue(wq);

	/* We don't need the distraction of CPUs appearing and vanishing. */
	lock_cpu_hotplug();
	if (is_single_threaded(wq))
		cleanup_workqueue_thread(wq, 0);
	else {
		for_each_online_cpu(cpu)
			cleanup_workqueue_thread(wq, cpu);
		spin_lock(&workqueue_lock);
		list_del(&wq->list);
		spin_unlock(&workqueue_lock);
	}
	unlock_cpu_hotplug();
	kfree(wq);
}

static struct workqueue_struct *keventd_wq;

int fastcall schedule_work(struct work_struct *work)
{
	return queue_work(keventd_wq, work);
}

int fastcall schedule_delayed_work(struct work_struct *work, unsigned long delay)
{
	return queue_delayed_work(keventd_wq, work, delay);
}

int schedule_delayed_work_on(int cpu,
			struct work_struct *work, unsigned long delay)
{
	int ret = 0;
	struct timer_list *timer = &work->timer;

	if (!test_and_set_bit(0, &work->pending)) {
		BUG_ON(timer_pending(timer));
		BUG_ON(!list_empty(&work->entry));
		/* This stores keventd_wq for the moment, for the timer_fn */
		work->wq_data = keventd_wq;
		timer->expires = jiffies + delay;
		timer->data = (unsigned long)work;
		timer->function = delayed_work_timer_fn;
		add_timer_on(timer, cpu);
		ret = 1;
	}
	return ret;
}

void flush_scheduled_work(void)
{
	flush_workqueue(keventd_wq);
}
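/*
 * Illustrative sketch: most callers don't create their own workqueue
 * and simply borrow keventd's shared "events" threads via the
 * schedule_*() wrappers above (my_work and my_handler are hypothetical
 * again).  Handlers must not monopolize the queue, since it is shared:
 *
 *	static DECLARE_WORK(my_work, my_handler, NULL);
 *
 *	schedule_work(&my_work);		run soon
 *	schedule_delayed_work(&my_work, HZ);	run in about a second
 *	flush_scheduled_work();			wait for it
 */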
/**
 * cancel_rearming_delayed_workqueue - reliably kill off a delayed
 *			work whose handler rearms the delayed work.
 * @wq:   the controlling workqueue structure
 * @work: the delayed work struct
 */
static void cancel_rearming_delayed_workqueue(struct workqueue_struct *wq,
					      struct work_struct *work)
{
	while (!cancel_delayed_work(work))
		flush_workqueue(wq);
}

/**
 * cancel_rearming_delayed_work - reliably kill off a delayed keventd
 *			work whose handler rearms the delayed work.
 * @work: the delayed work struct
 */
void cancel_rearming_delayed_work(struct work_struct *work)
{
	cancel_rearming_delayed_workqueue(keventd_wq, work);
}
EXPORT_SYMBOL(cancel_rearming_delayed_work);
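/*
 * Illustrative sketch of the self-rearming pattern the helpers above
 * exist for (poll_work and poll_handler are hypothetical).  A bare
 * cancel_delayed_work() can lose the race with a handler that requeues
 * itself; the cancel/flush loop above retries until the cancel wins:
 *
 *	static void poll_handler(void *data);
 *	static DECLARE_WORK(poll_work, poll_handler, NULL);
 *
 *	static void poll_handler(void *data)
 *	{
 *		...
 *		schedule_delayed_work(&poll_work, HZ);	rearm
 *	}
 *
 *	cancel_rearming_delayed_work(&poll_work);	at shutdown
 */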
int keventd_up(void)
{
	return keventd_wq != NULL;
}

int current_is_keventd(void)
{
	struct cpu_workqueue_struct *cwq;
	int cpu = smp_processor_id();	/* preempt-safe: keventd is per-cpu */
	int ret = 0;

	BUG_ON(!keventd_wq);

	cwq = keventd_wq->cpu_wq + cpu;
	if (current == cwq->thread)
		ret = 1;

	return ret;
}

#ifdef CONFIG_HOTPLUG_CPU
/* Take the work from this (downed) CPU. */
static void take_over_work(struct workqueue_struct *wq, unsigned int cpu)
{
	struct cpu_workqueue_struct *cwq = wq->cpu_wq + cpu;
	LIST_HEAD(list);
	struct work_struct *work;

	spin_lock_irq(&cwq->lock);
	list_splice_init(&cwq->worklist, &list);

	while (!list_empty(&list)) {
		printk("Taking work for %s\n", wq->name);
		work = list_entry(list.next, struct work_struct, entry);
		list_del(&work->entry);
		__queue_work(wq->cpu_wq + smp_processor_id(), work);
	}
	spin_unlock_irq(&cwq->lock);
}

/* We're holding the cpucontrol mutex here */
static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
				  unsigned long action,
				  void *hcpu)
{
	unsigned int hotcpu = (unsigned long)hcpu;
	struct workqueue_struct *wq;

	switch (action) {
	case CPU_UP_PREPARE:
		/* Create a new workqueue thread for it.  Note that
		   create_workqueue_thread() returns NULL on failure,
		   not an error pointer, so test for that. */
		list_for_each_entry(wq, &workqueues, list) {
			if (!create_workqueue_thread(wq, hotcpu)) {
				printk("workqueue for %i failed\n", hotcpu);
				return NOTIFY_BAD;
			}
		}
		break;

	case CPU_ONLINE:
		/* Kick off worker threads. */
		list_for_each_entry(wq, &workqueues, list) {
			kthread_bind(wq->cpu_wq[hotcpu].thread, hotcpu);
			wake_up_process(wq->cpu_wq[hotcpu].thread);
		}
		break;

	case CPU_UP_CANCELED:
		list_for_each_entry(wq, &workqueues, list) {
			/* Unbind so it can run. */
			kthread_bind(wq->cpu_wq[hotcpu].thread,
				     smp_processor_id());
			cleanup_workqueue_thread(wq, hotcpu);
		}
		break;

	case CPU_DEAD:
		list_for_each_entry(wq, &workqueues, list)
			cleanup_workqueue_thread(wq, hotcpu);
		list_for_each_entry(wq, &workqueues, list)
			take_over_work(wq, hotcpu);
		break;
	}

	return NOTIFY_OK;
}
#endif

void init_workqueues(void)
{
	hotcpu_notifier(workqueue_cpu_callback, 0);
	keventd_wq = create_workqueue("events");
	BUG_ON(!keventd_wq);
}

EXPORT_SYMBOL_GPL(__create_workqueue);
EXPORT_SYMBOL_GPL(queue_work);
EXPORT_SYMBOL_GPL(queue_delayed_work);
EXPORT_SYMBOL_GPL(flush_workqueue);
EXPORT_SYMBOL_GPL(destroy_workqueue);

EXPORT_SYMBOL(schedule_work);
EXPORT_SYMBOL(schedule_delayed_work);
EXPORT_SYMBOL(schedule_delayed_work_on);
EXPORT_SYMBOL(flush_scheduled_work);