Lines matching refs:worker in kernel/kthread.c — every use of the worker identifier in the kthread_worker implementation.
943 void __kthread_init_worker(struct kthread_worker *worker, in __kthread_init_worker() argument
947 memset(worker, 0, sizeof(struct kthread_worker)); in __kthread_init_worker()
948 raw_spin_lock_init(&worker->lock); in __kthread_init_worker()
949 lockdep_set_class_and_name(&worker->lock, key, name); in __kthread_init_worker()
950 INIT_LIST_HEAD(&worker->work_list); in __kthread_init_worker()
951 INIT_LIST_HEAD(&worker->delayed_work_list); in __kthread_init_worker()
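
__kthread_init_worker() zeroes the structure, initializes the raw spinlock with a caller-supplied lockdep class and name, and sets up both work lists. Drivers normally reach it through the kthread_init_worker() macro (or DEFINE_KTHREAD_WORKER for a static worker), which supplies the lockdep key. A minimal sketch, assuming a hypothetical my_dev structure that embeds its worker:

    #include <linux/kthread.h>

    struct my_dev {
            struct kthread_worker worker;   /* hypothetical driver state */
    };

    static void my_dev_setup(struct my_dev *dev)
    {
            /*
             * Expands to __kthread_init_worker() with a static lockdep
             * key; worker->task stays NULL until a thread starts
             * running kthread_worker_fn() on this worker.
             */
            kthread_init_worker(&dev->worker);
    }
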
972 struct kthread_worker *worker = worker_ptr; in kthread_worker_fn() local
979 WARN_ON(worker->task && worker->task != current); in kthread_worker_fn()
980 worker->task = current; in kthread_worker_fn()
982 if (worker->flags & KTW_FREEZABLE) in kthread_worker_fn()
990 raw_spin_lock_irq(&worker->lock); in kthread_worker_fn()
991 worker->task = NULL; in kthread_worker_fn()
992 raw_spin_unlock_irq(&worker->lock); in kthread_worker_fn()
997 raw_spin_lock_irq(&worker->lock); in kthread_worker_fn()
998 if (!list_empty(&worker->work_list)) { in kthread_worker_fn()
999 work = list_first_entry(&worker->work_list, in kthread_worker_fn()
1003 worker->current_work = work; in kthread_worker_fn()
1004 raw_spin_unlock_irq(&worker->lock); in kthread_worker_fn()
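
kthread_worker_fn() is the worker main loop: it claims the worker by setting worker->task = current (warning if another task already owns it), marks itself freezable when KTW_FREEZABLE is set, then repeatedly takes the first entry off work_list under the raw spinlock, publishes it as worker->current_work, and runs the callback with the lock dropped. The function is exported precisely so a hand-initialized worker can be driven by an ordinary kthread; a sketch continuing the my_dev example above:

    #include <linux/err.h>
    #include <linux/kthread.h>

    static int my_dev_start(struct my_dev *dev)
    {
            struct task_struct *task;

            /* The new thread sets dev->worker.task = current and loops. */
            task = kthread_run(kthread_worker_fn, &dev->worker, "my_dev");
            if (IS_ERR(task))
                    return PTR_ERR(task);

            return 0;
    }

kthread_stop() on that task ends the loop; lines 990-992 above show the exiting thread clearing worker->task under the worker lock on the way out.
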
1037 struct kthread_worker *worker; in __kthread_create_worker_on_node() local
1040 worker = kzalloc(sizeof(*worker), GFP_KERNEL); in __kthread_create_worker_on_node()
1041 if (!worker) in __kthread_create_worker_on_node()
1044 kthread_init_worker(worker); in __kthread_create_worker_on_node()
1046 task = __kthread_create_on_node(kthread_worker_fn, worker, in __kthread_create_worker_on_node()
1051 worker->flags = flags; in __kthread_create_worker_on_node()
1052 worker->task = task; in __kthread_create_worker_on_node()
1054 return worker; in __kthread_create_worker_on_node()
1057 kfree(worker); in __kthread_create_worker_on_node()
1074 struct kthread_worker *worker; in kthread_create_worker_on_node() local
1078 worker = __kthread_create_worker_on_node(flags, node, namefmt, args); in kthread_create_worker_on_node()
1081 return worker; in kthread_create_worker_on_node()
1125 struct kthread_worker *worker; in kthread_create_worker_on_cpu() local
1127 worker = kthread_create_worker_on_node(flags, cpu_to_node(cpu), namefmt, cpu); in kthread_create_worker_on_cpu()
1128 if (!IS_ERR(worker)) in kthread_create_worker_on_cpu()
1129 kthread_bind(worker->task, cpu); in kthread_create_worker_on_cpu()
1131 return worker; in kthread_create_worker_on_cpu()
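
__kthread_create_worker_on_node() bundles those steps: kzalloc() the worker, kthread_init_worker() it, spawn the thread with __kthread_create_on_node(), and stash the flags and task pointer, freeing the worker again if thread creation fails. kthread_create_worker_on_node() is the varargs wrapper, and kthread_create_worker_on_cpu() additionally passes the CPU number as a name-format argument and binds the thread with kthread_bind(). All of these return an ERR_PTR() on failure, never NULL, hence the IS_ERR() checks. A usage sketch with a hypothetical io_worker:

    #include <linux/err.h>
    #include <linux/kthread.h>

    static struct kthread_worker *io_worker;

    static int my_workers_init(void)
    {
            /* Dynamically allocated worker; ERR_PTR() on failure. */
            io_worker = kthread_create_worker(0, "my-io-worker");
            if (IS_ERR(io_worker))
                    return PTR_ERR(io_worker);

            return 0;
    }
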
1140 static inline bool queuing_blocked(struct kthread_worker *worker, in queuing_blocked() argument
1143 lockdep_assert_held(&worker->lock); in queuing_blocked()
1148 static void kthread_insert_work_sanity_check(struct kthread_worker *worker, in kthread_insert_work_sanity_check() argument
1151 lockdep_assert_held(&worker->lock); in kthread_insert_work_sanity_check()
1154 WARN_ON_ONCE(work->worker && work->worker != worker); in kthread_insert_work_sanity_check()
1158 static void kthread_insert_work(struct kthread_worker *worker, in kthread_insert_work() argument
1162 kthread_insert_work_sanity_check(worker, work); in kthread_insert_work()
1164 trace_sched_kthread_work_queue_work(worker, work); in kthread_insert_work()
1167 work->worker = worker; in kthread_insert_work()
1168 if (!worker->current_work && likely(worker->task)) in kthread_insert_work()
1169 wake_up_process(worker->task); in kthread_insert_work()
1184 bool kthread_queue_work(struct kthread_worker *worker, in kthread_queue_work() argument
1190 raw_spin_lock_irqsave(&worker->lock, flags); in kthread_queue_work()
1191 if (!queuing_blocked(worker, work)) { in kthread_queue_work()
1192 kthread_insert_work(worker, work, &worker->work_list); in kthread_queue_work()
1195 raw_spin_unlock_irqrestore(&worker->lock, flags); in kthread_queue_work()
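
kthread_queue_work() takes the worker lock with interrupts disabled and, unless queuing_blocked() reports the work as already pending or being canceled, appends it to work_list; kthread_insert_work() (above) wakes the worker task only when it is idle, i.e. not already processing an item. Because the lock is a raw spinlock taken irqsave, queuing is safe from hard-IRQ context. A sketch reusing the hypothetical io_worker:

    #include <linux/kthread.h>
    #include <linux/printk.h>

    static void my_work_fn(struct kthread_work *work)
    {
            /* Runs in the worker thread, one item at a time. */
    }

    static DEFINE_KTHREAD_WORK(my_work, my_work_fn);

    static void my_kick(void)
    {
            /* Returns false if my_work is already on the list. */
            if (!kthread_queue_work(io_worker, &my_work))
                    pr_debug("my_work already pending\n");
    }
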
1212 struct kthread_worker *worker = work->worker; in kthread_delayed_work_timer_fn() local
1219 if (WARN_ON_ONCE(!worker)) in kthread_delayed_work_timer_fn()
1222 raw_spin_lock_irqsave(&worker->lock, flags); in kthread_delayed_work_timer_fn()
1224 WARN_ON_ONCE(work->worker != worker); in kthread_delayed_work_timer_fn()
1230 kthread_insert_work(worker, work, &worker->work_list); in kthread_delayed_work_timer_fn()
1232 raw_spin_unlock_irqrestore(&worker->lock, flags); in kthread_delayed_work_timer_fn()
1236 static void __kthread_queue_delayed_work(struct kthread_worker *worker, in __kthread_queue_delayed_work() argument
1252 kthread_insert_work(worker, work, &worker->work_list); in __kthread_queue_delayed_work()
1257 kthread_insert_work_sanity_check(worker, work); in __kthread_queue_delayed_work()
1259 list_add(&work->node, &worker->delayed_work_list); in __kthread_queue_delayed_work()
1260 work->worker = worker; in __kthread_queue_delayed_work()
1280 bool kthread_queue_delayed_work(struct kthread_worker *worker, in kthread_queue_delayed_work() argument
1288 raw_spin_lock_irqsave(&worker->lock, flags); in kthread_queue_delayed_work()
1290 if (!queuing_blocked(worker, work)) { in kthread_queue_delayed_work()
1291 __kthread_queue_delayed_work(worker, dwork, delay); in kthread_queue_delayed_work()
1295 raw_spin_unlock_irqrestore(&worker->lock, flags); in kthread_queue_delayed_work()
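
For delay == 0, __kthread_queue_delayed_work() degenerates to an immediate kthread_insert_work(); otherwise it records the worker in work->worker, parks the work on delayed_work_list, and arms dwork->timer, whose handler kthread_delayed_work_timer_fn() (above) later moves the work onto work_list under the same lock. A self-rearming poll sketch with hypothetical names:

    #include <linux/jiffies.h>
    #include <linux/kthread.h>

    static void my_poll_fn(struct kthread_work *work);
    static DEFINE_KTHREAD_DELAYED_WORK(my_poll, my_poll_fn);

    static void my_poll_fn(struct kthread_work *work)
    {
            /* ... poll the hardware ..., then run again in ~100ms. */
            kthread_queue_delayed_work(io_worker, &my_poll,
                                       msecs_to_jiffies(100));
    }
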
1324 struct kthread_worker *worker; in kthread_flush_work() local
1327 worker = work->worker; in kthread_flush_work()
1328 if (!worker) in kthread_flush_work()
1331 raw_spin_lock_irq(&worker->lock); in kthread_flush_work()
1333 WARN_ON_ONCE(work->worker != worker); in kthread_flush_work()
1336 kthread_insert_work(worker, &fwork.work, work->node.next); in kthread_flush_work()
1337 else if (worker->current_work == work) in kthread_flush_work()
1338 kthread_insert_work(worker, &fwork.work, in kthread_flush_work()
1339 worker->work_list.next); in kthread_flush_work()
1343 raw_spin_unlock_irq(&worker->lock); in kthread_flush_work()
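
kthread_flush_work() builds an on-stack barrier work backed by a completion and inserts it directly behind the target: after the work's own list node when the work is still queued, or at the head of work_list when the worker is currently executing it; if the work was never queued at all (work->worker == NULL) the flush is a no-op. The caller then sleeps on the completion. A typical quiesce sketch:

    #include <linux/kthread.h>

    static void my_dev_quiesce(void)
    {
            /*
             * Returns once any queued or currently running instance
             * of my_work has finished executing.
             */
            kthread_flush_work(&my_work);
    }
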
1362 struct kthread_worker *worker = work->worker; in kthread_cancel_delayed_work_timer() local
1371 raw_spin_unlock_irqrestore(&worker->lock, *flags); in kthread_cancel_delayed_work_timer()
1373 raw_spin_lock_irqsave(&worker->lock, *flags); in kthread_cancel_delayed_work_timer()
1427 bool kthread_mod_delayed_work(struct kthread_worker *worker, in kthread_mod_delayed_work() argument
1435 raw_spin_lock_irqsave(&worker->lock, flags); in kthread_mod_delayed_work()
1438 if (!work->worker) { in kthread_mod_delayed_work()
1444 WARN_ON_ONCE(work->worker != worker); in kthread_mod_delayed_work()
1467 __kthread_queue_delayed_work(worker, dwork, delay); in kthread_mod_delayed_work()
1469 raw_spin_unlock_irqrestore(&worker->lock, flags); in kthread_mod_delayed_work()
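
kthread_mod_delayed_work() re-arms a delayed work under the worker lock: a work never queued before (work->worker == NULL) is simply queued fresh; otherwise the pending timer is shot down first via kthread_cancel_delayed_work_timer(), which may briefly drop and retake the lock (lines 1371-1373) while a concurrently firing timer handler finishes, and the work is then requeued with the new delay. Per its kerneldoc it returns false when the work was idle and merely queued, true otherwise. Sketch:

    #include <linux/jiffies.h>
    #include <linux/kthread.h>

    /* Push the poll deadline out to 1s, whether or not it is armed. */
    static void my_poll_touch(void)
    {
            kthread_mod_delayed_work(io_worker, &my_poll,
                                     msecs_to_jiffies(1000));
    }
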
1476 struct kthread_worker *worker = work->worker; in __kthread_cancel_work_sync() local
1480 if (!worker) in __kthread_cancel_work_sync()
1483 raw_spin_lock_irqsave(&worker->lock, flags); in __kthread_cancel_work_sync()
1485 WARN_ON_ONCE(work->worker != worker); in __kthread_cancel_work_sync()
1492 if (worker->current_work != work) in __kthread_cancel_work_sync()
1500 raw_spin_unlock_irqrestore(&worker->lock, flags); in __kthread_cancel_work_sync()
1502 raw_spin_lock_irqsave(&worker->lock, flags); in __kthread_cancel_work_sync()
1506 raw_spin_unlock_irqrestore(&worker->lock, flags); in __kthread_cancel_work_sync()
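
__kthread_cancel_work_sync() backs both kthread_cancel_work_sync() and kthread_cancel_delayed_work_sync(): it kills the timer for delayed work, unlinks a queued instance, and, when the callback is currently running (worker->current_work == work), bumps work->canceling, drops the lock, and flushes the work so the callback can finish without deadlocking, which is why the lock is released and retaken around lines 1500-1502. Sketch:

    #include <linux/kthread.h>
    #include <linux/printk.h>

    static void my_dev_stop_poll(void)
    {
            /*
             * Returns true if an instance was pending; waits for a
             * currently running callback either way.
             */
            if (kthread_cancel_delayed_work_sync(&my_poll))
                    pr_debug("poll work was pending\n");
    }
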
1555 void kthread_flush_worker(struct kthread_worker *worker) in kthread_flush_worker() argument
1562 kthread_queue_work(worker, &fwork.work); in kthread_flush_worker()
1579 void kthread_destroy_worker(struct kthread_worker *worker) in kthread_destroy_worker() argument
1583 task = worker->task; in kthread_destroy_worker()
1587 kthread_flush_worker(worker); in kthread_destroy_worker()
1589 WARN_ON(!list_empty(&worker->delayed_work_list)); in kthread_destroy_worker()
1590 WARN_ON(!list_empty(&worker->work_list)); in kthread_destroy_worker()
1591 kfree(worker); in kthread_destroy_worker()
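
kthread_flush_worker() queues the same kind of completion-backed barrier work at the tail of the worker's queue and waits, so everything queued before it has finished. kthread_destroy_worker() uses it for teardown: flush outstanding work, kthread_stop() the task, assert both lists are empty (the WARN_ONs at lines 1589-1590), and kfree() the worker. Delayed work must be canceled by the caller first, since a still-armed timer is not flushed. A teardown sketch matching the earlier examples:

    #include <linux/kthread.h>

    static void my_workers_exit(void)
    {
            /* Cancel timers first: destroy only flushes queued work. */
            kthread_cancel_delayed_work_sync(&my_poll);
            kthread_cancel_work_sync(&my_work);

            /* Flushes, kthread_stop()s the task, kfree()s the worker. */
            kthread_destroy_worker(io_worker);
    }
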