/*
 * kernel/workqueue_internal.h
 *
 * Workqueue internal header file.  Only to be included by workqueue and
 * core kernel subsystems.
 */
#ifndef _KERNEL_WORKQUEUE_INTERNAL_H
#define _KERNEL_WORKQUEUE_INTERNAL_H

#include <linux/workqueue.h>
#include <linux/kthread.h>

struct worker_pool;

/*
 * The poor guys doing the actual heavy lifting.  All on-duty workers are
 * either serving the manager role, on idle list or on busy hash.  For
 * details on the locking annotation (L, I, X...), refer to workqueue.c.
 *
 * Only to be used in workqueue and async.
 */
struct worker {
	/* on idle list while idle, on busy hash table while busy */
	union {
		struct list_head	entry;	/* L: while idle */
		struct hlist_node	hentry;	/* L: while busy */
	};

	struct work_struct	*current_work;	/* L: work being processed */
	work_func_t		current_func;	/* L: current_work's fn */
	struct pool_workqueue	*current_pwq;	/* L: current_work's pwq */
	bool			desc_valid;	/* ->desc is valid */
	struct list_head	scheduled;	/* L: scheduled works */

	/* 64 bytes boundary on 64bit, 32 on 32bit */

	struct task_struct	*task;		/* I: worker task */
	struct worker_pool	*pool;		/* I: the associated pool */
						/* L: for rescuers */

	unsigned long		last_active;	/* L: last active timestamp */
	unsigned int		flags;		/* X: flags */
	int			id;		/* I: worker id */

	/*
	 * Opaque string set with work_set_desc().  Printed out with task
	 * dump for debugging - WARN, BUG, panic or sysrq.
	 */
	char			desc[WORKER_DESC_LEN];

	/* used only by rescuers to point to the target workqueue */
	struct workqueue_struct	*rescue_wq;	/* I: the workqueue to rescue */
};

/**
 * current_wq_worker - return struct worker if %current is a workqueue worker
 */
static inline struct worker *current_wq_worker(void)
{
	if (current->flags & PF_WQ_WORKER)
		return kthread_data(current);
	return NULL;
}

/*
 * Scheduler hooks for concurrency managed workqueue.  Only to be used from
 * sched/core.c and workqueue.c.
 */
void wq_worker_waking_up(struct task_struct *task, int cpu);
struct task_struct *wq_worker_sleeping(struct task_struct *task, int cpu);

#endif /* _KERNEL_WORKQUEUE_INTERNAL_H */
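
/*
 * Illustrative sketch, not part of the header proper: roughly how the
 * scheduler hooks declared above are driven from sched/core.c.  This is a
 * paraphrase for documentation purposes only; helpers such as
 * try_to_wake_up_local() and cpu_of() are scheduler internals assumed
 * here, not anything declared by this file.
 *
 *	In ttwu_activate(), a concurrency-managed worker is waking up:
 *
 *		if (p->flags & PF_WQ_WORKER)
 *			wq_worker_waking_up(p, cpu_of(rq));
 *
 *	In __schedule(), a worker is about to sleep; if its pool needs
 *	another worker to keep running, wake that one on this CPU:
 *
 *		if (prev->flags & PF_WQ_WORKER) {
 *			struct task_struct *to_wakeup;
 *
 *			to_wakeup = wq_worker_sleeping(prev, cpu);
 *			if (to_wakeup)
 *				try_to_wake_up_local(to_wakeup);
 *		}
 */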