/*
 * kernel/workqueue_internal.h
 *
 * Workqueue internal header file.  Only to be included by workqueue and
 * core kernel subsystems.
 */
#ifndef _KERNEL_WORKQUEUE_INTERNAL_H
#define _KERNEL_WORKQUEUE_INTERNAL_H

#include <linux/workqueue.h>
#include <linux/kthread.h>

struct worker_pool;

/*
 * The poor guys doing the actual heavy lifting.  All on-duty workers are
 * either serving the manager role, on idle list or on busy hash.  For
 * details on the locking annotation (L, I, X...), refer to workqueue.c.
 *
 * Only to be used in workqueue and async.
 */
struct worker {
	/* on idle list while idle, on busy hash table while busy */
	union {
		struct list_head	entry;	/* L: while idle */
		struct hlist_node	hentry;	/* L: while busy */
	};

	struct work_struct	*current_work;	/* L: work being processed */
	work_func_t		current_func;	/* L: current_work's fn */
	struct pool_workqueue	*current_pwq;	/* L: current_work's pwq */
	struct list_head	scheduled;	/* L: scheduled works */
	struct task_struct	*task;		/* I: worker task */
	struct worker_pool	*pool;		/* I: the associated pool */
						/* L: for rescuers */
	/* 64 bytes boundary on 64bit, 32 on 32bit */
	unsigned long		last_active;	/* L: last active timestamp */
	unsigned int		flags;		/* X: flags */
	int			id;		/* I: worker id */

	/* for rebinding worker to CPU */
	struct work_struct	rebind_work;	/* L: for busy worker */

	/* used only by rescuers to point to the target workqueue */
	struct workqueue_struct	*rescue_wq;	/* I: the workqueue to rescue */
};

/**
 * current_wq_worker - return struct worker if %current is a workqueue worker
 */
static inline struct worker *current_wq_worker(void)
{
	if (current->flags & PF_WQ_WORKER)
		return kthread_data(current);
	return NULL;
}

/*
 * Scheduler hooks for concurrency managed workqueue.  Only to be used from
 * sched.c and workqueue.c.
 */
void wq_worker_waking_up(struct task_struct *task, int cpu);
struct task_struct *wq_worker_sleeping(struct task_struct *task, int cpu);

#endif /* _KERNEL_WORKQUEUE_INTERNAL_H */
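
/*
 * Illustrative sketch, not part of this header: the comment above notes that
 * this file is only to be used by workqueue and async.  A typical async-side
 * use of current_wq_worker() is to check whether the work item currently
 * being executed belongs to the async subsystem.  The helper below is
 * modelled on kernel/async.c's current_is_async(); the name
 * example_current_is_async() is hypothetical and async_run_entry_fn is
 * assumed to be async.c's (file-local) work function, referenced here only
 * for illustration.
 */
static inline bool example_current_is_async(void)
{
	struct worker *worker = current_wq_worker();

	/* a worker is "async" iff the function it is running is async's */
	return worker && worker->current_func == async_run_entry_fn;
}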
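
/*
 * Illustrative sketch, not part of this header: the two scheduler hooks
 * declared above are expected to be called from the core scheduler for tasks
 * flagged PF_WQ_WORKER, so the worker pool can track how many workers are
 * actually running.  The hypothetical helper below only shows the call
 * pattern, loosely modelled on the wakeup and __schedule() paths in
 * kernel/sched/core.c of this era; wake_up_process() stands in for the
 * scheduler-internal try_to_wake_up_local() used on the real sleep path.
 */
static inline void example_wq_worker_sched_hooks(struct task_struct *p,
						 struct task_struct *prev,
						 int cpu)
{
	/* wakeup side: a concurrency-managed worker became runnable */
	if (p->flags & PF_WQ_WORKER)
		wq_worker_waking_up(p, cpu);

	/* sleep side: a worker is blocking; wake a replacement if suggested */
	if (prev->flags & PF_WQ_WORKER) {
		struct task_struct *to_wakeup = wq_worker_sleeping(prev, cpu);

		if (to_wakeup)
			wake_up_process(to_wakeup);
	}
}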