xref: /linux-6.15/kernel/workqueue_internal.h (revision 112202d9)
1ea138446STejun Heo /*
2ea138446STejun Heo  * kernel/workqueue_internal.h
3ea138446STejun Heo  *
4ea138446STejun Heo  * Workqueue internal header file.  Only to be included by workqueue and
5ea138446STejun Heo  * core kernel subsystems.
6ea138446STejun Heo  */
7ea138446STejun Heo #ifndef _KERNEL_WORKQUEUE_INTERNAL_H
8ea138446STejun Heo #define _KERNEL_WORKQUEUE_INTERNAL_H
9ea138446STejun Heo 
102eaebdb3STejun Heo #include <linux/workqueue.h>
1184b233adSTejun Heo #include <linux/kthread.h>
122eaebdb3STejun Heo 
132eaebdb3STejun Heo struct worker_pool;
142eaebdb3STejun Heo 
152eaebdb3STejun Heo /*
162eaebdb3STejun Heo  * The poor guys doing the actual heavy lifting.  All on-duty workers are
172eaebdb3STejun Heo  * either serving the manager role, on idle list or on busy hash.  For
182eaebdb3STejun Heo  * details on the locking annotation (L, I, X...), refer to workqueue.c.
192eaebdb3STejun Heo  *
202eaebdb3STejun Heo  * Only to be used in workqueue and async.
212eaebdb3STejun Heo  */
struct worker {
	/*
	 * A worker is on exactly one of these at a time, so the two list
	 * linkages can share storage: the pool's idle list while idle,
	 * the pool's busy hash table while executing work.
	 */
	union {
		struct list_head	entry;	/* L: while idle */
		struct hlist_node	hentry;	/* L: while busy */
	};

	struct work_struct	*current_work;	/* L: work being processed */
	work_func_t		current_func;	/* L: current_work's fn */
	struct pool_workqueue	*current_pwq;	/* L: current_work's pwq */
	struct list_head	scheduled;	/* L: scheduled works */
	struct task_struct	*task;		/* I: worker task */
	struct worker_pool	*pool;		/* I: the associated pool */
	/* 64 bytes boundary on 64bit, 32 on 32bit */
	unsigned long		last_active;	/* L: last active timestamp */
	unsigned int		flags;		/* X: flags */
	int			id;		/* I: worker id */

	/* for rebinding worker to CPU */
	struct work_struct	rebind_work;	/* L: for busy worker */

	/* used only by rescuers to point to the target workqueue */
	struct workqueue_struct	*rescue_wq;	/* I: the workqueue to rescue */
};
462eaebdb3STejun Heo 
4784b233adSTejun Heo /**
4884b233adSTejun Heo  * current_wq_worker - return struct worker if %current is a workqueue worker
4984b233adSTejun Heo  */
5084b233adSTejun Heo static inline struct worker *current_wq_worker(void)
5184b233adSTejun Heo {
5284b233adSTejun Heo 	if (current->flags & PF_WQ_WORKER)
5384b233adSTejun Heo 		return kthread_data(current);
5484b233adSTejun Heo 	return NULL;
5584b233adSTejun Heo }
5684b233adSTejun Heo 
/*
 * Scheduler hooks for concurrency managed workqueue.  Only to be used from
 * sched.c and workqueue.c.
 */
/* notify the workqueue code that a worker task is waking up on @cpu */
void wq_worker_waking_up(struct task_struct *task, unsigned int cpu);
/*
 * Notify the workqueue code that a worker task is going to sleep on @cpu.
 * NOTE(review): returns a task pointer — presumably a worker to wake in its
 * place; confirm exact contract against the definition in workqueue.c.
 */
struct task_struct *wq_worker_sleeping(struct task_struct *task,
				       unsigned int cpu);
64ea138446STejun Heo 
65ea138446STejun Heo #endif /* _KERNEL_WORKQUEUE_INTERNAL_H */
66