xref: /linux-6.15/kernel/workqueue_internal.h (revision 9b7f6597)
1ea138446STejun Heo /*
2ea138446STejun Heo  * kernel/workqueue_internal.h
3ea138446STejun Heo  *
4ea138446STejun Heo  * Workqueue internal header file.  Only to be included by workqueue and
5ea138446STejun Heo  * core kernel subsystems.
6ea138446STejun Heo  */
7ea138446STejun Heo #ifndef _KERNEL_WORKQUEUE_INTERNAL_H
8ea138446STejun Heo #define _KERNEL_WORKQUEUE_INTERNAL_H
9ea138446STejun Heo 
102eaebdb3STejun Heo #include <linux/workqueue.h>
1184b233adSTejun Heo #include <linux/kthread.h>
122eaebdb3STejun Heo 
132eaebdb3STejun Heo struct worker_pool;
142eaebdb3STejun Heo 
152eaebdb3STejun Heo /*
162eaebdb3STejun Heo  * The poor guys doing the actual heavy lifting.  All on-duty workers are
172eaebdb3STejun Heo  * either serving the manager role, on idle list or on busy hash.  For
182eaebdb3STejun Heo  * details on the locking annotation (L, I, X...), refer to workqueue.c.
192eaebdb3STejun Heo  *
202eaebdb3STejun Heo  * Only to be used in workqueue and async.
212eaebdb3STejun Heo  */
struct worker {
	/* on idle list while idle, on busy hash table while busy */
	union {
		struct list_head	entry;	/* L: while idle */
		struct hlist_node	hentry;	/* L: while busy */
	};

	struct work_struct	*current_work;	/* L: work being processed */
	work_func_t		current_func;	/* L: current_work's fn */
	struct pool_workqueue	*current_pwq; /* L: current_work's pwq */
	bool			desc_valid;	/* ->desc is valid */
	struct list_head	scheduled;	/* L: scheduled works */

	/* 64 bytes boundary on 64bit, 32 on 32bit */

	struct task_struct	*task;		/* I: worker task */
	struct worker_pool	*pool;		/* I: the associated pool */
						/* L: for rescuers */
	struct list_head	node;		/* A: anchored at pool->workers */
						/* A: runs through worker->node */

	unsigned long		last_active;	/* L: last active timestamp */
	unsigned int		flags;		/* X: flags */
	int			id;		/* I: worker id */

	/*
	 * Opaque string set with work_set_desc().  Printed out with task
	 * dump for debugging - WARN, BUG, panic or sysrq.
	 */
	char			desc[WORKER_DESC_LEN];

	/* used only by rescuers to point to the target workqueue */
	struct workqueue_struct	*rescue_wq;	/* I: the workqueue to rescue */
};
562eaebdb3STejun Heo 
5784b233adSTejun Heo /**
5884b233adSTejun Heo  * current_wq_worker - return struct worker if %current is a workqueue worker
5984b233adSTejun Heo  */
6084b233adSTejun Heo static inline struct worker *current_wq_worker(void)
6184b233adSTejun Heo {
6284b233adSTejun Heo 	if (current->flags & PF_WQ_WORKER)
6384b233adSTejun Heo 		return kthread_data(current);
6484b233adSTejun Heo 	return NULL;
6584b233adSTejun Heo }
6684b233adSTejun Heo 
67ea138446STejun Heo /*
68ea138446STejun Heo  * Scheduler hooks for concurrency managed workqueue.  Only to be used from
690a0fca9dSViresh Kumar  * sched/core.c and workqueue.c.
70ea138446STejun Heo  */
71d84ff051STejun Heo void wq_worker_waking_up(struct task_struct *task, int cpu);
72*9b7f6597SAlexander Gordeev struct task_struct *wq_worker_sleeping(struct task_struct *task);
73ea138446STejun Heo 
74ea138446STejun Heo #endif /* _KERNEL_WORKQUEUE_INTERNAL_H */
75