xref: /linux-6.15/kernel/workqueue_internal.h (revision 2eaebdb3)
/*
 * kernel/workqueue_internal.h
 *
 * Workqueue internal header file.  Only to be included by workqueue and
 * core kernel subsystems.
 */
#ifndef _KERNEL_WORKQUEUE_INTERNAL_H
#define _KERNEL_WORKQUEUE_INTERNAL_H

#include <linux/workqueue.h>

struct global_cwq;
struct worker_pool;

/*
 * The poor guys doing the actual heavy lifting.  All on-duty workers are
 * either serving the manager role, on idle list or on busy hash.  For
 * details on the locking annotation (L, I, X...), refer to workqueue.c.
 *
 * Only to be used in workqueue and async.
 */
struct worker {
	/* on idle list while idle, on busy hash table while busy */
	union {
		struct list_head	entry;	/* L: while idle */
		struct hlist_node	hentry;	/* L: while busy */
	};

	struct work_struct	*current_work;	/* L: work being processed */
	work_func_t		current_func;	/* L: current_work's fn */
	struct cpu_workqueue_struct *current_cwq; /* L: current_work's cwq */
	struct list_head	scheduled;	/* L: scheduled works */
	struct task_struct	*task;		/* I: worker task */
	struct worker_pool	*pool;		/* I: the associated pool */
	/* 64 bytes boundary on 64bit, 32 on 32bit */
	unsigned long		last_active;	/* L: last active timestamp */
	unsigned int		flags;		/* X: flags */
	int			id;		/* I: worker id */

	/* for rebinding worker to CPU */
	struct work_struct	rebind_work;	/* L: for busy worker */

	/* used only by rescuers to point to the target workqueue */
	struct workqueue_struct	*rescue_wq;	/* I: the workqueue to rescue */
};
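
/*
 * Illustrative sketch only (not part of the original header): how the
 * entry/hentry union above is meant to be used.  A worker is never on
 * the idle list and in the busy hash at the same time, so the two link
 * nodes can share storage.  The pool->idle_list name follows workqueue.c
 * of this era; busy_hash_head() is a hypothetical helper standing in for
 * however the busy hash bucket is actually looked up.
 *
 *	// going idle: hang the worker off its pool's idle list
 *	list_add(&worker->entry, &pool->idle_list);
 *
 *	// picking up @work: leave the idle list and enter the busy hash,
 *	// keyed by the work item, so the pool can later find out which
 *	// worker is executing a given work
 *	list_del_init(&worker->entry);
 *	hlist_add_head(&worker->hentry, busy_hash_head(pool, work));
 */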

/*
 * Scheduler hooks for concurrency managed workqueue.  Only to be used from
 * sched.c and workqueue.c.
 */
void wq_worker_waking_up(struct task_struct *task, unsigned int cpu);
struct task_struct *wq_worker_sleeping(struct task_struct *task,
				       unsigned int cpu);
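
/*
 * Illustrative sketch only (an assumption based on how sched/core.c of
 * this era calls the hooks, not a verbatim excerpt): the scheduler
 * notifies the workqueue code when a worker task wakes up or blocks, and
 * wq_worker_sleeping() may hand back another worker of the same pool
 * that should be woken to keep the CPU busy.
 *
 *	// on wakeup of a workqueue worker
 *	if (p->flags & PF_WQ_WORKER)
 *		wq_worker_waking_up(p, cpu_of(rq));
 *
 *	// when a worker is about to go to sleep inside __schedule()
 *	if (prev->flags & PF_WQ_WORKER) {
 *		struct task_struct *to_wakeup;
 *
 *		to_wakeup = wq_worker_sleeping(prev, cpu);
 *		if (to_wakeup)
 *			try_to_wake_up_local(to_wakeup);
 *	}
 */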

#endif /* _KERNEL_WORKQUEUE_INTERNAL_H */