xref: /linux-6.15/include/linux/workqueue.h (revision a115bc07)
1 /*
2  * workqueue.h --- work queue handling for Linux.
3  */
4 
5 #ifndef _LINUX_WORKQUEUE_H
6 #define _LINUX_WORKQUEUE_H
7 
8 #include <linux/timer.h>
9 #include <linux/linkage.h>
10 #include <linux/bitops.h>
11 #include <linux/lockdep.h>
12 #include <asm/atomic.h>
13 
14 struct workqueue_struct;
15 
16 struct work_struct;
17 typedef void (*work_func_t)(struct work_struct *work);
18 
19 /*
20  * The first word is the work queue pointer and the flags rolled into
21  * one
22  */
23 #define work_data_bits(work) ((unsigned long *)(&(work)->data))
24 
25 struct work_struct {
26 	atomic_long_t data;
27 #define WORK_STRUCT_PENDING 0		/* T if work item pending execution */
28 #define WORK_STRUCT_STATIC  1		/* static initializer (debugobjects) */
29 #define WORK_STRUCT_FLAG_MASK (3UL)
30 #define WORK_STRUCT_WQ_DATA_MASK (~WORK_STRUCT_FLAG_MASK)
31 	struct list_head entry;
32 	work_func_t func;
33 #ifdef CONFIG_LOCKDEP
34 	struct lockdep_map lockdep_map;
35 #endif
36 };
37 
38 #define WORK_DATA_INIT()	ATOMIC_LONG_INIT(0)
39 #define WORK_DATA_STATIC_INIT()	ATOMIC_LONG_INIT(2)
40 
41 struct delayed_work {
42 	struct work_struct work;
43 	struct timer_list timer;
44 };
45 
46 static inline struct delayed_work *to_delayed_work(struct work_struct *work)
47 {
48 	return container_of(work, struct delayed_work, work);
49 }
50 
51 struct execute_work {
52 	struct work_struct work;
53 };
54 
55 #ifdef CONFIG_LOCKDEP
56 /*
57  * NB: because we have to copy the lockdep_map, setting _key
58  * here is required, otherwise it could get initialised to the
59  * copy of the lockdep_map!
60  */
61 #define __WORK_INIT_LOCKDEP_MAP(n, k) \
62 	.lockdep_map = STATIC_LOCKDEP_MAP_INIT(n, k),
63 #else
64 #define __WORK_INIT_LOCKDEP_MAP(n, k)
65 #endif
66 
67 #define __WORK_INITIALIZER(n, f) {				\
68 	.data = WORK_DATA_STATIC_INIT(),			\
69 	.entry	= { &(n).entry, &(n).entry },			\
70 	.func = (f),						\
71 	__WORK_INIT_LOCKDEP_MAP(#n, &(n))			\
72 	}
73 
74 #define __DELAYED_WORK_INITIALIZER(n, f) {			\
75 	.work = __WORK_INITIALIZER((n).work, (f)),		\
76 	.timer = TIMER_INITIALIZER(NULL, 0, 0),			\
77 	}
78 
79 #define DECLARE_WORK(n, f)					\
80 	struct work_struct n = __WORK_INITIALIZER(n, f)
81 
82 #define DECLARE_DELAYED_WORK(n, f)				\
83 	struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f)
84 
85 /*
86  * initialize a work item's function pointer
87  */
88 #define PREPARE_WORK(_work, _func)				\
89 	do {							\
90 		(_work)->func = (_func);			\
91 	} while (0)
92 
93 #define PREPARE_DELAYED_WORK(_work, _func)			\
94 	PREPARE_WORK(&(_work)->work, (_func))
95 
96 #ifdef CONFIG_DEBUG_OBJECTS_WORK
97 extern void __init_work(struct work_struct *work, int onstack);
98 extern void destroy_work_on_stack(struct work_struct *work);
99 #else
100 static inline void __init_work(struct work_struct *work, int onstack) { }
101 static inline void destroy_work_on_stack(struct work_struct *work) { }
102 #endif
103 
104 /*
105  * initialize all of a work item in one go
106  *
107  * NOTE! No point in using "atomic_long_set()": using a direct
108  * assignment of the work data initializer allows the compiler
109  * to generate better code.
110  */
111 #ifdef CONFIG_LOCKDEP
112 #define __INIT_WORK(_work, _func, _onstack)				\
113 	do {								\
114 		static struct lock_class_key __key;			\
115 									\
116 		__init_work((_work), _onstack);				\
117 		(_work)->data = (atomic_long_t) WORK_DATA_INIT();	\
118 		lockdep_init_map(&(_work)->lockdep_map, #_work, &__key, 0);\
119 		INIT_LIST_HEAD(&(_work)->entry);			\
120 		PREPARE_WORK((_work), (_func));				\
121 	} while (0)
122 #else
123 #define __INIT_WORK(_work, _func, _onstack)				\
124 	do {								\
125 		__init_work((_work), _onstack);				\
126 		(_work)->data = (atomic_long_t) WORK_DATA_INIT();	\
127 		INIT_LIST_HEAD(&(_work)->entry);			\
128 		PREPARE_WORK((_work), (_func));				\
129 	} while (0)
130 #endif
131 
132 #define INIT_WORK(_work, _func)					\
133 	do {							\
134 		__INIT_WORK((_work), (_func), 0);		\
135 	} while (0)
136 
137 #define INIT_WORK_ON_STACK(_work, _func)			\
138 	do {							\
139 		__INIT_WORK((_work), (_func), 1);		\
140 	} while (0)
141 
142 #define INIT_DELAYED_WORK(_work, _func)				\
143 	do {							\
144 		INIT_WORK(&(_work)->work, (_func));		\
145 		init_timer(&(_work)->timer);			\
146 	} while (0)
147 
148 #define INIT_DELAYED_WORK_ON_STACK(_work, _func)		\
149 	do {							\
150 		INIT_WORK_ON_STACK(&(_work)->work, (_func));	\
151 		init_timer_on_stack(&(_work)->timer);		\
152 	} while (0)
153 
154 #define INIT_DELAYED_WORK_DEFERRABLE(_work, _func)		\
155 	do {							\
156 		INIT_WORK(&(_work)->work, (_func));		\
157 		init_timer_deferrable(&(_work)->timer);		\
158 	} while (0)
159 
160 /**
161  * work_pending - Find out whether a work item is currently pending
162  * @work: The work item in question
163  */
164 #define work_pending(work) \
165 	test_bit(WORK_STRUCT_PENDING, work_data_bits(work))
166 
167 /**
168  * delayed_work_pending - Find out whether a delayable work item is currently
169  * pending
170  * @work: The work item in question
171  */
172 #define delayed_work_pending(w) \
173 	work_pending(&(w)->work)
174 
175 /**
176  * work_clear_pending - for internal use only, mark a work item as not pending
177  * @work: The work item in question
178  */
179 #define work_clear_pending(work) \
180 	clear_bit(WORK_STRUCT_PENDING, work_data_bits(work))
181 
182 
183 extern struct workqueue_struct *
184 __create_workqueue_key(const char *name, int singlethread,
185 		       int freezeable, int rt, struct lock_class_key *key,
186 		       const char *lock_name);
187 
188 #ifdef CONFIG_LOCKDEP
189 #define __create_workqueue(name, singlethread, freezeable, rt)	\
190 ({								\
191 	static struct lock_class_key __key;			\
192 	const char *__lock_name;				\
193 								\
194 	if (__builtin_constant_p(name))				\
195 		__lock_name = (name);				\
196 	else							\
197 		__lock_name = #name;				\
198 								\
199 	__create_workqueue_key((name), (singlethread),		\
200 			       (freezeable), (rt), &__key,	\
201 			       __lock_name);			\
202 })
203 #else
204 #define __create_workqueue(name, singlethread, freezeable, rt)	\
205 	__create_workqueue_key((name), (singlethread), (freezeable), (rt), \
206 			       NULL, NULL)
207 #endif
208 
209 #define create_workqueue(name) __create_workqueue((name), 0, 0, 0)
210 #define create_rt_workqueue(name) __create_workqueue((name), 0, 0, 1)
211 #define create_freezeable_workqueue(name) __create_workqueue((name), 1, 1, 0)
212 #define create_singlethread_workqueue(name) __create_workqueue((name), 1, 0, 0)
213 
214 extern void destroy_workqueue(struct workqueue_struct *wq);
215 
216 extern int queue_work(struct workqueue_struct *wq, struct work_struct *work);
217 extern int queue_work_on(int cpu, struct workqueue_struct *wq,
218 			struct work_struct *work);
219 extern int queue_delayed_work(struct workqueue_struct *wq,
220 			struct delayed_work *work, unsigned long delay);
221 extern int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
222 			struct delayed_work *work, unsigned long delay);
223 
224 extern void flush_workqueue(struct workqueue_struct *wq);
225 extern void flush_scheduled_work(void);
226 extern void flush_delayed_work(struct delayed_work *work);
227 
228 extern int schedule_work(struct work_struct *work);
229 extern int schedule_work_on(int cpu, struct work_struct *work);
230 extern int schedule_delayed_work(struct delayed_work *work, unsigned long delay);
231 extern int schedule_delayed_work_on(int cpu, struct delayed_work *work,
232 					unsigned long delay);
233 extern int schedule_on_each_cpu(work_func_t func);
234 extern int current_is_keventd(void);
235 extern int keventd_up(void);
236 
237 extern void init_workqueues(void);
238 int execute_in_process_context(work_func_t fn, struct execute_work *);
239 
240 extern int flush_work(struct work_struct *work);
241 
242 extern int cancel_work_sync(struct work_struct *work);
243 
244 /*
245  * Kill off a pending schedule_delayed_work().  Note that the work callback
246  * function may still be running on return from cancel_delayed_work(), unless
247  * it returns 1 and the work doesn't re-arm itself. Run flush_workqueue() or
248  * cancel_work_sync() to wait on it.
249  */
250 static inline int cancel_delayed_work(struct delayed_work *work)
251 {
252 	int ret;
253 
254 	ret = del_timer_sync(&work->timer);
255 	if (ret)
256 		work_clear_pending(&work->work);
257 	return ret;
258 }
259 
260 /*
261  * Like above, but uses del_timer() instead of del_timer_sync(). This means,
262  * if it returns 0 the timer function may be running and the queueing is in
263  * progress.
264  */
265 static inline int __cancel_delayed_work(struct delayed_work *work)
266 {
267 	int ret;
268 
269 	ret = del_timer(&work->timer);
270 	if (ret)
271 		work_clear_pending(&work->work);
272 	return ret;
273 }
274 
275 extern int cancel_delayed_work_sync(struct delayed_work *work);
276 
277 /* Obsolete. use cancel_delayed_work_sync() */
278 static inline
279 void cancel_rearming_delayed_workqueue(struct workqueue_struct *wq,
280 					struct delayed_work *work)
281 {
282 	cancel_delayed_work_sync(work);
283 }
284 
285 /* Obsolete. use cancel_delayed_work_sync() */
286 static inline
287 void cancel_rearming_delayed_work(struct delayed_work *work)
288 {
289 	cancel_delayed_work_sync(work);
290 }
291 
292 #ifndef CONFIG_SMP
293 static inline long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
294 {
295 	return fn(arg);
296 }
297 #else
298 long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg);
299 #endif /* CONFIG_SMP */
300 #endif
301