/*
 * workqueue.h --- work queue handling for Linux.
 */

#ifndef _LINUX_WORKQUEUE_H
#define _LINUX_WORKQUEUE_H

#include <linux/timer.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <asm/atomic.h>

struct workqueue_struct;

struct work_struct;
typedef void (*work_func_t)(struct work_struct *work);

/*
 * The first word of a work item is the workqueue pointer and the flags
 * rolled into one.
 */
#define work_data_bits(work) ((unsigned long *)(&(work)->data))

struct work_struct {
	atomic_long_t data;
#define WORK_STRUCT_PENDING 0		/* set if work item is pending execution */
#define WORK_STRUCT_FLAG_MASK (3UL)
#define WORK_STRUCT_WQ_DATA_MASK (~WORK_STRUCT_FLAG_MASK)
	struct list_head entry;
	work_func_t func;
};

#define WORK_DATA_INIT()	ATOMIC_LONG_INIT(0)

struct delayed_work {
	struct work_struct work;
	struct timer_list timer;
};

struct execute_work {
	struct work_struct work;
};

#define __WORK_INITIALIZER(n, f) {				\
	.data = WORK_DATA_INIT(),				\
	.entry	= { &(n).entry, &(n).entry },			\
	.func = (f),						\
	}

#define __DELAYED_WORK_INITIALIZER(n, f) {			\
	.work = __WORK_INITIALIZER((n).work, (f)),		\
	.timer = TIMER_INITIALIZER(NULL, 0, 0),			\
	}

#define DECLARE_WORK(n, f)					\
	struct work_struct n = __WORK_INITIALIZER(n, f)

#define DECLARE_DELAYED_WORK(n, f)				\
	struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f)

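/*
 * A minimal usage sketch of the static initializers above, assuming a
 * hypothetical handler called example_handler.  The callback receives the
 * work_struct itself; when the item is embedded in a larger object,
 * container_of() recovers the container (see the INIT_DELAYED_WORK()
 * sketch further down).
 *
 *	static void example_handler(struct work_struct *work)
 *	{
 *		pr_debug("example work ran\n");
 *	}
 *
 *	static DECLARE_WORK(example_work, example_handler);
 *	static DECLARE_DELAYED_WORK(example_dwork, example_handler);
 */
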
/*
 * initialize a work item's function pointer
 */
#define PREPARE_WORK(_work, _func)				\
	do {							\
		(_work)->func = (_func);			\
	} while (0)

#define PREPARE_DELAYED_WORK(_work, _func)			\
	PREPARE_WORK(&(_work)->work, (_func))

/*
 * initialize all of a work item in one go
 *
 * NOTE! No point in using "atomic_long_set()": using a direct
 * assignment of the work data initializer allows the compiler
 * to generate better code.
 */
#define INIT_WORK(_work, _func)						\
	do {								\
		(_work)->data = (atomic_long_t) WORK_DATA_INIT();	\
		INIT_LIST_HEAD(&(_work)->entry);			\
		PREPARE_WORK((_work), (_func));				\
	} while (0)

#define INIT_DELAYED_WORK(_work, _func)				\
	do {							\
		INIT_WORK(&(_work)->work, (_func));		\
		init_timer(&(_work)->timer);			\
	} while (0)

#define INIT_DELAYED_WORK_DEFERRABLE(_work, _func)			\
	do {							\
		INIT_WORK(&(_work)->work, (_func));		\
		init_timer_deferrable(&(_work)->timer);		\
	} while (0)

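/*
 * A sketch of run-time initialization with INIT_DELAYED_WORK(), assuming
 * a hypothetical, dynamically allocated example_dev structure.  The
 * handler uses container_of() to get from the embedded work_struct back
 * to the object that contains it:
 *
 *	struct example_dev {
 *		struct delayed_work poll_work;
 *	};
 *
 *	static void example_poll(struct work_struct *work)
 *	{
 *		struct example_dev *dev = container_of(work,
 *				struct example_dev, poll_work.work);
 *		...
 *	}
 *
 *	INIT_DELAYED_WORK(&dev->poll_work, example_poll);
 */
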
/**
 * work_pending - Find out whether a work item is currently pending
 * @work: The work item in question
 */
#define work_pending(work) \
	test_bit(WORK_STRUCT_PENDING, work_data_bits(work))

/**
 * delayed_work_pending - Find out whether a delayable work item is currently
 * pending
 * @w: The work item in question
 */
#define delayed_work_pending(w) \
	work_pending(&(w)->work)

/**
 * work_clear_pending - for internal use only, mark a work item as not pending
 * @work: The work item in question
 */
#define work_clear_pending(work) \
	clear_bit(WORK_STRUCT_PENDING, work_data_bits(work))


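/*
 * A small sketch of the pending test above, using the illustrative
 * example_work item.  queue_work() and schedule_work() already refuse to
 * queue an item whose pending bit is set (they return 0 in that case), so
 * an explicit check is mainly useful for statistics or debugging:
 *
 *	if (work_pending(&example_work))
 *		pr_debug("example work still queued\n");
 */
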
extern struct workqueue_struct *__create_workqueue(const char *name,
						    int singlethread,
						    int freezeable);
#define create_workqueue(name) __create_workqueue((name), 0, 0)
#define create_freezeable_workqueue(name) __create_workqueue((name), 1, 1)
#define create_singlethread_workqueue(name) __create_workqueue((name), 1, 0)

extern void destroy_workqueue(struct workqueue_struct *wq);

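/*
 * A sketch of creating and tearing down a private workqueue, assuming a
 * hypothetical driver that wants its own single-threaded queue instead of
 * the shared keventd queue:
 *
 *	struct workqueue_struct *example_wq;
 *
 *	example_wq = create_singlethread_workqueue("example");
 *	if (!example_wq)
 *		return -ENOMEM;
 *	...
 *	destroy_workqueue(example_wq);
 */
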
extern int FASTCALL(queue_work(struct workqueue_struct *wq, struct work_struct *work));
extern int FASTCALL(queue_delayed_work(struct workqueue_struct *wq,
			struct delayed_work *work, unsigned long delay));
extern int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct delayed_work *work, unsigned long delay);

extern void FASTCALL(flush_workqueue(struct workqueue_struct *wq));
extern void flush_scheduled_work(void);

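/*
 * A sketch of queueing onto a private workqueue and waiting for it to
 * drain, reusing the illustrative example_wq, example_work and
 * example_dwork objects from the sketches above.  Delays are given in
 * jiffies; msecs_to_jiffies() (from linux/jiffies.h) converts from
 * milliseconds:
 *
 *	queue_work(example_wq, &example_work);
 *	queue_delayed_work(example_wq, &example_dwork,
 *			   msecs_to_jiffies(100));
 *	...
 *	flush_workqueue(example_wq);
 */
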
extern int FASTCALL(schedule_work(struct work_struct *work));
extern int FASTCALL(schedule_delayed_work(struct delayed_work *work,
					unsigned long delay));
extern int schedule_delayed_work_on(int cpu, struct delayed_work *work,
					unsigned long delay);
extern int schedule_on_each_cpu(work_func_t func);
extern int current_is_keventd(void);
extern int keventd_up(void);

extern void init_workqueues(void);
int execute_in_process_context(work_func_t fn, struct execute_work *);

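/*
 * A sketch of using the shared keventd queue via the schedule_*()
 * wrappers, again with the illustrative example_work and example_dwork
 * items; the delay is in jiffies, so HZ / 2 is roughly half a second.
 * flush_scheduled_work() waits for everything queued on keventd so far:
 *
 *	schedule_work(&example_work);
 *	schedule_delayed_work(&example_dwork, HZ / 2);
 *	...
 *	flush_scheduled_work();
 */
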
extern void cancel_work_sync(struct work_struct *work);

/*
 * Kill off a pending schedule_delayed_work().  Note that the work callback
 * function may still be running on return from cancel_delayed_work(), unless
 * it returns 1 and the work doesn't re-arm itself. Run flush_workqueue() or
 * cancel_work_sync() to wait on it.
 */
static inline int cancel_delayed_work(struct delayed_work *work)
{
	int ret;

	ret = del_timer_sync(&work->timer);
	if (ret)
		work_clear_pending(&work->work);
	return ret;
}

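/*
 * A teardown sketch combining the cancel helpers, assuming the example_dev
 * object from the INIT_DELAYED_WORK() sketch above.  cancel_delayed_work()
 * only stops the timer; if it returns 0 the item may already be queued or
 * running, so cancel_work_sync() is still needed to be certain the handler
 * has finished:
 *
 *	if (!cancel_delayed_work(&dev->poll_work))
 *		cancel_work_sync(&dev->poll_work.work);
 */
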
extern void cancel_rearming_delayed_work(struct delayed_work *work);

/* Obsolete. Use cancel_rearming_delayed_work() instead. */
static inline
void cancel_rearming_delayed_workqueue(struct workqueue_struct *wq,
					struct delayed_work *work)
{
	cancel_rearming_delayed_work(work);
}

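/*
 * For delayed work whose handler re-queues itself, a sketch of the
 * dedicated helper above, again using the illustrative dev->poll_work
 * item; it keeps cancelling and waiting until the item has stopped
 * re-arming:
 *
 *	cancel_rearming_delayed_work(&dev->poll_work);
 */
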
#endif