/*-
 * Copyright (c) 2017 Hans Petter Selasky
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include <sys/types.h>
#include <sys/malloc.h>
#include <sys/gtaskqueue.h>
#include <sys/proc.h>
#include <sys/sched.h>

#include <linux/compiler.h>
#include <linux/interrupt.h>
#include <linux/compat.h>

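/*
 * Tasklet states (summarized from the transitions in this file):
 *
 * IDLE - neither queued nor running
 * BUSY - queued on a worker, waiting to execute
 * EXEC - the callback is currently running
 * LOOP - rescheduled while running; run the callback again
 */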
#define TASKLET_ST_IDLE 0
#define TASKLET_ST_BUSY 1
#define TASKLET_ST_EXEC 2
#define TASKLET_ST_LOOP 3

#define TASKLET_ST_CMPSET(ts, old, new) \
	atomic_cmpset_int((volatile u_int *)&(ts)->tasklet_state, old, new)

#define TASKLET_ST_SET(ts, new) \
	WRITE_ONCE(*(volatile u_int *)&(ts)->tasklet_state, new)

#define TASKLET_ST_GET(ts) \
	READ_ONCE(*(volatile u_int *)&(ts)->tasklet_state)

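/*
 * Per-CPU worker: a mutex-protected list of pending tasklets, drained
 * by a grouptask bound to that CPU. The structure is cache line
 * aligned to avoid false sharing between CPUs.
 */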
struct tasklet_worker {
	struct mtx mtx;
	TAILQ_HEAD(tasklet_list, tasklet_struct) head;
	struct grouptask gtask;
} __aligned(CACHE_LINE_SIZE);

#define TASKLET_WORKER_LOCK(tw) mtx_lock(&(tw)->mtx)
#define TASKLET_WORKER_UNLOCK(tw) mtx_unlock(&(tw)->mtx)

DPCPU_DEFINE_STATIC(struct tasklet_worker, tasklet_worker);

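/*
 * Worker loop: drain the per-CPU tasklet list. The tail is sampled
 * before the loop so that a disabled tasklet, which is re-queued at
 * the tail below, ends the current pass instead of making the loop
 * spin. The inner do/while re-runs a callback whose state moved from
 * EXEC to LOOP, i.e. one that was rescheduled while executing.
 */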
static void
tasklet_handler(void *arg)
{
	struct tasklet_worker *tw = (struct tasklet_worker *)arg;
	struct tasklet_struct *ts;
	struct tasklet_struct *last;

	linux_set_current(curthread);

	TASKLET_WORKER_LOCK(tw);
	last = TAILQ_LAST(&tw->head, tasklet_list);
	while (1) {
		ts = TAILQ_FIRST(&tw->head);
		if (ts == NULL)
			break;
		TAILQ_REMOVE(&tw->head, ts, entry);

		if (!atomic_read(&ts->count)) {
			TASKLET_WORKER_UNLOCK(tw);
			do {
				/* reset executing state */
				TASKLET_ST_SET(ts, TASKLET_ST_EXEC);

				if (ts->use_callback)
					ts->callback(ts);
				else
					ts->func(ts->data);

				/* repeat if rescheduled (EXEC -> LOOP) */
			} while (TASKLET_ST_CMPSET(ts, TASKLET_ST_EXEC,
			    TASKLET_ST_IDLE) == 0);
			TASKLET_WORKER_LOCK(tw);
		} else {
			/* tasklet is disabled; keep it pending */
			TAILQ_INSERT_TAIL(&tw->head, ts, entry);
		}
		if (ts == last)
			break;
	}
	TASKLET_WORKER_UNLOCK(tw);
}

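/*
 * Start-up: create one worker per present CPU and attach it to the
 * softirq taskqgroup, so that a scheduled tasklet runs on the CPU it
 * was scheduled from (tasklet_schedule() picks the current CPU's
 * worker), approximating Linux softirq behaviour.
 */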
static void
tasklet_subsystem_init(void *arg __unused)
{
	struct tasklet_worker *tw;
	char buf[32];
	int i;

	CPU_FOREACH(i) {
		if (CPU_ABSENT(i))
			continue;

		tw = DPCPU_ID_PTR(i, tasklet_worker);

		mtx_init(&tw->mtx, "linux_tasklet", NULL, MTX_DEF);
		TAILQ_INIT(&tw->head);
		GROUPTASK_INIT(&tw->gtask, 0, tasklet_handler, tw);
		snprintf(buf, sizeof(buf), "softirq%d", i);
		taskqgroup_attach_cpu(qgroup_softirq, &tw->gtask,
		    "tasklet", i, NULL, NULL, buf);
	}
}
SYSINIT(linux_tasklet, SI_SUB_TASKQ, SI_ORDER_THIRD, tasklet_subsystem_init, NULL);

static void
tasklet_subsystem_uninit(void *arg __unused)
{
	struct tasklet_worker *tw;
	int i;

	taskqgroup_drain_all(qgroup_softirq);

	CPU_FOREACH(i) {
		if (CPU_ABSENT(i))
			continue;

		tw = DPCPU_ID_PTR(i, tasklet_worker);

		taskqgroup_detach(qgroup_softirq, &tw->gtask);
		mtx_destroy(&tw->mtx);
	}
}
SYSUNINIT(linux_tasklet, SI_SUB_TASKQ, SI_ORDER_THIRD, tasklet_subsystem_uninit, NULL);

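/*
 * tasklet_init() implements the older Linux interface where the
 * handler receives an opaque "data" argument; tasklet_setup()
 * implements the newer interface where the handler receives the
 * tasklet_struct itself (a historical note, not derived from this
 * file).
 */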
void
tasklet_init(struct tasklet_struct *ts,
    tasklet_func_t *func, unsigned long data)
{
	ts->entry.tqe_prev = NULL;
	ts->entry.tqe_next = NULL;
	ts->func = func;
	ts->callback = NULL;
	ts->data = data;
	atomic_set_int(&ts->tasklet_state, TASKLET_ST_IDLE);
	atomic_set(&ts->count, 0);
	ts->use_callback = false;
}

void
tasklet_setup(struct tasklet_struct *ts, tasklet_callback_t *c)
{
	ts->entry.tqe_prev = NULL;
	ts->entry.tqe_next = NULL;
	ts->func = NULL;
	ts->callback = c;
	ts->data = 0;
	atomic_set_int(&ts->tasklet_state, TASKLET_ST_IDLE);
	atomic_set(&ts->count, 0);
	ts->use_callback = true;
}

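/*
 * A minimal usage sketch from a hypothetical driver; "foo_softc",
 * "foo_work" and the softc field names are made up for illustration:
 *
 *	static void
 *	foo_work(unsigned long data)
 *	{
 *		struct foo_softc *sc = (struct foo_softc *)data;
 *
 *		(void)sc;	(deferred work goes here)
 *	}
 *
 *	tasklet_init(&sc->fs_tasklet, foo_work, (unsigned long)sc);
 *	tasklet_schedule(&sc->fs_tasklet);	(e.g. from an interrupt handler)
 *	...
 *	tasklet_kill(&sc->fs_tasklet);		(wait for completion on detach)
 */
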
/*
 * Linux uses local_bh_disable()/local_bh_enable() to keep bottom
 * halves off the current CPU. Pinning the thread to its CPU is the
 * closest equivalent this compat layer provides.
 */
void
local_bh_enable(void)
{
	sched_unpin();
}

void
local_bh_disable(void)
{
	sched_pin();
}

void
tasklet_schedule(struct tasklet_struct *ts)
{

	/* tasklet is paused */
	if (atomic_read(&ts->count))
		return;

	if (TASKLET_ST_CMPSET(ts, TASKLET_ST_EXEC, TASKLET_ST_LOOP)) {
		/* tasklet_handler() will loop */
	} else if (TASKLET_ST_CMPSET(ts, TASKLET_ST_IDLE, TASKLET_ST_BUSY)) {
		struct tasklet_worker *tw;

		tw = &DPCPU_GET(tasklet_worker);

		/* tasklet_handler() was not queued */
		TASKLET_WORKER_LOCK(tw);
		/* enqueue tasklet */
		TAILQ_INSERT_TAIL(&tw->head, ts, entry);
		/* schedule worker */
		GROUPTASK_ENQUEUE(&tw->gtask);
		TASKLET_WORKER_UNLOCK(tw);
	} else {
		/*
		 * tasklet_handler() is already executing
		 *
		 * If the state is neither EXEC nor IDLE, it is either
		 * LOOP or BUSY. If the state changed between the two
		 * CMPSETs above, the only possible transitions by
		 * elimination are LOOP->EXEC and BUSY->EXEC. If an
		 * EXEC->LOOP transition was missed, that is not a
		 * problem because the callback function is then
		 * already about to be called again.
		 */
	}
}

void
tasklet_kill(struct tasklet_struct *ts)
{

	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "tasklet_kill() can sleep");

	/* wait until tasklet is no longer busy */
	while (TASKLET_ST_GET(ts) != TASKLET_ST_IDLE)
		pause("W", 1);
}

void
tasklet_enable(struct tasklet_struct *ts)
{

	atomic_dec(&ts->count);
}

void
tasklet_disable(struct tasklet_struct *ts)
{

	atomic_inc(&ts->count);
	tasklet_unlock_wait(ts);
}

void
tasklet_disable_nosync(struct tasklet_struct *ts)
{
	atomic_inc(&ts->count);
	barrier();
}

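/*
 * tasklet_trylock()/tasklet_unlock()/tasklet_unlock_wait() emulate
 * Linux's tasklet locking on top of the state word: trylock succeeds
 * only when the tasklet is idle, and unlock_wait sleeps until the
 * callback has finished running.
 */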
int
tasklet_trylock(struct tasklet_struct *ts)
{

	return (TASKLET_ST_CMPSET(ts, TASKLET_ST_IDLE, TASKLET_ST_BUSY));
}

void
tasklet_unlock(struct tasklet_struct *ts)
{

	TASKLET_ST_SET(ts, TASKLET_ST_IDLE);
}

void
tasklet_unlock_wait(struct tasklet_struct *ts)
{

	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "tasklet_unlock_wait() can sleep");

	/* wait until tasklet is no longer busy */
	while (TASKLET_ST_GET(ts) != TASKLET_ST_IDLE)
		pause("W", 1);
}