/*
 * Copyright (c) 2009 Pawel Jakub Dawidek <[email protected]>
 * All rights reserved.
 *
 * Copyright (c) 2012 Spectra Logic Corporation. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/ck.h>
#include <sys/epoch.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/queue.h>
#include <sys/taskq.h>
#include <sys/taskqueue.h>
#include <sys/zfs_context.h>

#if defined(__i386__) || defined(__amd64__) || defined(__aarch64__)
#include <machine/pcb.h>
#endif

#include <vm/uma.h>

#if __FreeBSD_version < 1201522
#define	taskqueue_start_threads_in_proc(tqp, count, pri, proc, name, ...) \
	taskqueue_start_threads(tqp, count, pri, name, __VA_ARGS__)
#endif

static uint_t taskq_tsd;
static uma_zone_t taskq_zone;

taskq_t *system_taskq = NULL;
taskq_t *system_delay_taskq = NULL;
taskq_t *dynamic_taskq = NULL;

proc_t *system_proc;

extern int uma_align_cache;

static MALLOC_DEFINE(M_TASKQ, "taskq", "taskq structures");

static CK_LIST_HEAD(tqenthashhead, taskq_ent) *tqenthashtbl;
static unsigned long tqenthash;
static unsigned long tqenthashlock;
static struct sx *tqenthashtbl_lock;

static taskqid_t tqidnext;

#define	TQIDHASH(tqid)		(&tqenthashtbl[(tqid) & tqenthash])
#define	TQIDHASHLOCK(tqid)	(&tqenthashtbl_lock[((tqid) & tqenthashlock)])
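
/*
 * Dispatched entries are tracked in a hash table keyed by task ID, with a
 * smaller striped array of sx locks guarding the buckets.  Both masks are
 * powers of two minus one, so with e.g. tqenthash == 0x3f and
 * tqenthashlock == 0x7, tqid 0x45 maps to bucket 0x5 and lock stripe 0x5.
 * (Illustrative values only; the real sizes are derived from mp_ncpus in
 * system_taskq_init() below.)
 */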

#define	TIMEOUT_TASK 1
#define	NORMAL_TASK 2

static void
system_taskq_init(void *arg)
{
	int i;

	tsd_create(&taskq_tsd, NULL);
	tqenthashtbl = hashinit(mp_ncpus * 8, M_TASKQ, &tqenthash);
	tqenthashlock = (tqenthash + 1) / 8;
	if (tqenthashlock > 0)
		tqenthashlock--;
	tqenthashtbl_lock =
	    malloc(sizeof (*tqenthashtbl_lock) * (tqenthashlock + 1),
	    M_TASKQ, M_WAITOK | M_ZERO);
	for (i = 0; i < tqenthashlock + 1; i++)
		sx_init_flags(&tqenthashtbl_lock[i], "tqenthash", SX_DUPOK);
	taskq_zone = uma_zcreate("taskq_zone", sizeof (taskq_ent_t),
	    NULL, NULL, NULL, NULL,
	    UMA_ALIGN_CACHE, 0);
	system_taskq = taskq_create("system_taskq", mp_ncpus, minclsyspri,
	    0, 0, 0);
	system_delay_taskq = taskq_create("system_delay_taskq", mp_ncpus,
	    minclsyspri, 0, 0, 0);
}
SYSINIT(system_taskq_init, SI_SUB_CONFIGURE, SI_ORDER_ANY, system_taskq_init,
    NULL);

static void
system_taskq_fini(void *arg)
{
	int i;

	taskq_destroy(system_delay_taskq);
	taskq_destroy(system_taskq);
	uma_zdestroy(taskq_zone);
	tsd_destroy(&taskq_tsd);
	for (i = 0; i < tqenthashlock + 1; i++)
		sx_destroy(&tqenthashtbl_lock[i]);
	for (i = 0; i < tqenthash + 1; i++)
		VERIFY(CK_LIST_EMPTY(&tqenthashtbl[i]));
	free(tqenthashtbl_lock, M_TASKQ);
	free(tqenthashtbl, M_TASKQ);
}
SYSUNINIT(system_taskq_fini, SI_SUB_CONFIGURE, SI_ORDER_ANY, system_taskq_fini,
    NULL);

#ifdef __LP64__
static taskqid_t
__taskq_genid(void)
{
	taskqid_t tqid;

	/*
	 * Assume a 64-bit counter will not wrap in practice.
	 */
	tqid = atomic_add_64_nv(&tqidnext, 1);
	VERIFY(tqid);
	return (tqid);
}
#else
static taskqid_t
__taskq_genid(void)
{
	taskqid_t tqid;

	/*
	 * A task ID of 0 means "no task" to callers such as
	 * taskq_cancel_id(), so skip 0 when the 32-bit counter wraps.
	 */
	for (;;) {
		tqid = atomic_add_32_nv(&tqidnext, 1);
		if (__predict_true(tqid != 0))
			break;
	}
	VERIFY(tqid);
	return (tqid);
}
#endif

static taskq_ent_t *
taskq_lookup(taskqid_t tqid)
{
	taskq_ent_t *ent = NULL;

	sx_xlock(TQIDHASHLOCK(tqid));
	CK_LIST_FOREACH(ent, TQIDHASH(tqid), tqent_hash) {
		if (ent->tqent_id == tqid)
			break;
	}
	if (ent != NULL)
		refcount_acquire(&ent->tqent_rc);
	sx_xunlock(TQIDHASHLOCK(tqid));
	return (ent);
}
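
/*
 * A successful taskq_lookup() returns the entry with an extra reference
 * held; callers are expected to drop it with taskq_free() when done, as
 * taskq_cancel_id() and taskq_wait_id() below do.
 */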

static taskqid_t
taskq_insert(taskq_ent_t *ent)
{
	taskqid_t tqid;

	tqid = __taskq_genid();
	ent->tqent_id = tqid;
	ent->tqent_registered = B_TRUE;
	sx_xlock(TQIDHASHLOCK(tqid));
	CK_LIST_INSERT_HEAD(TQIDHASH(tqid), ent, tqent_hash);
	sx_xunlock(TQIDHASHLOCK(tqid));
	return (tqid);
}

static void
taskq_remove(taskq_ent_t *ent)
{
	taskqid_t tqid = ent->tqent_id;

	if (!ent->tqent_registered)
		return;

	sx_xlock(TQIDHASHLOCK(tqid));
	CK_LIST_REMOVE(ent, tqent_hash);
	sx_xunlock(TQIDHASHLOCK(tqid));
	ent->tqent_registered = B_FALSE;
}

static void
taskq_tsd_set(void *context)
{
	taskq_t *tq = context;

#if defined(__amd64__) || defined(__aarch64__)
	if (context != NULL && tsd_get(taskq_tsd) == NULL)
		fpu_kern_thread(FPU_KERN_NORMAL);
#endif
	tsd_set(taskq_tsd, tq);
}

static taskq_t *
taskq_create_impl(const char *name, int nthreads, pri_t pri,
    proc_t *proc __maybe_unused, uint_t flags)
{
	taskq_t *tq;

	if ((flags & TASKQ_THREADS_CPU_PCT) != 0)
		nthreads = MAX((mp_ncpus * nthreads) / 100, 1);

	tq = kmem_alloc(sizeof (*tq), KM_SLEEP);
	tq->tq_queue = taskqueue_create(name, M_WAITOK,
	    taskqueue_thread_enqueue, &tq->tq_queue);
	taskqueue_set_callback(tq->tq_queue, TASKQUEUE_CALLBACK_TYPE_INIT,
	    taskq_tsd_set, tq);
	taskqueue_set_callback(tq->tq_queue, TASKQUEUE_CALLBACK_TYPE_SHUTDOWN,
	    taskq_tsd_set, NULL);
	(void) taskqueue_start_threads_in_proc(&tq->tq_queue, nthreads, pri,
	    proc, "%s", name);

	return ((taskq_t *)tq);
}
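
/*
 * With TASKQ_THREADS_CPU_PCT, nthreads is interpreted as a percentage of
 * the CPU count rather than an absolute thread count: e.g. on an 8-CPU
 * machine, nthreads == 75 yields MAX((8 * 75) / 100, 1) == 6 threads.
 */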

taskq_t *
taskq_create(const char *name, int nthreads, pri_t pri, int minalloc __unused,
    int maxalloc __unused, uint_t flags)
{
	return (taskq_create_impl(name, nthreads, pri, system_proc, flags));
}

taskq_t *
taskq_create_proc(const char *name, int nthreads, pri_t pri,
    int minalloc __unused, int maxalloc __unused, proc_t *proc, uint_t flags)
{
	return (taskq_create_impl(name, nthreads, pri, proc, flags));
}
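
/*
 * Illustrative usage, a sketch rather than code from this file ("my_taskq"
 * and the thread count are hypothetical):
 *
 *	taskq_t *tq = taskq_create("my_taskq", 4, minclsyspri, 0, 0, 0);
 *	...dispatch work, wait for it...
 *	taskq_destroy(tq);
 *
 * minalloc and maxalloc are accepted for Solaris API compatibility but are
 * unused on FreeBSD, as the __unused annotations above indicate.
 */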

void
taskq_destroy(taskq_t *tq)
{

	taskqueue_free(tq->tq_queue);
	kmem_free(tq, sizeof (*tq));
}

int
taskq_member(taskq_t *tq, kthread_t *thread)
{

	return (taskqueue_member(tq->tq_queue, thread));
}

taskq_t *
taskq_of_curthread(void)
{
	return (tsd_get(taskq_tsd));
}

static void
taskq_free(taskq_ent_t *task)
{
	taskq_remove(task);
	if (refcount_release(&task->tqent_rc))
		uma_zfree(taskq_zone, task);
}

int
taskq_cancel_id(taskq_t *tq, taskqid_t tid)
{
	uint32_t pend;
	int rc;
	taskq_ent_t *ent;

	if (tid == 0)
		return (0);

	if ((ent = taskq_lookup(tid)) == NULL)
		return (0);

	ent->tqent_cancelled = B_TRUE;
	if (ent->tqent_type == TIMEOUT_TASK) {
		rc = taskqueue_cancel_timeout(tq->tq_queue,
		    &ent->tqent_timeout_task, &pend);
	} else {
		rc = taskqueue_cancel(tq->tq_queue, &ent->tqent_task, &pend);
	}
	if (rc == EBUSY) {
		taskqueue_drain(tq->tq_queue, &ent->tqent_task);
	} else if (pend) {
		/*
		 * Tasks normally free themselves when run, but here the task
		 * was cancelled so it did not free itself.
		 */
		taskq_free(ent);
	}
	/* Free the extra reference we added with taskq_lookup. */
	taskq_free(ent);
	return (rc);
}
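
/*
 * Illustrative caller pattern (tid comes from an earlier dispatch):
 *
 *	rc = taskq_cancel_id(tq, tid);
 *
 * rc == 0 means the task was cancelled before running (or was unknown);
 * rc == EBUSY means it had already started, in which case the call drains
 * it, so the task has completed by the time taskq_cancel_id() returns.
 */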

static void
taskq_run(void *arg, int pending __unused)
{
	taskq_ent_t *task = arg;

	if (!task->tqent_cancelled)
		task->tqent_func(task->tqent_arg);
	taskq_free(task);
}

taskqid_t
taskq_dispatch_delay(taskq_t *tq, task_func_t func, void *arg,
    uint_t flags, clock_t expire_time)
{
	taskq_ent_t *task;
	taskqid_t tqid;
	clock_t timo;
	int mflag;

	timo = expire_time - ddi_get_lbolt();
	if (timo <= 0)
		return (taskq_dispatch(tq, func, arg, flags));

	if ((flags & (TQ_SLEEP | TQ_NOQUEUE)) == TQ_SLEEP)
		mflag = M_WAITOK;
	else
		mflag = M_NOWAIT;

	task = uma_zalloc(taskq_zone, mflag);
	if (task == NULL)
		return (0);
	task->tqent_func = func;
	task->tqent_arg = arg;
	task->tqent_type = TIMEOUT_TASK;
	task->tqent_cancelled = B_FALSE;
	refcount_init(&task->tqent_rc, 1);
	tqid = taskq_insert(task);
	TIMEOUT_TASK_INIT(tq->tq_queue, &task->tqent_timeout_task, 0,
	    taskq_run, task);

	taskqueue_enqueue_timeout(tq->tq_queue, &task->tqent_timeout_task,
	    timo);
	return (tqid);
}
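
/*
 * Illustrative usage: expire_time is an absolute lbolt value, so a caller
 * wanting to run my_func (hypothetical) roughly 5 seconds from now would do:
 *
 *	tid = taskq_dispatch_delay(tq, my_func, arg, TQ_SLEEP,
 *	    ddi_get_lbolt() + 5 * hz);
 *
 * If the deadline is already in the past, the task is dispatched
 * immediately via taskq_dispatch().
 */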

taskqid_t
taskq_dispatch(taskq_t *tq, task_func_t func, void *arg, uint_t flags)
{
	taskq_ent_t *task;
	int mflag, prio;
	taskqid_t tqid;

	if ((flags & (TQ_SLEEP | TQ_NOQUEUE)) == TQ_SLEEP)
		mflag = M_WAITOK;
	else
		mflag = M_NOWAIT;
	/*
	 * If TQ_FRONT is given, we want higher priority for this task, so it
	 * can go at the front of the queue.
	 */
	prio = !!(flags & TQ_FRONT);

	task = uma_zalloc(taskq_zone, mflag);
	if (task == NULL)
		return (0);
	refcount_init(&task->tqent_rc, 1);
	task->tqent_func = func;
	task->tqent_arg = arg;
	task->tqent_cancelled = B_FALSE;
	task->tqent_type = NORMAL_TASK;
	tqid = taskq_insert(task);
	TASK_INIT(&task->tqent_task, prio, taskq_run, task);
	taskqueue_enqueue(tq->tq_queue, &task->tqent_task);
	return (tqid);
}
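
/*
 * Illustrative usage (my_func and my_arg are hypothetical):
 *
 *	taskqid_t tid = taskq_dispatch(tq, my_func, my_arg, TQ_SLEEP);
 *
 * A return of 0 is only possible when the allocation above used M_NOWAIT;
 * callers whose flags select M_WAITOK always get a valid ID back, since
 * uma_zalloc() cannot fail with M_WAITOK.
 */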

static void
taskq_run_ent(void *arg, int pending __unused)
{
	taskq_ent_t *task = arg;

	task->tqent_func(task->tqent_arg);
}

void
taskq_dispatch_ent(taskq_t *tq, task_func_t func, void *arg, uint32_t flags,
    taskq_ent_t *task)
{
	int prio;

	/*
	 * If TQ_FRONT is given, we want higher priority for this task, so it
	 * can go at the front of the queue.
	 */
	prio = !!(flags & TQ_FRONT);
	task->tqent_cancelled = B_FALSE;
	task->tqent_registered = B_FALSE;
	task->tqent_id = 0;
	task->tqent_func = func;
	task->tqent_arg = arg;

	TASK_INIT(&task->tqent_task, prio, taskq_run_ent, task);
	taskqueue_enqueue(tq->tq_queue, &task->tqent_task);
}
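
/*
 * Unlike taskq_dispatch(), taskq_dispatch_ent() uses a caller-supplied
 * entry (typically embedded in a larger structure, as ZFS does with zio_t),
 * so it performs no allocation and cannot fail.  The entry is never
 * registered in the ID hash, so it cannot be cancelled or waited on by ID.
 */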

void
taskq_wait(taskq_t *tq)
{
	taskqueue_quiesce(tq->tq_queue);
}

void
taskq_wait_id(taskq_t *tq, taskqid_t tid)
{
	taskq_ent_t *ent;

	if (tid == 0)
		return;
	if ((ent = taskq_lookup(tid)) == NULL)
		return;

	taskqueue_drain(tq->tq_queue, &ent->tqent_task);
	taskq_free(ent);
}

void
taskq_wait_outstanding(taskq_t *tq, taskqid_t id __unused)
{
	taskqueue_drain_all(tq->tq_queue);
}
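
/*
 * On FreeBSD the id argument is ignored: taskqueue(9) has no notion of
 * waiting only for tasks dispatched up to a given ID, so this drains
 * everything currently queued, a conservative superset of the Solaris
 * taskq_wait_outstanding() semantics.
 */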

int
taskq_empty_ent(taskq_ent_t *t)
{
	return (t->tqent_task.ta_pending == 0);
}