/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2000 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/cpuset.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/libkern.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/epoch.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/taskqueue.h>
#include <sys/unistd.h>
#include <machine/stdarg.h>

static MALLOC_DEFINE(M_TASKQUEUE, "taskqueue", "Task Queues");
static void *taskqueue_giant_ih;
static void *taskqueue_ih;
static void taskqueue_fast_enqueue(void *);
static void taskqueue_swi_enqueue(void *);
static void taskqueue_swi_giant_enqueue(void *);

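/*
 * A taskqueue_busy record is linked into tq_active by a worker for the
 * duration of a single task, tagged with a queue-wide sequence number
 * so that drains can distinguish tasks that started before the drain
 * from those that started after it.
 */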
struct taskqueue_busy {
	struct task *tb_running;
	u_int tb_seq;
	LIST_ENTRY(taskqueue_busy) tb_link;
};

struct taskqueue {
	STAILQ_HEAD(, task) tq_queue;
	LIST_HEAD(, taskqueue_busy) tq_active;
	struct task *tq_hint;
	u_int tq_seq;
	int tq_callouts;
	struct mtx_padalign tq_mutex;
	taskqueue_enqueue_fn tq_enqueue;
	void *tq_context;
	char *tq_name;
	struct thread **tq_threads;
	int tq_tcount;
	int tq_spin;
	int tq_flags;
	taskqueue_callback_fn tq_callbacks[TASKQUEUE_NUM_CALLBACKS];
	void *tq_cb_contexts[TASKQUEUE_NUM_CALLBACKS];
};

#define	TQ_FLAGS_ACTIVE		(1 << 0)
#define	TQ_FLAGS_BLOCKED	(1 << 1)
#define	TQ_FLAGS_UNLOCKED_ENQUEUE	(1 << 2)

#define	DT_CALLOUT_ARMED	(1 << 0)
#define	DT_DRAIN_IN_PROGRESS	(1 << 1)

#define	TQ_LOCK(tq)							\
	do {								\
		if ((tq)->tq_spin)					\
			mtx_lock_spin(&(tq)->tq_mutex);			\
		else							\
			mtx_lock(&(tq)->tq_mutex);			\
	} while (0)
#define	TQ_ASSERT_LOCKED(tq)	mtx_assert(&(tq)->tq_mutex, MA_OWNED)

#define	TQ_UNLOCK(tq)							\
	do {								\
		if ((tq)->tq_spin)					\
			mtx_unlock_spin(&(tq)->tq_mutex);		\
		else							\
			mtx_unlock(&(tq)->tq_mutex);			\
	} while (0)
#define	TQ_ASSERT_UNLOCKED(tq)	mtx_assert(&(tq)->tq_mutex, MA_NOTOWNED)

void
_timeout_task_init(struct taskqueue *queue, struct timeout_task *timeout_task,
    int priority, task_fn_t func, void *context)
{

	TASK_INIT(&timeout_task->t, priority, func, context);
	callout_init_mtx(&timeout_task->c, &queue->tq_mutex,
	    CALLOUT_RETURNUNLOCKED);
	timeout_task->q = queue;
	timeout_task->f = 0;
}

#ifndef FSTACK
static __inline int
TQ_SLEEP(struct taskqueue *tq, void *p, const char *wm)
{
	if (tq->tq_spin)
		return (msleep_spin(p, (struct mtx *)&tq->tq_mutex, wm, 0));
	return (msleep(p, &tq->tq_mutex, 0, wm, 0));
}
#else
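/*
 * F-Stack has no sleepable kernel threads here, so instead of sleeping
 * the wait loops that use TQ_SLEEP() simply terminate.
 */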
#define	TQ_SLEEP(a, b, c)	break;
#endif

static struct taskqueue *
_taskqueue_create(const char *name, int mflags,
    taskqueue_enqueue_fn enqueue, void *context,
    int mtxflags, const char *mtxname __unused)
{
	struct taskqueue *queue;
	char *tq_name;

	tq_name = malloc(TASKQUEUE_NAMELEN, M_TASKQUEUE, mflags | M_ZERO);
	if (tq_name == NULL)
		return (NULL);

	queue = malloc(sizeof(struct taskqueue), M_TASKQUEUE, mflags | M_ZERO);
	if (queue == NULL) {
		free(tq_name, M_TASKQUEUE);
		return (NULL);
	}

	snprintf(tq_name, TASKQUEUE_NAMELEN, "%s", (name) ? name : "taskqueue");

	STAILQ_INIT(&queue->tq_queue);
	LIST_INIT(&queue->tq_active);
	queue->tq_enqueue = enqueue;
	queue->tq_context = context;
	queue->tq_name = tq_name;
	queue->tq_spin = (mtxflags & MTX_SPIN) != 0;
	queue->tq_flags |= TQ_FLAGS_ACTIVE;
	if (enqueue == taskqueue_fast_enqueue ||
	    enqueue == taskqueue_swi_enqueue ||
	    enqueue == taskqueue_swi_giant_enqueue ||
	    enqueue == taskqueue_thread_enqueue)
		queue->tq_flags |= TQ_FLAGS_UNLOCKED_ENQUEUE;
	mtx_init(&queue->tq_mutex, tq_name, NULL, mtxflags);

	return (queue);
}

struct taskqueue *
taskqueue_create(const char *name, int mflags,
    taskqueue_enqueue_fn enqueue, void *context)
{

	return _taskqueue_create(name, mflags, enqueue, context,
	    MTX_DEF, name);
}
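
/*
 * Illustrative use of the creation KPI above (a sketch only; "sc",
 * "sc_tq" and "sc_task" are hypothetical driver members, not part of
 * this file):
 *
 *	TASK_INIT(&sc->sc_task, 0, foo_task_fn, sc);
 *	sc->sc_tq = taskqueue_create("foo_tq", M_WAITOK,
 *	    taskqueue_thread_enqueue, &sc->sc_tq);
 *	taskqueue_start_threads(&sc->sc_tq, 1, PI_NET, "foo taskq");
 *	...
 *	taskqueue_enqueue(sc->sc_tq, &sc->sc_task);
 */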

void
taskqueue_set_callback(struct taskqueue *queue,
    enum taskqueue_callback_type cb_type, taskqueue_callback_fn callback,
    void *context)
{

	KASSERT(((cb_type >= TASKQUEUE_CALLBACK_TYPE_MIN) &&
	    (cb_type <= TASKQUEUE_CALLBACK_TYPE_MAX)),
	    ("Callback type %d not valid, must be %d-%d", cb_type,
	    TASKQUEUE_CALLBACK_TYPE_MIN, TASKQUEUE_CALLBACK_TYPE_MAX));
	KASSERT((queue->tq_callbacks[cb_type] == NULL),
	    ("Re-initialization of taskqueue callback?"));

	queue->tq_callbacks[cb_type] = callback;
	queue->tq_cb_contexts[cb_type] = context;
}

/*
 * Signal a taskqueue thread to terminate.
 */
static void
taskqueue_terminate(struct thread **pp, struct taskqueue *tq)
{

	while (tq->tq_tcount > 0 || tq->tq_callouts > 0) {
		wakeup(tq);
		TQ_SLEEP(tq, pp, "tq_destroy");
	}
}

void
taskqueue_free(struct taskqueue *queue)
{

	TQ_LOCK(queue);
	queue->tq_flags &= ~TQ_FLAGS_ACTIVE;
	taskqueue_terminate(queue->tq_threads, queue);
	KASSERT(LIST_EMPTY(&queue->tq_active), ("Tasks still running?"));
	KASSERT(queue->tq_callouts == 0, ("Armed timeout tasks"));
	mtx_destroy(&queue->tq_mutex);
	free(queue->tq_threads, M_TASKQUEUE);
	free(queue->tq_name, M_TASKQUEUE);
	free(queue, M_TASKQUEUE);
}

static int
taskqueue_enqueue_locked(struct taskqueue *queue, struct task *task)
{
	struct task *ins;
	struct task *prev;

	KASSERT(task->ta_func != NULL, ("enqueueing task with NULL func"));
	/*
	 * Count multiple enqueues.
	 */
	if (task->ta_pending) {
		if (task->ta_pending < USHRT_MAX)
			task->ta_pending++;
		TQ_UNLOCK(queue);
		return (0);
	}

	/*
	 * Optimise the cases when all tasks use a small set of
	 * priorities.  With only one priority we always insert at the
	 * end.  With two priorities tq_hint typically gives the
	 * insertion point.  With more than two it should still halve
	 * the search.
	 */
	prev = STAILQ_LAST(&queue->tq_queue, task, ta_link);
	if (!prev || prev->ta_priority >= task->ta_priority) {
		STAILQ_INSERT_TAIL(&queue->tq_queue, task, ta_link);
	} else {
		prev = queue->tq_hint;
		if (prev && prev->ta_priority >= task->ta_priority) {
			ins = STAILQ_NEXT(prev, ta_link);
		} else {
			prev = NULL;
			ins = STAILQ_FIRST(&queue->tq_queue);
		}
		for (; ins; prev = ins, ins = STAILQ_NEXT(ins, ta_link))
			if (ins->ta_priority < task->ta_priority)
				break;

		if (prev) {
			STAILQ_INSERT_AFTER(&queue->tq_queue, prev, task, ta_link);
			queue->tq_hint = task;
		} else
			STAILQ_INSERT_HEAD(&queue->tq_queue, task, ta_link);
	}

	task->ta_pending = 1;
	if ((queue->tq_flags & TQ_FLAGS_UNLOCKED_ENQUEUE) != 0)
		TQ_UNLOCK(queue);
	if ((queue->tq_flags & TQ_FLAGS_BLOCKED) == 0)
		queue->tq_enqueue(queue->tq_context);
	if ((queue->tq_flags & TQ_FLAGS_UNLOCKED_ENQUEUE) == 0)
		TQ_UNLOCK(queue);

	/* Return with lock released. */
	return (0);
}

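/*
 * Schedule a task for execution.  An enqueue of a task that is already
 * pending is coalesced; ta_pending counts how many enqueues occurred
 * before the task ran.
 */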
int
taskqueue_enqueue(struct taskqueue *queue, struct task *task)
{
	int res;

	TQ_LOCK(queue);
	res = taskqueue_enqueue_locked(queue, task);
	/* The lock is released inside. */

	return (res);
}

static void
taskqueue_timeout_func(void *arg)
{
	struct taskqueue *queue;
	struct timeout_task *timeout_task;

	timeout_task = arg;
	queue = timeout_task->q;
	KASSERT((timeout_task->f & DT_CALLOUT_ARMED) != 0, ("Stray timeout"));
	timeout_task->f &= ~DT_CALLOUT_ARMED;
	queue->tq_callouts--;
	taskqueue_enqueue_locked(timeout_task->q, &timeout_task->t);
	/* The lock is released inside. */
}

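/*
 * Arm a timeout task: if sbt is zero the task is enqueued immediately,
 * otherwise a callout is scheduled to enqueue it when the time expires.
 * Returns the previous pending count, or -1 if a drain is in progress.
 */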
int
taskqueue_enqueue_timeout_sbt(struct taskqueue *queue,
    struct timeout_task *timeout_task, sbintime_t sbt, sbintime_t pr, int flags)
{
	int res;

	TQ_LOCK(queue);
	KASSERT(timeout_task->q == NULL || timeout_task->q == queue,
	    ("Migrated queue"));
	KASSERT(!queue->tq_spin, ("Timeout for spin-queue"));
	timeout_task->q = queue;
	res = timeout_task->t.ta_pending;
	if (timeout_task->f & DT_DRAIN_IN_PROGRESS) {
		/* Do nothing */
		TQ_UNLOCK(queue);
		res = -1;
	} else if (sbt == 0) {
		taskqueue_enqueue_locked(queue, &timeout_task->t);
		/* The lock is released inside. */
	} else {
		if ((timeout_task->f & DT_CALLOUT_ARMED) != 0) {
			res++;
		} else {
			queue->tq_callouts++;
			timeout_task->f |= DT_CALLOUT_ARMED;
			if (sbt < 0)
				sbt = -sbt; /* Ignore overflow. */
		}
		if (sbt > 0) {
			callout_reset_sbt(&timeout_task->c, sbt, pr,
			    taskqueue_timeout_func, timeout_task, flags);
		}
		TQ_UNLOCK(queue);
	}
	return (res);
}

int
taskqueue_enqueue_timeout(struct taskqueue *queue,
    struct timeout_task *ttask, int ticks)
{

	return (taskqueue_enqueue_timeout_sbt(queue, ttask, ticks * tick_sbt,
	    0, 0));
}
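
/*
 * Illustrative timeout usage for the two functions above (a sketch;
 * "sc" and its members are hypothetical, not part of this file):
 *
 *	TIMEOUT_TASK_INIT(sc->sc_tq, &sc->sc_ttask, 0, foo_timer_fn, sc);
 *	taskqueue_enqueue_timeout(sc->sc_tq, &sc->sc_ttask, hz);
 */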

static void
taskqueue_task_nop_fn(void *context, int pending)
{
}

/*
 * Block until all currently queued tasks in this taskqueue
 * have begun execution.  Tasks queued during execution of
 * this function are ignored.  Returns 1 if it had to wait,
 * 0 if the queue was already empty.
 */
static int
taskqueue_drain_tq_queue(struct taskqueue *queue)
{
	struct task t_barrier;

	if (STAILQ_EMPTY(&queue->tq_queue))
		return (0);

	/*
	 * Enqueue our barrier after all current tasks, but with
	 * the highest priority so that newly queued tasks cannot
	 * pass it.  Because of the high priority, we cannot use
	 * taskqueue_enqueue_locked directly (which drops the lock
	 * anyway) so just insert it at tail while we have the
	 * queue lock.
	 */
	TASK_INIT(&t_barrier, UCHAR_MAX, taskqueue_task_nop_fn, &t_barrier);
	STAILQ_INSERT_TAIL(&queue->tq_queue, &t_barrier, ta_link);
	queue->tq_hint = &t_barrier;
	t_barrier.ta_pending = 1;

	/*
	 * Once the barrier has executed, all previously queued tasks
	 * have completed or are currently executing.
	 */
	while (t_barrier.ta_pending != 0)
		TQ_SLEEP(queue, &t_barrier, "tq_qdrain");
	return (1);
}

/*
 * Block until all currently executing tasks for this taskqueue
 * complete.  Tasks that begin execution during the execution
 * of this function are ignored.  Returns 1 if it had to wait,
 * 0 if nothing was running.
 */
static int
taskqueue_drain_tq_active(struct taskqueue *queue)
{
	struct taskqueue_busy *tb;
	u_int seq;

	if (LIST_EMPTY(&queue->tq_active))
		return (0);

	/* Block taskqueue_terminate(). */
	queue->tq_callouts++;

	/* Wait for any active task with sequence from the past. */
	seq = queue->tq_seq;
restart:
	LIST_FOREACH(tb, &queue->tq_active, tb_link) {
		if ((int)(tb->tb_seq - seq) <= 0) {
			TQ_SLEEP(queue, tb->tb_running, "tq_adrain");
			goto restart;
		}
	}

	/* Release taskqueue_terminate(). */
	queue->tq_callouts--;
	if ((queue->tq_flags & TQ_FLAGS_ACTIVE) == 0)
		wakeup_one(queue->tq_threads);
	return (1);
}

void
taskqueue_block(struct taskqueue *queue)
{

	TQ_LOCK(queue);
	queue->tq_flags |= TQ_FLAGS_BLOCKED;
	TQ_UNLOCK(queue);
}

void
taskqueue_unblock(struct taskqueue *queue)
{

	TQ_LOCK(queue);
	queue->tq_flags &= ~TQ_FLAGS_BLOCKED;
	if (!STAILQ_EMPTY(&queue->tq_queue))
		queue->tq_enqueue(queue->tq_context);
	TQ_UNLOCK(queue);
}

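/*
 * Main dispatch loop: pop tasks off the queue and run them with the
 * queue unlocked, entering the network epoch around tasks marked with
 * TASK_IS_NET() and leaving it again for those that are not.
 */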
static void
taskqueue_run_locked(struct taskqueue *queue)
{
	struct epoch_tracker et;
	struct taskqueue_busy tb;
	struct task *task;
	bool in_net_epoch;
	int pending;

	KASSERT(queue != NULL, ("tq is NULL"));
	TQ_ASSERT_LOCKED(queue);
	tb.tb_running = NULL;
	LIST_INSERT_HEAD(&queue->tq_active, &tb, tb_link);
	in_net_epoch = false;

	while ((task = STAILQ_FIRST(&queue->tq_queue)) != NULL) {
		STAILQ_REMOVE_HEAD(&queue->tq_queue, ta_link);
		if (queue->tq_hint == task)
			queue->tq_hint = NULL;
		pending = task->ta_pending;
		task->ta_pending = 0;
		tb.tb_running = task;
		tb.tb_seq = ++queue->tq_seq;
		TQ_UNLOCK(queue);

		KASSERT(task->ta_func != NULL, ("task->ta_func is NULL"));
		if (!in_net_epoch && TASK_IS_NET(task)) {
			in_net_epoch = true;
			NET_EPOCH_ENTER(et);
		} else if (in_net_epoch && !TASK_IS_NET(task)) {
			NET_EPOCH_EXIT(et);
			in_net_epoch = false;
		}
		task->ta_func(task->ta_context, pending);

		TQ_LOCK(queue);
		wakeup(task);
	}
	if (in_net_epoch)
		NET_EPOCH_EXIT(et);
	LIST_REMOVE(&tb, tb_link);
}

void
taskqueue_run(struct taskqueue *queue)
{

	TQ_LOCK(queue);
	taskqueue_run_locked(queue);
	TQ_UNLOCK(queue);
}

static int
task_is_running(struct taskqueue *queue, struct task *task)
{
	struct taskqueue_busy *tb;

	TQ_ASSERT_LOCKED(queue);
	LIST_FOREACH(tb, &queue->tq_active, tb_link) {
		if (tb->tb_running == task)
			return (1);
	}
	return (0);
}

/*
 * Only use this function in single-threaded contexts.  It returns
 * non-zero if the given task is either pending or running.  Otherwise
 * the task is idle and can be queued again or freed.
 */
int
taskqueue_poll_is_busy(struct taskqueue *queue, struct task *task)
{
	int retval;

	TQ_LOCK(queue);
	retval = task->ta_pending > 0 || task_is_running(queue, task);
	TQ_UNLOCK(queue);

	return (retval);
}

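/*
 * Remove a pending task from the queue if possible, reporting the old
 * pending count through *pendp.  Returns EBUSY if the task is currently
 * running, in which case the caller may taskqueue_drain() it.
 */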
static int
taskqueue_cancel_locked(struct taskqueue *queue, struct task *task,
    u_int *pendp)
{

	if (task->ta_pending > 0) {
		STAILQ_REMOVE(&queue->tq_queue, task, task, ta_link);
		if (queue->tq_hint == task)
			queue->tq_hint = NULL;
	}
	if (pendp != NULL)
		*pendp = task->ta_pending;
	task->ta_pending = 0;
	return (task_is_running(queue, task) ? EBUSY : 0);
}

int
taskqueue_cancel(struct taskqueue *queue, struct task *task, u_int *pendp)
{
	int error;

	TQ_LOCK(queue);
	error = taskqueue_cancel_locked(queue, task, pendp);
	TQ_UNLOCK(queue);

	return (error);
}

int
taskqueue_cancel_timeout(struct taskqueue *queue,
    struct timeout_task *timeout_task, u_int *pendp)
{
	u_int pending, pending1;
	int error;

	TQ_LOCK(queue);
	pending = !!(callout_stop(&timeout_task->c) > 0);
	error = taskqueue_cancel_locked(queue, &timeout_task->t, &pending1);
	if ((timeout_task->f & DT_CALLOUT_ARMED) != 0) {
		timeout_task->f &= ~DT_CALLOUT_ARMED;
		queue->tq_callouts--;
	}
	TQ_UNLOCK(queue);

	if (pendp != NULL)
		*pendp = pending + pending1;
	return (error);
}

void
taskqueue_drain(struct taskqueue *queue, struct task *task)
{

	if (!queue->tq_spin)
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, __func__);

	TQ_LOCK(queue);
	while (task->ta_pending != 0 || task_is_running(queue, task))
		TQ_SLEEP(queue, task, "tq_drain");
	TQ_UNLOCK(queue);
}

void
taskqueue_drain_all(struct taskqueue *queue)
{

	if (!queue->tq_spin)
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, __func__);

	TQ_LOCK(queue);
	(void)taskqueue_drain_tq_queue(queue);
	(void)taskqueue_drain_tq_active(queue);
	TQ_UNLOCK(queue);
}

void
taskqueue_drain_timeout(struct taskqueue *queue,
    struct timeout_task *timeout_task)
{

	/*
	 * Set flag to prevent timer from re-starting during drain:
	 */
	TQ_LOCK(queue);
	KASSERT((timeout_task->f & DT_DRAIN_IN_PROGRESS) == 0,
	    ("Drain already in progress"));
	timeout_task->f |= DT_DRAIN_IN_PROGRESS;
	TQ_UNLOCK(queue);

	callout_drain(&timeout_task->c);
	taskqueue_drain(queue, &timeout_task->t);

	/*
	 * Clear flag to allow timer to re-start:
	 */
	TQ_LOCK(queue);
	timeout_task->f &= ~DT_DRAIN_IN_PROGRESS;
	TQ_UNLOCK(queue);
}

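/*
 * Wait for the taskqueue to become completely idle: keep alternating
 * between draining the pending queue and the running tasks until both
 * report that there was nothing left to wait for.
 */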
void
taskqueue_quiesce(struct taskqueue *queue)
{
	int ret;

	TQ_LOCK(queue);
	do {
		ret = taskqueue_drain_tq_queue(queue);
		if (ret == 0)
			ret = taskqueue_drain_tq_active(queue);
	} while (ret != 0);
	TQ_UNLOCK(queue);
}

static void
taskqueue_swi_enqueue(void *context)
{
	swi_sched(taskqueue_ih, 0);
}

static void
taskqueue_swi_run(void *dummy)
{
	taskqueue_run(taskqueue_swi);
}

static void
taskqueue_swi_giant_enqueue(void *context)
{
	swi_sched(taskqueue_giant_ih, 0);
}

static void
taskqueue_swi_giant_run(void *dummy)
{
	taskqueue_run(taskqueue_swi_giant);
}

#ifndef FSTACK
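/*
 * Create the worker threads for a queue.  Threads are created stopped
 * (RFSTOPPED) and only handed to the scheduler once any requested CPU
 * affinity has been applied.
 */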
static int
_taskqueue_start_threads(struct taskqueue **tqp, int count, int pri,
    cpuset_t *mask, struct proc *p, const char *name, va_list ap)
{
	char ktname[MAXCOMLEN + 1];
	struct thread *td;
	struct taskqueue *tq;
	int i, error;

	if (count <= 0)
		return (EINVAL);

	vsnprintf(ktname, sizeof(ktname), name, ap);
	tq = *tqp;

	tq->tq_threads = malloc(sizeof(struct thread *) * count, M_TASKQUEUE,
	    M_NOWAIT | M_ZERO);
	if (tq->tq_threads == NULL) {
		printf("%s: no memory for %s threads\n", __func__, ktname);
		return (ENOMEM);
	}

	for (i = 0; i < count; i++) {
		if (count == 1)
			error = kthread_add(taskqueue_thread_loop, tqp, p,
			    &tq->tq_threads[i], RFSTOPPED, 0, "%s", ktname);
		else
			error = kthread_add(taskqueue_thread_loop, tqp, p,
			    &tq->tq_threads[i], RFSTOPPED, 0,
			    "%s_%d", ktname, i);
		if (error) {
			/* Should be OK to continue; taskqueue_free() will do the right thing. */
			printf("%s: kthread_add(%s): error %d", __func__,
			    ktname, error);
			tq->tq_threads[i] = NULL;	/* paranoid */
		} else
			tq->tq_tcount++;
	}
	if (tq->tq_tcount == 0) {
		free(tq->tq_threads, M_TASKQUEUE);
		tq->tq_threads = NULL;
		return (ENOMEM);
	}
	for (i = 0; i < count; i++) {
		if (tq->tq_threads[i] == NULL)
			continue;
		td = tq->tq_threads[i];
		if (mask) {
			error = cpuset_setthread(td->td_tid, mask);
			/*
			 * Failing to pin is rarely an actual fatal error;
			 * it'll just affect performance.
			 */
			if (error)
				printf("%s: curthread=%llu: can't pin; "
				    "error=%d\n",
				    __func__,
				    (unsigned long long)td->td_tid,
				    error);
		}
		thread_lock(td);
		sched_prio(td, pri);
		sched_add(td, SRQ_BORING);
	}

	return (0);
}
#endif

int
taskqueue_start_threads(struct taskqueue **tqp, int count, int pri,
    const char *name, ...)
{
#ifndef FSTACK
	va_list ap;
	int error;

	va_start(ap, name);
	error = _taskqueue_start_threads(tqp, count, pri, NULL, NULL, name, ap);
	va_end(ap);
	return (error);
#else
	return (0);
#endif
}

int
taskqueue_start_threads_in_proc(struct taskqueue **tqp, int count, int pri,
    struct proc *proc, const char *name, ...)
{
#ifndef FSTACK
	va_list ap;
	int error;

	va_start(ap, name);
	error = _taskqueue_start_threads(tqp, count, pri, NULL, proc, name, ap);
	va_end(ap);
	return (error);
#else
	return (0);
#endif
}

int
taskqueue_start_threads_cpuset(struct taskqueue **tqp, int count, int pri,
    cpuset_t *mask, const char *name, ...)
{
#ifndef FSTACK
	va_list ap;
	int error;

	va_start(ap, name);
	error = _taskqueue_start_threads(tqp, count, pri, mask, NULL, name, ap);
	va_end(ap);
	return (error);
#else
	return (0);
#endif
}

static inline void
taskqueue_run_callback(struct taskqueue *tq,
    enum taskqueue_callback_type cb_type)
{
	taskqueue_callback_fn tq_callback;

	TQ_ASSERT_UNLOCKED(tq);
	tq_callback = tq->tq_callbacks[cb_type];
	if (tq_callback != NULL)
		tq_callback(tq->tq_cb_contexts[cb_type]);
}

void
taskqueue_thread_loop(void *arg)
{
	struct taskqueue **tqp, *tq;

	tqp = arg;
	tq = *tqp;
	taskqueue_run_callback(tq, TASKQUEUE_CALLBACK_TYPE_INIT);
	TQ_LOCK(tq);
	while ((tq->tq_flags & TQ_FLAGS_ACTIVE) != 0) {
		/* XXX ? */
		taskqueue_run_locked(tq);
		/*
		 * Because taskqueue_run() can drop tq_mutex, we need to
		 * check if the TQ_FLAGS_ACTIVE flag wasn't removed in the
		 * meantime, which means we missed a wakeup.
		 */
		if ((tq->tq_flags & TQ_FLAGS_ACTIVE) == 0)
			break;
		TQ_SLEEP(tq, tq, "-");
	}
	taskqueue_run_locked(tq);
	/*
	 * This thread is on its way out, so just drop the lock temporarily
	 * in order to call the shutdown callback.  This allows the callback
	 * to look at the taskqueue, even just before it dies.
	 */
	TQ_UNLOCK(tq);
	taskqueue_run_callback(tq, TASKQUEUE_CALLBACK_TYPE_SHUTDOWN);
	TQ_LOCK(tq);

	/* rendezvous with thread that asked us to terminate */
	tq->tq_tcount--;
	wakeup_one(tq->tq_threads);
	TQ_UNLOCK(tq);
	kthread_exit();
}

void
taskqueue_thread_enqueue(void *context)
{
	struct taskqueue **tqp, *tq;

	tqp = context;
	tq = *tqp;
	wakeup_any(tq);
}

TASKQUEUE_DEFINE(swi, taskqueue_swi_enqueue, NULL,
    swi_add(NULL, "task queue", taskqueue_swi_run, NULL, SWI_TQ,
    INTR_MPSAFE, &taskqueue_ih));

TASKQUEUE_DEFINE(swi_giant, taskqueue_swi_giant_enqueue, NULL,
    swi_add(NULL, "Giant taskq", taskqueue_swi_giant_run,
    NULL, SWI_TQ_GIANT, 0, &taskqueue_giant_ih));

TASKQUEUE_DEFINE_THREAD(thread);

struct taskqueue *
taskqueue_create_fast(const char *name, int mflags,
    taskqueue_enqueue_fn enqueue, void *context)
{
	return _taskqueue_create(name, mflags, enqueue, context,
	    MTX_SPIN, "fast_taskqueue");
}

static void *taskqueue_fast_ih;

static void
taskqueue_fast_enqueue(void *context)
{
	swi_sched(taskqueue_fast_ih, 0);
}

static void
taskqueue_fast_run(void *dummy)
{
	taskqueue_run(taskqueue_fast);
}

TASKQUEUE_FAST_DEFINE(fast, taskqueue_fast_enqueue, NULL,
    swi_add(NULL, "fast taskq", taskqueue_fast_run, NULL,
    SWI_TQ_FAST, INTR_MPSAFE, &taskqueue_fast_ih));

int
taskqueue_member(struct taskqueue *queue, struct thread *td)
{
	int i, j, ret = 0;

	for (i = 0, j = 0; ; i++) {
		if (queue->tq_threads[i] == NULL)
			continue;
		if (queue->tq_threads[i] == td) {
			ret = 1;
			break;
		}
		if (++j >= queue->tq_tcount)
			break;
	}
	return (ret);
}