/*-
 * Copyright (c) 2000 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
25*a9643ea8Slogwang * 26*a9643ea8Slogwang * $FreeBSD$ 27*a9643ea8Slogwang */ 28*a9643ea8Slogwang 29*a9643ea8Slogwang #ifndef _SYS_TASKQUEUE_H_ 30*a9643ea8Slogwang #define _SYS_TASKQUEUE_H_ 31*a9643ea8Slogwang 32*a9643ea8Slogwang #ifndef _KERNEL 33*a9643ea8Slogwang #error "no user-servicable parts inside" 34*a9643ea8Slogwang #endif 35*a9643ea8Slogwang 36*a9643ea8Slogwang #include <sys/queue.h> 37*a9643ea8Slogwang #include <sys/_task.h> 38*a9643ea8Slogwang #include <sys/_callout.h> 39*a9643ea8Slogwang #include <sys/_cpuset.h> 40*a9643ea8Slogwang 41*a9643ea8Slogwang struct taskqueue; 42*a9643ea8Slogwang struct taskqgroup; 43*a9643ea8Slogwang struct thread; 44*a9643ea8Slogwang 45*a9643ea8Slogwang struct timeout_task { 46*a9643ea8Slogwang struct taskqueue *q; 47*a9643ea8Slogwang struct task t; 48*a9643ea8Slogwang struct callout c; 49*a9643ea8Slogwang int f; 50*a9643ea8Slogwang }; 51*a9643ea8Slogwang 52*a9643ea8Slogwang enum taskqueue_callback_type { 53*a9643ea8Slogwang TASKQUEUE_CALLBACK_TYPE_INIT, 54*a9643ea8Slogwang TASKQUEUE_CALLBACK_TYPE_SHUTDOWN, 55*a9643ea8Slogwang }; 56*a9643ea8Slogwang #define TASKQUEUE_CALLBACK_TYPE_MIN TASKQUEUE_CALLBACK_TYPE_INIT 57*a9643ea8Slogwang #define TASKQUEUE_CALLBACK_TYPE_MAX TASKQUEUE_CALLBACK_TYPE_SHUTDOWN 58*a9643ea8Slogwang #define TASKQUEUE_NUM_CALLBACKS TASKQUEUE_CALLBACK_TYPE_MAX + 1 59*a9643ea8Slogwang #define TASKQUEUE_NAMELEN 32 60*a9643ea8Slogwang 61*a9643ea8Slogwang typedef void (*taskqueue_callback_fn)(void *context); 62*a9643ea8Slogwang 63*a9643ea8Slogwang /* 64*a9643ea8Slogwang * A notification callback function which is called from 65*a9643ea8Slogwang * taskqueue_enqueue(). The context argument is given in the call to 66*a9643ea8Slogwang * taskqueue_create(). This function would normally be used to allow the 67*a9643ea8Slogwang * queue to arrange to run itself later (e.g., by scheduling a software 68*a9643ea8Slogwang * interrupt or waking a kernel thread). 
69*a9643ea8Slogwang */ 70*a9643ea8Slogwang typedef void (*taskqueue_enqueue_fn)(void *context); 71*a9643ea8Slogwang 72*a9643ea8Slogwang struct taskqueue *taskqueue_create(const char *name, int mflags, 73*a9643ea8Slogwang taskqueue_enqueue_fn enqueue, 74*a9643ea8Slogwang void *context); 75*a9643ea8Slogwang int taskqueue_start_threads(struct taskqueue **tqp, int count, int pri, 76*a9643ea8Slogwang const char *name, ...) __printflike(4, 5); 77*a9643ea8Slogwang int taskqueue_start_threads_cpuset(struct taskqueue **tqp, int count, 78*a9643ea8Slogwang int pri, cpuset_t *mask, const char *name, ...) __printflike(5, 6); 79*a9643ea8Slogwang int taskqueue_enqueue(struct taskqueue *queue, struct task *task); 80*a9643ea8Slogwang int taskqueue_enqueue_timeout(struct taskqueue *queue, 81*a9643ea8Slogwang struct timeout_task *timeout_task, int ticks); 82*a9643ea8Slogwang int taskqueue_cancel(struct taskqueue *queue, struct task *task, 83*a9643ea8Slogwang u_int *pendp); 84*a9643ea8Slogwang int taskqueue_cancel_timeout(struct taskqueue *queue, 85*a9643ea8Slogwang struct timeout_task *timeout_task, u_int *pendp); 86*a9643ea8Slogwang void taskqueue_drain(struct taskqueue *queue, struct task *task); 87*a9643ea8Slogwang void taskqueue_drain_timeout(struct taskqueue *queue, 88*a9643ea8Slogwang struct timeout_task *timeout_task); 89*a9643ea8Slogwang void taskqueue_drain_all(struct taskqueue *queue); 90*a9643ea8Slogwang void taskqueue_free(struct taskqueue *queue); 91*a9643ea8Slogwang void taskqueue_run(struct taskqueue *queue); 92*a9643ea8Slogwang void taskqueue_block(struct taskqueue *queue); 93*a9643ea8Slogwang void taskqueue_unblock(struct taskqueue *queue); 94*a9643ea8Slogwang int taskqueue_member(struct taskqueue *queue, struct thread *td); 95*a9643ea8Slogwang void taskqueue_set_callback(struct taskqueue *queue, 96*a9643ea8Slogwang enum taskqueue_callback_type cb_type, 97*a9643ea8Slogwang taskqueue_callback_fn callback, void *context); 98*a9643ea8Slogwang 99*a9643ea8Slogwang #define 
TASK_INITIALIZER(priority, func, context) \ 100*a9643ea8Slogwang { .ta_pending = 0, \ 101*a9643ea8Slogwang .ta_priority = (priority), \ 102*a9643ea8Slogwang .ta_func = (func), \ 103*a9643ea8Slogwang .ta_context = (context) } 104*a9643ea8Slogwang 105*a9643ea8Slogwang /* 106*a9643ea8Slogwang * Functions for dedicated thread taskqueues 107*a9643ea8Slogwang */ 108*a9643ea8Slogwang void taskqueue_thread_loop(void *arg); 109*a9643ea8Slogwang void taskqueue_thread_enqueue(void *context); 110*a9643ea8Slogwang 111*a9643ea8Slogwang /* 112*a9643ea8Slogwang * Initialise a task structure. 113*a9643ea8Slogwang */ 114*a9643ea8Slogwang #define TASK_INIT(task, priority, func, context) do { \ 115*a9643ea8Slogwang (task)->ta_pending = 0; \ 116*a9643ea8Slogwang (task)->ta_priority = (priority); \ 117*a9643ea8Slogwang (task)->ta_func = (func); \ 118*a9643ea8Slogwang (task)->ta_context = (context); \ 119*a9643ea8Slogwang } while (0) 120*a9643ea8Slogwang 121*a9643ea8Slogwang void _timeout_task_init(struct taskqueue *queue, 122*a9643ea8Slogwang struct timeout_task *timeout_task, int priority, task_fn_t func, 123*a9643ea8Slogwang void *context); 124*a9643ea8Slogwang #define TIMEOUT_TASK_INIT(queue, timeout_task, priority, func, context) \ 125*a9643ea8Slogwang _timeout_task_init(queue, timeout_task, priority, func, context); 126*a9643ea8Slogwang 127*a9643ea8Slogwang /* 128*a9643ea8Slogwang * Declare a reference to a taskqueue. 129*a9643ea8Slogwang */ 130*a9643ea8Slogwang #define TASKQUEUE_DECLARE(name) \ 131*a9643ea8Slogwang extern struct taskqueue *taskqueue_##name 132*a9643ea8Slogwang 133*a9643ea8Slogwang /* 134*a9643ea8Slogwang * Define and initialise a global taskqueue that uses sleep mutexes. 
 */
/*
 * Defines the global queue pointer, a SYSINIT-time constructor that
 * creates the queue with taskqueue_create(M_WAITOK) and then runs the
 * caller-supplied `init` statement.  The trailing `struct __hack`
 * forces a semicolon at the macro use site.
 */
#define TASKQUEUE_DEFINE(name, enqueue, context, init)			\
									\
struct taskqueue *taskqueue_##name;					\
									\
static void								\
taskqueue_define_##name(void *arg)					\
{									\
	taskqueue_##name =						\
	    taskqueue_create(#name, M_WAITOK, (enqueue), (context));	\
	init;								\
}									\
									\
SYSINIT(taskqueue_##name, SI_SUB_INIT_IF, SI_ORDER_SECOND,		\
	taskqueue_define_##name, NULL);					\
									\
struct __hack

/*
 * Convenience wrapper: a sleep-mutex queue serviced by one dedicated
 * kernel thread at PWAIT priority.
 */
#define TASKQUEUE_DEFINE_THREAD(name)					\
TASKQUEUE_DEFINE(name, taskqueue_thread_enqueue, &taskqueue_##name,	\
	taskqueue_start_threads(&taskqueue_##name, 1, PWAIT,		\
	"%s taskq", #name))

/*
 * Define and initialise a global taskqueue that uses spin mutexes.
159*a9643ea8Slogwang */ 160*a9643ea8Slogwang #define TASKQUEUE_FAST_DEFINE(name, enqueue, context, init) \ 161*a9643ea8Slogwang \ 162*a9643ea8Slogwang struct taskqueue *taskqueue_##name; \ 163*a9643ea8Slogwang \ 164*a9643ea8Slogwang static void \ 165*a9643ea8Slogwang taskqueue_define_##name(void *arg) \ 166*a9643ea8Slogwang { \ 167*a9643ea8Slogwang taskqueue_##name = \ 168*a9643ea8Slogwang taskqueue_create_fast(#name, M_WAITOK, (enqueue), \ 169*a9643ea8Slogwang (context)); \ 170*a9643ea8Slogwang init; \ 171*a9643ea8Slogwang } \ 172*a9643ea8Slogwang \ 173*a9643ea8Slogwang SYSINIT(taskqueue_##name, SI_SUB_INIT_IF, SI_ORDER_SECOND, \ 174*a9643ea8Slogwang taskqueue_define_##name, NULL); \ 175*a9643ea8Slogwang \ 176*a9643ea8Slogwang struct __hack 177*a9643ea8Slogwang #define TASKQUEUE_FAST_DEFINE_THREAD(name) \ 178*a9643ea8Slogwang TASKQUEUE_FAST_DEFINE(name, taskqueue_thread_enqueue, \ 179*a9643ea8Slogwang &taskqueue_##name, taskqueue_start_threads(&taskqueue_##name \ 180*a9643ea8Slogwang 1, PWAIT, "%s taskq", #name)) 181*a9643ea8Slogwang 182*a9643ea8Slogwang /* 183*a9643ea8Slogwang * These queues are serviced by software interrupt handlers. To enqueue 184*a9643ea8Slogwang * a task, call taskqueue_enqueue(taskqueue_swi, &task) or 185*a9643ea8Slogwang * taskqueue_enqueue(taskqueue_swi_giant, &task). 186*a9643ea8Slogwang */ 187*a9643ea8Slogwang TASKQUEUE_DECLARE(swi_giant); 188*a9643ea8Slogwang TASKQUEUE_DECLARE(swi); 189*a9643ea8Slogwang 190*a9643ea8Slogwang /* 191*a9643ea8Slogwang * This queue is serviced by a kernel thread. To enqueue a task, call 192*a9643ea8Slogwang * taskqueue_enqueue(taskqueue_thread, &task). 193*a9643ea8Slogwang */ 194*a9643ea8Slogwang TASKQUEUE_DECLARE(thread); 195*a9643ea8Slogwang 196*a9643ea8Slogwang /* 197*a9643ea8Slogwang * Queue for swi handlers dispatched from fast interrupt handlers. 
 * These are necessarily different from the above because the queue
 * must be locked with spinlocks since sleep mutex's cannot be used
 * from a fast interrupt handler context.
 */
TASKQUEUE_DECLARE(fast);
/*
 * Like taskqueue_create(), but the resulting queue is suitable for use
 * from fast interrupt handler context (spinlock-protected, per the
 * comment above).
 */
struct taskqueue *taskqueue_create_fast(const char *name, int mflags,
					 taskqueue_enqueue_fn enqueue,
					 void *context);

#endif /* !_SYS_TASKQUEUE_H_ */