/*-
 * Copyright (c) 2010 Isilon Systems, Inc.
 * Copyright (c) 2010 iX Systems, Inc.
 * Copyright (c) 2010 Panasas, Inc.
 * Copyright (c) 2013-2015 Mellanox Technologies, Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */
#ifndef	_LINUX_WORKQUEUE_H_
#define	_LINUX_WORKQUEUE_H_

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/slab.h>

#include <sys/taskqueue.h>

struct workqueue_struct {
	struct taskqueue	*taskqueue;
};

struct work_struct {
	struct task	work_task;
	struct taskqueue *taskqueue;
	void	(*fn)(struct work_struct *);
};

typedef __typeof(((struct work_struct *)0)->fn) work_func_t;

struct delayed_work {
	struct work_struct	work;
	struct callout		timer;
};

extern void linux_work_fn(void *, int);
extern void linux_flush_fn(void *, int);
extern void linux_delayed_work_fn(void *);
extern struct workqueue_struct *linux_create_workqueue_common(const char *, int);
extern void destroy_workqueue(struct workqueue_struct *);

static inline struct delayed_work *
to_delayed_work(struct work_struct *work)
{

	return container_of(work, struct delayed_work, work);
}

#define	INIT_WORK(work, func)						\
do {									\
	(work)->fn = (func);						\
	(work)->taskqueue = NULL;					\
	TASK_INIT(&(work)->work_task, 0, linux_work_fn, (work));	\
} while (0)

#define	INIT_DELAYED_WORK(_work, func)					\
do {									\
	INIT_WORK(&(_work)->work, func);				\
	callout_init(&(_work)->timer, 1);				\
} while (0)

#define	INIT_DEFERRABLE_WORK(...)	INIT_DELAYED_WORK(__VA_ARGS__)

#define	schedule_work(work)						\
do {									\
	(work)->taskqueue = taskqueue_thread;				\
	taskqueue_enqueue(taskqueue_thread, &(work)->work_task);	\
} while (0)

#define	flush_scheduled_work()	flush_taskqueue(taskqueue_thread)

static inline int
queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
	work->taskqueue = wq->taskqueue;
	/* Return opposite value to align with Linux logic */
	return (!taskqueue_enqueue(wq->taskqueue, &work->work_task));
}

static inline int
queue_delayed_work(struct workqueue_struct *wq, struct delayed_work *work,
    unsigned long delay)
{
	int pending;

	work->work.taskqueue = wq->taskqueue;
	if (delay != 0) {
		pending = work->work.work_task.ta_pending;
		callout_reset(&work->timer, delay, linux_delayed_work_fn, work);
	} else {
		callout_stop(&work->timer);
		pending = taskqueue_enqueue(work->work.taskqueue,
		    &work->work.work_task);
	}
	return (!pending);
}

static inline bool
schedule_delayed_work(struct delayed_work *dwork,
    unsigned long delay)
{
	struct workqueue_struct wq;

	wq.taskqueue = taskqueue_thread;
	return (queue_delayed_work(&wq, dwork, delay));
}

#define	create_singlethread_workqueue(name)				\
	linux_create_workqueue_common(name, 1)

#define	create_workqueue(name)						\
	linux_create_workqueue_common(name, MAXCPU)

#define	alloc_ordered_workqueue(name, flags)				\
	linux_create_workqueue_common(name, 1)

#define	alloc_workqueue(name, flags, max_active)			\
	linux_create_workqueue_common(name, max_active)

#define	flush_workqueue(wq)	flush_taskqueue((wq)->taskqueue)

static inline void
flush_taskqueue(struct taskqueue *tq)
{
	struct task flushtask;

	PHOLD(curproc);
	TASK_INIT(&flushtask, 0, linux_flush_fn, NULL);
	taskqueue_enqueue(tq, &flushtask);
	taskqueue_drain(tq, &flushtask);
	PRELE(curproc);
}

static inline int
cancel_work_sync(struct work_struct *work)
{
	if (work->taskqueue &&
	    taskqueue_cancel(work->taskqueue, &work->work_task, NULL))
		taskqueue_drain(work->taskqueue, &work->work_task);
	return 0;
}

/*
 * This may leave work running on another CPU as it does on Linux.
 */
static inline int
cancel_delayed_work(struct delayed_work *work)
{

	callout_stop(&work->timer);
	if (work->work.taskqueue)
		return (taskqueue_cancel(work->work.taskqueue,
		    &work->work.work_task, NULL) == 0);
	return 0;
}

static inline int
cancel_delayed_work_sync(struct delayed_work *work)
{

	callout_drain(&work->timer);
	if (work->work.taskqueue &&
	    taskqueue_cancel(work->work.taskqueue, &work->work.work_task, NULL))
		taskqueue_drain(work->work.taskqueue, &work->work.work_task);
	return 0;
}

static inline bool
mod_delayed_work(struct workqueue_struct *wq, struct delayed_work *dwork,
    unsigned long delay)
{
	cancel_delayed_work(dwork);
	queue_delayed_work(wq, dwork, delay);
	return false;
}

#endif	/* _LINUX_WORKQUEUE_H_ */
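
The sketch below is not part of the header; it is a minimal illustration of how a LinuxKPI consumer might drive this shim, under the assumption of a hypothetical "mydrv" driver. All mydrv_* names and the softc layout are invented for illustration; only the workqueue/delayed_work API shown above is taken from the header. Note that, in this shim, the delay passed to queue_delayed_work() goes straight to callout_reset(), so it is measured in FreeBSD ticks (hz per second), and error handling here is deliberately schematic.

/*
 * Hypothetical consumer of the workqueue shim above (illustrative only).
 */
#include <sys/errno.h>

#include <linux/workqueue.h>

struct mydrv_softc {
	struct workqueue_struct	*wq;		/* backed by a taskqueue */
	struct work_struct	reset_work;	/* one-shot work item */
	struct delayed_work	poll_work;	/* periodic, callout-armed */
};

static void
mydrv_reset_handler(struct work_struct *work)
{
	struct mydrv_softc *sc =
	    container_of(work, struct mydrv_softc, reset_work);

	/* Runs in taskqueue thread context, never in interrupt context. */
	(void)sc;
}

static void
mydrv_poll_handler(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct mydrv_softc *sc =
	    container_of(dwork, struct mydrv_softc, poll_work);

	/* Re-arm the poll roughly one second (hz ticks) from now. */
	queue_delayed_work(sc->wq, &sc->poll_work, hz);
}

static int
mydrv_attach(struct mydrv_softc *sc)
{
	sc->wq = create_singlethread_workqueue("mydrv");
	if (sc->wq == NULL)
		return (ENOMEM);
	INIT_WORK(&sc->reset_work, mydrv_reset_handler);
	INIT_DELAYED_WORK(&sc->poll_work, mydrv_poll_handler);
	queue_work(sc->wq, &sc->reset_work);
	queue_delayed_work(sc->wq, &sc->poll_work, hz);
	return (0);
}

static void
mydrv_detach(struct mydrv_softc *sc)
{
	/* Stop the callout and wait for any in-flight handlers. */
	cancel_delayed_work_sync(&sc->poll_work);
	cancel_work_sync(&sc->reset_work);
	flush_workqueue(sc->wq);
	destroy_workqueue(sc->wq);
}

Because create_singlethread_workqueue() maps to a single-threaded taskqueue, work items queued on sc->wq are serialized with respect to each other, which is what the detach path above relies on.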