/*-
 * Copyright (c) 2010 Isilon Systems, Inc.
 * Copyright (c) 2010 iX Systems, Inc.
 * Copyright (c) 2010 Panasas, Inc.
 * Copyright (c) 2013-2015 Mellanox Technologies, Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */
#ifndef	_LINUX_WORKQUEUE_H_
#define	_LINUX_WORKQUEUE_H_

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/slab.h>

#include <sys/taskqueue.h>

struct workqueue_struct {
	struct taskqueue	*taskqueue;
};

struct work_struct {
	struct task		work_task;
	struct taskqueue	*taskqueue;
	void			(*fn)(struct work_struct *);
};

typedef __typeof(((struct work_struct *)0)->fn) work_func_t;

struct delayed_work {
	struct work_struct	work;
	struct callout		timer;
};

extern void linux_work_fn(void *, int);
extern void linux_flush_fn(void *, int);
extern void linux_delayed_work_fn(void *);
extern struct workqueue_struct *linux_create_workqueue_common(const char *, int);
extern void destroy_workqueue(struct workqueue_struct *);

static inline struct delayed_work *
to_delayed_work(struct work_struct *work)
{

	return container_of(work, struct delayed_work, work);
}

#define	INIT_WORK(work, func)						\
do {									\
	(work)->fn = (func);						\
	(work)->taskqueue = NULL;					\
	TASK_INIT(&(work)->work_task, 0, linux_work_fn, (work));	\
} while (0)

#define	INIT_DELAYED_WORK(_work, func)					\
do {									\
	INIT_WORK(&(_work)->work, func);				\
	callout_init(&(_work)->timer, 1);				\
} while (0)

#define	INIT_DEFERRABLE_WORK(...)	INIT_DELAYED_WORK(__VA_ARGS__)

#define	schedule_work(work)						\
do {									\
	(work)->taskqueue = taskqueue_thread;				\
	taskqueue_enqueue(taskqueue_thread, &(work)->work_task);	\
} while (0)

#define	flush_scheduled_work()	flush_taskqueue(taskqueue_thread)

static inline int
queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
	work->taskqueue = wq->taskqueue;
	/* Return opposite value to align with Linux logic */
	return (!taskqueue_enqueue(wq->taskqueue, &work->work_task));
}

static inline int
queue_delayed_work(struct workqueue_struct *wq, struct delayed_work *work,
    unsigned long delay)
{
	int pending;

	pending = work->work.work_task.ta_pending;
	work->work.taskqueue = wq->taskqueue;
	if (delay != 0)
		callout_reset(&work->timer, delay, linux_delayed_work_fn, work);
	else
		linux_delayed_work_fn((void *)work);

	return (!pending);
}

static inline bool
schedule_delayed_work(struct delayed_work *dwork,
    unsigned long delay)
{
	struct workqueue_struct wq;

	wq.taskqueue = taskqueue_thread;
	return (queue_delayed_work(&wq, dwork, delay));
}

#define	create_singlethread_workqueue(name)				\
	linux_create_workqueue_common(name, 1)

#define	create_workqueue(name)						\
	linux_create_workqueue_common(name, MAXCPU)

#define	alloc_ordered_workqueue(name, flags)				\
	linux_create_workqueue_common(name, 1)

#define	alloc_workqueue(name, flags, max_active)			\
	linux_create_workqueue_common(name, max_active)

#define	flush_workqueue(wq)	flush_taskqueue((wq)->taskqueue)

static inline void
flush_taskqueue(struct taskqueue *tq)
{
	struct task flushtask;

	PHOLD(curproc);
	TASK_INIT(&flushtask, 0, linux_flush_fn, NULL);
	taskqueue_enqueue(tq, &flushtask);
	taskqueue_drain(tq, &flushtask);
	PRELE(curproc);
}

static inline int
cancel_work_sync(struct work_struct *work)
{
	if (work->taskqueue &&
	    taskqueue_cancel(work->taskqueue, &work->work_task, NULL))
		taskqueue_drain(work->taskqueue, &work->work_task);
	return 0;
}

/*
 * This may leave work running on another CPU as it does on Linux.
 */
static inline int
cancel_delayed_work(struct delayed_work *work)
{

	callout_stop(&work->timer);
	if (work->work.taskqueue)
		return (taskqueue_cancel(work->work.taskqueue,
		    &work->work.work_task, NULL) == 0);
	return 0;
}

static inline int
cancel_delayed_work_sync(struct delayed_work *work)
{

	callout_drain(&work->timer);
	if (work->work.taskqueue &&
	    taskqueue_cancel(work->work.taskqueue, &work->work.work_task, NULL))
		taskqueue_drain(work->work.taskqueue, &work->work.work_task);
	return 0;
}

static inline bool
mod_delayed_work(struct workqueue_struct *wq, struct delayed_work *dwork,
    unsigned long delay)
{
	cancel_delayed_work(dwork);
	queue_delayed_work(wq, dwork, delay);
	return false;
}

#endif	/* _LINUX_WORKQUEUE_H_ */
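/*
 * Minimal usage sketch (editorial addition, not part of the original header):
 * it shows how a hypothetical LinuxKPI consumer could drive the API above,
 * which maps work items onto taskqueue(9) tasks.  The names example_wq,
 * example_work, example_work_fn, example_attach and example_detach are
 * invented for illustration only; error handling is reduced to the bare
 * minimum and the sketch is kept inside a comment so it cannot affect builds.
 *
 *	static struct workqueue_struct *example_wq;
 *	static struct work_struct example_work;
 *
 *	static void
 *	example_work_fn(struct work_struct *work)
 *	{
 *		(* Runs in taskqueue thread context via linux_work_fn(). *)
 *	}
 *
 *	static int
 *	example_attach(void)
 *	{
 *		example_wq = create_singlethread_workqueue("example");
 *		if (example_wq == NULL)
 *			return (-ENOMEM);
 *		INIT_WORK(&example_work, example_work_fn);
 *		queue_work(example_wq, &example_work);
 *		return (0);
 *	}
 *
 *	static void
 *	example_detach(void)
 *	{
 *		cancel_work_sync(&example_work);
 *		destroy_workqueue(example_wq);
 *	}
 *
 * For deferred execution, INIT_DELAYED_WORK() plus queue_delayed_work() can
 * be used the same way; the delay argument is handed straight to
 * callout_reset(), and to_delayed_work() recovers the containing
 * struct delayed_work from the work_struct passed to the handler.
 */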