1 /*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*-
29 * Copyright (c) 1991, 1993
30 * The Regents of the University of California. All rights reserved.
31 *
32 * Redistribution and use in source and binary forms, with or without
33 * modification, are permitted provided that the following conditions
34 * are met:
35 * 1. Redistributions of source code must retain the above copyright
36 * notice, this list of conditions and the following disclaimer.
37 * 2. Redistributions in binary form must reproduce the above copyright
38 * notice, this list of conditions and the following disclaimer in the
39 * documentation and/or other materials provided with the distribution.
40 * 4. Neither the name of the University nor the names of its contributors
41 * may be used to endorse or promote products derived from this software
42 * without specific prior written permission.
43 *
44 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
45 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
46 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
47 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
48 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
49 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
50 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
51 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
52 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
53 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
54 * SUCH DAMAGE.
55 *
56 * @(#)queue.h 8.5 (Berkeley) 8/20/94
57 */
58
59 #ifndef _SYS_QUEUE_H_
60 #define _SYS_QUEUE_H_
61
62 #ifdef KERNEL_PRIVATE
63 #include <kern/debug.h> /* panic function call */
64 #include <machine/trap.h>
65 #include <sys/cdefs.h> /* __improbable in kernelspace */
66 #else
67 #ifndef __improbable
68 #define __improbable(x) (x) /* noop in userspace */
69 #endif /* __improbable */
70 #endif /* KERNEL_PRIVATE */
71
72 /*
73 * This file defines five types of data structures: singly-linked lists,
74 * singly-linked tail queues, lists, tail queues, and circular queues.
75 *
76 * A singly-linked list is headed by a single forward pointer. The elements
77 * are singly linked for minimum space and pointer manipulation overhead at
78 * the expense of O(n) removal for arbitrary elements. New elements can be
79 * added to the list after an existing element or at the head of the list.
80 * Elements being removed from the head of the list should use the explicit
81 * macro for this purpose for optimum efficiency. A singly-linked list may
82 * only be traversed in the forward direction. Singly-linked lists are ideal
83 * for applications with large datasets and few or no removals or for
84 * implementing a LIFO queue.
85 *
86 * A singly-linked tail queue is headed by a pair of pointers, one to the
87 * head of the list and the other to the tail of the list. The elements are
88 * singly linked for minimum space and pointer manipulation overhead at the
89 * expense of O(n) removal for arbitrary elements. New elements can be added
90 * to the list after an existing element, at the head of the list, or at the
91 * end of the list. Elements being removed from the head of the tail queue
92 * should use the explicit macro for this purpose for optimum efficiency.
93 * A singly-linked tail queue may only be traversed in the forward direction.
94 * Singly-linked tail queues are ideal for applications with large datasets
95 * and few or no removals or for implementing a FIFO queue.
96 *
97 * A list is headed by a single forward pointer (or an array of forward
98 * pointers for a hash table header). The elements are doubly linked
99 * so that an arbitrary element can be removed without a need to
100 * traverse the list. New elements can be added to the list before
101 * or after an existing element or at the head of the list. A list
102 * may only be traversed in the forward direction.
103 *
104 * A tail queue is headed by a pair of pointers, one to the head of the
105 * list and the other to the tail of the list. The elements are doubly
106 * linked so that an arbitrary element can be removed without a need to
107 * traverse the list. New elements can be added to the list before or
108 * after an existing element, at the head of the list, or at the end of
109 * the list. A tail queue may be traversed in either direction.
110 *
111 * A circle queue is headed by a pair of pointers, one to the head of the
112 * list and the other to the tail of the list. The elements are doubly
113 * linked so that an arbitrary element can be removed without a need to
114 * traverse the list. New elements can be added to the list before or after
115 * an existing element, at the head of the list, or at the end of the list.
116 * A circle queue may be traversed in either direction, but has a more
117 * complex end of list detection.
118 * Note that circle queues are deprecated, because, as the removal log
119 * in FreeBSD states, "CIRCLEQs are a disgrace to everything Knuth taught
120 * us in Volume 1 Chapter 2. [...] Use TAILQ instead, it provides the same
121 * functionality." Code using them will continue to compile, but they
122 * are no longer documented on the man page.
123 *
124 * For details on the use of these macros, see the queue(3) manual page.
125 *
126 *
127 * SLIST LIST STAILQ TAILQ CIRCLEQ
128 * _HEAD + + + + +
129 * _HEAD_INITIALIZER + + + + -
130 * _ENTRY + + + + +
131 * _INIT + + + + +
132 * _EMPTY + + + + +
133 * _FIRST + + + + +
134 * _NEXT + + + + +
135 * _PREV - - - + +
136 * _LAST - - + + +
137 * _FOREACH + + + + +
138 * _FOREACH_SAFE + + + + -
139 * _FOREACH_REVERSE - - - + -
140 * _FOREACH_REVERSE_SAFE - - - + -
141 * _INSERT_HEAD + + + + +
142 * _INSERT_BEFORE - + - + +
143 * _INSERT_AFTER + + + + +
144 * _INSERT_TAIL - - + + +
145 * _CONCAT - - + + -
146 * _REMOVE_AFTER + - + - -
147 * _REMOVE_HEAD + - + - -
148 * _REMOVE_HEAD_UNTIL - - + - -
149 * _REMOVE + + + + +
150 * _SWAP - + + + -
151 *
152 */
#ifdef QUEUE_MACRO_DEBUG
/* Store the last 2 places the queue element or head was altered */
struct qm_trace {
	char * lastfile;
	int lastline;
	char * prevfile;
	int prevline;
};

/* Embed a qm_trace record in a queue head/entry (no-op unless debugging). */
#define TRACEBUF	struct qm_trace trace;
/* Poison a stale link pointer so any use-after-remove faults immediately. */
#define TRASHIT(x)	do {(x) = (void *)-1;} while (0)

/* Record __FILE__/__LINE__ of the two most recent alterations of 'head'. */
#define QMD_TRACE_HEAD(head) do {					\
	(head)->trace.prevline = (head)->trace.lastline;		\
	(head)->trace.prevfile = (head)->trace.lastfile;		\
	(head)->trace.lastline = __LINE__;				\
	(head)->trace.lastfile = __FILE__;				\
} while (0)

/* Record __FILE__/__LINE__ of the two most recent alterations of 'elem'. */
#define QMD_TRACE_ELEM(elem) do {					\
	(elem)->trace.prevline = (elem)->trace.lastline;		\
	(elem)->trace.prevfile = (elem)->trace.lastfile;		\
	(elem)->trace.lastline = __LINE__;				\
	(elem)->trace.lastfile = __FILE__;				\
} while (0)

#else
/* Debugging disabled: all trace hooks compile away to nothing. */
#define QMD_TRACE_ELEM(elem)
#define QMD_TRACE_HEAD(head)
#define TRACEBUF
#define TRASHIT(x)
#endif  /* QUEUE_MACRO_DEBUG */
185
186 /*
187 * Horrible macros to enable use of code that was meant to be C-specific
188 * (and which push struct onto type) in C++; without these, C++ code
189 * that uses these macros in the context of a class will blow up
190 * due to "struct" being preprended to "type" by the macros, causing
191 * inconsistent use of tags.
192 *
193 * This approach is necessary because these are macros; we have to use
194 * these on a per-macro basis (because the queues are implemented as
195 * macros, disabling this warning in the scope of the header file is
 * insufficient), which means we can't use #pragma, and have to use
197 * _Pragma. We only need to use these for the queue macros that
198 * prepend "struct" to "type" and will cause C++ to blow up.
199 */
#if defined(__clang__) && defined(__cplusplus)
/*
 * Silence clang's -Wmismatched-tags inside the queue macros, which
 * unconditionally prepend "struct" to the user-supplied type name.
 */
#define __MISMATCH_TAGS_PUSH						\
	_Pragma("clang diagnostic push")				\
	_Pragma("clang diagnostic ignored \"-Wmismatched-tags\"")
#define __MISMATCH_TAGS_POP						\
	_Pragma("clang diagnostic pop")
#else
/* Not clang/C++: the warning does not exist, so these expand to nothing. */
#define __MISMATCH_TAGS_PUSH
#define __MISMATCH_TAGS_POP
#endif
210
211 /*!
212 * Ensures that these macros can safely be used in structs when compiling with
213 * clang. The macros do not allow for nullability attributes to be specified due
214 * to how they are expanded. For example:
215 *
216 * SLIST_HEAD(, foo _Nullable) bar;
217 *
218 * expands to
219 *
220 * struct {
221 * struct foo _Nullable *slh_first;
222 * }
223 *
224 * which is not valid because the nullability specifier has to apply to the
225 * pointer. So just ignore nullability completeness in all the places where this
226 * is an issue.
227 */
#if defined(__clang__)
/*
 * Silence clang's -Wnullability-completeness inside the queue macros;
 * the expansion leaves no place to attach a nullability qualifier to
 * the generated link pointers (see the block comment above).
 */
#define __NULLABILITY_COMPLETENESS_PUSH \
	_Pragma("clang diagnostic push") \
	_Pragma("clang diagnostic ignored \"-Wnullability-completeness\"")
#define __NULLABILITY_COMPLETENESS_POP \
	_Pragma("clang diagnostic pop")
#else
/* Non-clang compilers do not have this diagnostic. */
#define __NULLABILITY_COMPLETENESS_PUSH
#define __NULLABILITY_COMPLETENESS_POP
#endif
238
239 /*
240 * Singly-linked List declarations.
241 */
/* Declare a struct 'name' that heads a singly-linked list of 'type'. */
#define SLIST_HEAD(name, type)						\
__MISMATCH_TAGS_PUSH							\
__NULLABILITY_COMPLETENESS_PUSH						\
struct name {								\
	struct type *slh_first;	/* first element */			\
}									\
__NULLABILITY_COMPLETENESS_POP						\
__MISMATCH_TAGS_POP

/* Static initializer for an empty SLIST_HEAD. */
#define SLIST_HEAD_INITIALIZER(head)					\
	{ NULL }

/* Declare the per-element linkage embedded in each 'type'. */
#define SLIST_ENTRY(type)						\
__MISMATCH_TAGS_PUSH							\
__NULLABILITY_COMPLETENESS_PUSH						\
struct {								\
	struct type *sle_next;	/* next element */			\
}									\
__NULLABILITY_COMPLETENESS_POP						\
__MISMATCH_TAGS_POP
262
263 /*
264 * Singly-linked List functions.
265 */
/* True if the list contains no elements. */
#define SLIST_EMPTY(head)	((head)->slh_first == NULL)

#define SLIST_FIRST(head)	((head)->slh_first)

/* Forward iteration; 'var' must not be removed inside the loop body. */
#define SLIST_FOREACH(var, head, field)					\
	for ((var) = SLIST_FIRST((head));				\
	    (var);							\
	    (var) = SLIST_NEXT((var), field))

/* Removal-safe iteration: 'tvar' caches the successor before the body runs. */
#define SLIST_FOREACH_SAFE(var, head, field, tvar)			\
	for ((var) = SLIST_FIRST((head));				\
	    (var) && ((tvar) = SLIST_NEXT((var), field), 1);		\
	    (var) = (tvar))

/*
 * Iterate while also tracking 'varp', the address of the pointer that
 * references 'var' — allows O(1) unlink of the current element.
 */
#define SLIST_FOREACH_PREVPTR(var, varp, head, field)			\
	for ((varp) = &SLIST_FIRST((head));				\
	    ((var) = *(varp)) != NULL;					\
	    (varp) = &SLIST_NEXT((var), field))

#define SLIST_INIT(head) do {						\
	SLIST_FIRST((head)) = NULL;					\
} while (0)

/* Link 'elm' immediately after 'slistelm'. */
#define SLIST_INSERT_AFTER(slistelm, elm, field) do {			\
	SLIST_NEXT((elm), field) = SLIST_NEXT((slistelm), field);	\
	SLIST_NEXT((slistelm), field) = (elm);				\
} while (0)

/* Link 'elm' as the new first element. */
#define SLIST_INSERT_HEAD(head, elm, field) do {			\
	SLIST_NEXT((elm), field) = SLIST_FIRST((head));			\
	SLIST_FIRST((head)) = (elm);					\
} while (0)

#define SLIST_NEXT(elm, field)	((elm)->field.sle_next)

/*
 * O(n) removal of an arbitrary element: walks from the head to find
 * the predecessor of 'elm'.  'elm' must be on the list.
 */
#define SLIST_REMOVE(head, elm, type, field)				\
__MISMATCH_TAGS_PUSH							\
__NULLABILITY_COMPLETENESS_PUSH						\
do {									\
	if (SLIST_FIRST((head)) == (elm)) {				\
		SLIST_REMOVE_HEAD((head), field);			\
	}								\
	else {								\
		struct type *curelm = SLIST_FIRST((head));		\
		while (SLIST_NEXT(curelm, field) != (elm))		\
			curelm = SLIST_NEXT(curelm, field);		\
		SLIST_REMOVE_AFTER(curelm, field);			\
	}								\
	TRASHIT((elm)->field.sle_next);					\
} while (0)								\
__NULLABILITY_COMPLETENESS_POP						\
__MISMATCH_TAGS_POP

/* Unlink the element following 'elm' (a successor must exist). */
#define SLIST_REMOVE_AFTER(elm, field) do {				\
	SLIST_NEXT(elm, field) =					\
	    SLIST_NEXT(SLIST_NEXT(elm, field), field);			\
} while (0)

/* Unlink the first element (the list must be non-empty). */
#define SLIST_REMOVE_HEAD(head, field) do {				\
	SLIST_FIRST((head)) = SLIST_NEXT(SLIST_FIRST((head)), field);	\
} while (0)
327
328 /*
329 * Singly-linked Tail queue declarations.
330 */
/* Declare a struct 'name' heading a singly-linked tail queue of 'type'. */
#define STAILQ_HEAD(name, type)						\
__MISMATCH_TAGS_PUSH							\
__NULLABILITY_COMPLETENESS_PUSH						\
struct name {								\
	struct type *stqh_first;/* first element */			\
	struct type **stqh_last;/* addr of last next element */		\
}									\
__NULLABILITY_COMPLETENESS_POP						\
__MISMATCH_TAGS_POP

/* Static initializer: empty queue, tail pointer aimed at stqh_first. */
#define STAILQ_HEAD_INITIALIZER(head)					\
	{ NULL, &(head).stqh_first }

/* Declare the per-element linkage embedded in each 'type'. */
#define STAILQ_ENTRY(type)						\
__MISMATCH_TAGS_PUSH							\
__NULLABILITY_COMPLETENESS_PUSH						\
struct {								\
	struct type *stqe_next;	/* next element */			\
}									\
__NULLABILITY_COMPLETENESS_POP						\
__MISMATCH_TAGS_POP
352
353 /*
354 * Singly-linked Tail queue functions.
355 */
/* Append all of head2 onto the end of head1 and reset head2 to empty. */
#define STAILQ_CONCAT(head1, head2) do {				\
	if (!STAILQ_EMPTY((head2))) {					\
		*(head1)->stqh_last = (head2)->stqh_first;		\
		(head1)->stqh_last = (head2)->stqh_last;		\
		STAILQ_INIT((head2));					\
	}								\
} while (0)

#define STAILQ_EMPTY(head)	((head)->stqh_first == NULL)

#define STAILQ_FIRST(head)	((head)->stqh_first)

/* Forward iteration; 'var' must not be removed inside the loop body. */
#define STAILQ_FOREACH(var, head, field)				\
	for((var) = STAILQ_FIRST((head));				\
	   (var);							\
	   (var) = STAILQ_NEXT((var), field))


/* Removal-safe iteration: 'tvar' caches the successor before the body runs. */
#define STAILQ_FOREACH_SAFE(var, head, field, tvar)			\
	for ((var) = STAILQ_FIRST((head));				\
	    (var) && ((tvar) = STAILQ_NEXT((var), field), 1);		\
	    (var) = (tvar))

#define STAILQ_INIT(head) do {						\
	STAILQ_FIRST((head)) = NULL;					\
	(head)->stqh_last = &STAILQ_FIRST((head));			\
} while (0)

/* Link 'elm' after 'tqelm'; fix the tail pointer if 'elm' becomes last. */
#define STAILQ_INSERT_AFTER(head, tqelm, elm, field) do {		\
	if ((STAILQ_NEXT((elm), field) = STAILQ_NEXT((tqelm), field)) == NULL)\
		(head)->stqh_last = &STAILQ_NEXT((elm), field);		\
	STAILQ_NEXT((tqelm), field) = (elm);				\
} while (0)

/* Link 'elm' as the new first element; fix the tail pointer if it is also last. */
#define STAILQ_INSERT_HEAD(head, elm, field) do {			\
	if ((STAILQ_NEXT((elm), field) = STAILQ_FIRST((head))) == NULL)	\
		(head)->stqh_last = &STAILQ_NEXT((elm), field);		\
	STAILQ_FIRST((head)) = (elm);					\
} while (0)

/* Link 'elm' as the new last element. */
#define STAILQ_INSERT_TAIL(head, elm, field) do {			\
	STAILQ_NEXT((elm), field) = NULL;				\
	*(head)->stqh_last = (elm);					\
	(head)->stqh_last = &STAILQ_NEXT((elm), field);			\
} while (0)

/*
 * Last element, or NULL if empty.  Recovers the element address from
 * stqh_last (which points at the last element's stqe_next field) by
 * subtracting the field offset.
 */
#define STAILQ_LAST(head, type, field)					\
__MISMATCH_TAGS_PUSH							\
__NULLABILITY_COMPLETENESS_PUSH						\
	(STAILQ_EMPTY((head)) ?						\
	    NULL :							\
	    ((struct type *)(void *)				\
	    ((char *)((head)->stqh_last) - __offsetof(struct type, field))))\
__NULLABILITY_COMPLETENESS_POP						\
__MISMATCH_TAGS_POP

#define STAILQ_NEXT(elm, field)	((elm)->field.stqe_next)

/*
 * O(n) removal of an arbitrary element: walks from the head to find
 * the predecessor of 'elm'.  'elm' must be on the queue.
 */
#define STAILQ_REMOVE(head, elm, type, field)				\
__MISMATCH_TAGS_PUSH							\
__NULLABILITY_COMPLETENESS_PUSH						\
do {									\
	if (STAILQ_FIRST((head)) == (elm)) {				\
		STAILQ_REMOVE_HEAD((head), field);			\
	}								\
	else {								\
		struct type *curelm = STAILQ_FIRST((head));		\
		while (STAILQ_NEXT(curelm, field) != (elm))		\
			curelm = STAILQ_NEXT(curelm, field);		\
		STAILQ_REMOVE_AFTER(head, curelm, field);		\
	}								\
	TRASHIT((elm)->field.stqe_next);				\
} while (0)								\
__NULLABILITY_COMPLETENESS_POP						\
__MISMATCH_TAGS_POP

/* Unlink the first element (queue must be non-empty). */
#define STAILQ_REMOVE_HEAD(head, field) do {				\
	if ((STAILQ_FIRST((head)) =					\
	     STAILQ_NEXT(STAILQ_FIRST((head)), field)) == NULL)		\
		(head)->stqh_last = &STAILQ_FIRST((head));		\
} while (0)

/* Drop every element before and including... NOTE(review): this unlinks
 * elements from the head up to (but not including) the successor of 'elm';
 * 'elm' must be on the queue. */
#define STAILQ_REMOVE_HEAD_UNTIL(head, elm, field) do {			\
	if ((STAILQ_FIRST((head)) = STAILQ_NEXT((elm), field)) == NULL)	\
		(head)->stqh_last = &STAILQ_FIRST((head));		\
} while (0)

/* Unlink the element following 'elm' (a successor must exist). */
#define STAILQ_REMOVE_AFTER(head, elm, field) do {			\
	if ((STAILQ_NEXT(elm, field) =					\
	     STAILQ_NEXT(STAILQ_NEXT(elm, field), field)) == NULL)	\
		(head)->stqh_last = &STAILQ_NEXT((elm), field);		\
} while (0)

/* Exchange the contents of two queues, re-aiming empty tails at their own heads. */
#define STAILQ_SWAP(head1, head2, type)					\
__MISMATCH_TAGS_PUSH							\
__NULLABILITY_COMPLETENESS_PUSH						\
do {									\
	struct type *swap_first = STAILQ_FIRST(head1);			\
	struct type **swap_last = (head1)->stqh_last;			\
	STAILQ_FIRST(head1) = STAILQ_FIRST(head2);			\
	(head1)->stqh_last = (head2)->stqh_last;			\
	STAILQ_FIRST(head2) = swap_first;				\
	(head2)->stqh_last = swap_last;					\
	if (STAILQ_EMPTY(head1))					\
		(head1)->stqh_last = &STAILQ_FIRST(head1);		\
	if (STAILQ_EMPTY(head2))					\
		(head2)->stqh_last = &STAILQ_FIRST(head2);		\
} while (0)								\
__NULLABILITY_COMPLETENESS_POP						\
__MISMATCH_TAGS_POP
466
467
468 /*
469 * List declarations.
470 */
/* Declare a struct 'name' heading a doubly-linked list of 'type'. */
#define LIST_HEAD(name, type)						\
__MISMATCH_TAGS_PUSH							\
__NULLABILITY_COMPLETENESS_PUSH						\
struct name {								\
	struct type *lh_first;	/* first element */			\
}									\
__NULLABILITY_COMPLETENESS_POP						\
__MISMATCH_TAGS_POP

/* Static initializer for an empty LIST_HEAD. */
#define LIST_HEAD_INITIALIZER(head)					\
	{ NULL }

/*
 * Per-element linkage: le_prev points at the previous element's le_next
 * (or at lh_first), which is what makes O(1) removal possible.
 */
#define LIST_ENTRY(type)						\
__MISMATCH_TAGS_PUSH							\
__NULLABILITY_COMPLETENESS_PUSH						\
struct {								\
	struct type *le_next;	/* next element */			\
	struct type **le_prev;	/* address of previous next element */	\
}									\
__NULLABILITY_COMPLETENESS_POP						\
__MISMATCH_TAGS_POP
492
493 /*
494 * List functions.
495 */
496
#ifdef KERNEL_PRIVATE
/*
 * Kernel-only linkage integrity checks: before following or rewriting
 * a neighbour's pointers, verify that its back-pointer still points at
 * us; on mismatch, take a fatal trap rather than corrupt memory.
 */
#define LIST_CHECK_HEAD(head, field) do {				\
	if (__improbable(						\
	      LIST_FIRST((head)) != NULL &&				\
	      LIST_FIRST((head))->field.le_prev !=			\
	      &LIST_FIRST((head))))					\
		ml_fatal_trap_invalid_list_linkage((uintptr_t)(head));	\
} while (0)

#define LIST_CHECK_NEXT(elm, field) do {				\
	if (__improbable(						\
	      LIST_NEXT((elm), field) != NULL &&			\
	      LIST_NEXT((elm), field)->field.le_prev !=			\
	      &((elm)->field.le_next)))					\
		ml_fatal_trap_invalid_list_linkage((uintptr_t)(elm));	\
} while (0)

#define LIST_CHECK_PREV(elm, field) do {				\
	if (__improbable(*(elm)->field.le_prev != (elm)))		\
		ml_fatal_trap_invalid_list_linkage((uintptr_t)(elm));	\
} while (0)
#else
/* Userspace: the linkage checks compile away to nothing. */
#define LIST_CHECK_HEAD(head, field)
#define LIST_CHECK_NEXT(elm, field)
#define LIST_CHECK_PREV(elm, field)
#endif /* KERNEL_PRIVATE */
523
/* True if the list contains no elements. */
#define LIST_EMPTY(head)	((head)->lh_first == NULL)

#define LIST_FIRST(head)	((head)->lh_first)

/* Forward iteration; 'var' must not be removed inside the loop body. */
#define LIST_FOREACH(var, head, field)					\
	for ((var) = LIST_FIRST((head));				\
	    (var);							\
	    (var) = LIST_NEXT((var), field))

/* Removal-safe iteration: 'tvar' caches the successor before the body runs. */
#define LIST_FOREACH_SAFE(var, head, field, tvar)			\
	for ((var) = LIST_FIRST((head));				\
	    (var) && ((tvar) = LIST_NEXT((var), field), 1);		\
	    (var) = (tvar))

#define LIST_INIT(head) do {						\
	LIST_FIRST((head)) = NULL;					\
} while (0)

/* Link 'elm' immediately after 'listelm'. */
#define LIST_INSERT_AFTER(listelm, elm, field) do {			\
	LIST_CHECK_NEXT(listelm, field);				\
	if ((LIST_NEXT((elm), field) = LIST_NEXT((listelm), field)) != NULL)\
		LIST_NEXT((listelm), field)->field.le_prev =		\
		    &LIST_NEXT((elm), field);				\
	LIST_NEXT((listelm), field) = (elm);				\
	(elm)->field.le_prev = &LIST_NEXT((listelm), field);		\
} while (0)

/* Link 'elm' immediately before 'listelm'. */
#define LIST_INSERT_BEFORE(listelm, elm, field) do {			\
	LIST_CHECK_PREV(listelm, field);				\
	(elm)->field.le_prev = (listelm)->field.le_prev;		\
	LIST_NEXT((elm), field) = (listelm);				\
	*(listelm)->field.le_prev = (elm);				\
	(listelm)->field.le_prev = &LIST_NEXT((elm), field);		\
} while (0)

/* Link 'elm' as the new first element. */
#define LIST_INSERT_HEAD(head, elm, field) do {				\
	LIST_CHECK_HEAD((head), field);					\
	if ((LIST_NEXT((elm), field) = LIST_FIRST((head))) != NULL)	\
		LIST_FIRST((head))->field.le_prev = &LIST_NEXT((elm), field);\
	LIST_FIRST((head)) = (elm);					\
	(elm)->field.le_prev = &LIST_FIRST((head));			\
} while (0)

#define LIST_NEXT(elm, field)	((elm)->field.le_next)

/* O(1) removal: no head needed because le_prev addresses the predecessor link. */
#define LIST_REMOVE(elm, field) do {					\
	LIST_CHECK_NEXT(elm, field);					\
	LIST_CHECK_PREV(elm, field);					\
	if (LIST_NEXT((elm), field) != NULL)				\
		LIST_NEXT((elm), field)->field.le_prev =		\
		    (elm)->field.le_prev;				\
	*(elm)->field.le_prev = LIST_NEXT((elm), field);		\
	TRASHIT((elm)->field.le_next);					\
	TRASHIT((elm)->field.le_prev);					\
} while (0)

/* Exchange the contents of two lists, fixing each new first element's le_prev. */
#define LIST_SWAP(head1, head2, type, field)				\
__MISMATCH_TAGS_PUSH							\
__NULLABILITY_COMPLETENESS_PUSH						\
do {									\
	struct type *swap_tmp = LIST_FIRST((head1));			\
	LIST_FIRST((head1)) = LIST_FIRST((head2));			\
	LIST_FIRST((head2)) = swap_tmp;					\
	if ((swap_tmp = LIST_FIRST((head1))) != NULL)			\
		swap_tmp->field.le_prev = &LIST_FIRST((head1));		\
	if ((swap_tmp = LIST_FIRST((head2))) != NULL)			\
		swap_tmp->field.le_prev = &LIST_FIRST((head2));		\
} while (0)								\
__NULLABILITY_COMPLETENESS_POP						\
__MISMATCH_TAGS_POP
594
595 /*
596 * Tail queue declarations.
597 */
/* Declare a struct 'name' heading a doubly-linked tail queue of 'type'. */
#define TAILQ_HEAD(name, type)						\
__MISMATCH_TAGS_PUSH							\
__NULLABILITY_COMPLETENESS_PUSH						\
struct name {								\
	struct type *tqh_first;	/* first element */			\
	struct type **tqh_last;	/* addr of last next element */		\
	TRACEBUF							\
}									\
__NULLABILITY_COMPLETENESS_POP						\
__MISMATCH_TAGS_POP

/* Static initializer: empty queue, tail pointer aimed at tqh_first. */
#define TAILQ_HEAD_INITIALIZER(head)					\
	{ NULL, &(head).tqh_first }

/*
 * Per-element linkage: tqe_prev points at the previous element's
 * tqe_next (or at tqh_first), enabling O(1) removal.
 */
#define TAILQ_ENTRY(type)						\
__MISMATCH_TAGS_PUSH							\
__NULLABILITY_COMPLETENESS_PUSH						\
struct {								\
	struct type *tqe_next;	/* next element */			\
	struct type **tqe_prev;	/* address of previous next element */	\
	TRACEBUF							\
}									\
__NULLABILITY_COMPLETENESS_POP						\
__MISMATCH_TAGS_POP
622
623 /*
624 * Tail queue functions.
625 */
#ifdef KERNEL_PRIVATE
/*
 * Kernel-only linkage integrity checks (same pattern as LIST_CHECK_*):
 * trap fatally if a neighbour's back-pointer no longer points at us.
 */
#define TAILQ_CHECK_HEAD(head, field) do {				\
	if (__improbable(						\
	      TAILQ_FIRST((head)) != NULL &&				\
	      TAILQ_FIRST((head))->field.tqe_prev !=			\
	      &TAILQ_FIRST((head))))					\
		ml_fatal_trap_invalid_list_linkage((uintptr_t)(head));	\
} while (0)

#define TAILQ_CHECK_NEXT(elm, field) do {				\
	if (__improbable(						\
	      TAILQ_NEXT((elm), field) != NULL &&			\
	      TAILQ_NEXT((elm), field)->field.tqe_prev !=		\
	      &((elm)->field.tqe_next)))				\
		ml_fatal_trap_invalid_list_linkage((uintptr_t)(elm));	\
} while(0)

#define TAILQ_CHECK_PREV(elm, field) do {				\
	if (__improbable(*(elm)->field.tqe_prev != (elm)))		\
		ml_fatal_trap_invalid_list_linkage((uintptr_t)(elm));	\
} while(0)
#else
/* Userspace: the linkage checks compile away to nothing. */
#define TAILQ_CHECK_HEAD(head, field)
#define TAILQ_CHECK_NEXT(elm, field)
#define TAILQ_CHECK_PREV(elm, field)
#endif /* KERNEL_PRIVATE */
652
/* Append all of head2 onto the end of head1 and reset head2 to empty. */
#define TAILQ_CONCAT(head1, head2, field) do {				\
	if (!TAILQ_EMPTY(head2)) {					\
		*(head1)->tqh_last = (head2)->tqh_first;		\
		(head2)->tqh_first->field.tqe_prev = (head1)->tqh_last;	\
		(head1)->tqh_last = (head2)->tqh_last;			\
		TAILQ_INIT((head2));					\
		QMD_TRACE_HEAD(head1);					\
		QMD_TRACE_HEAD(head2);					\
	}								\
} while (0)

#define TAILQ_EMPTY(head)	((head)->tqh_first == NULL)

#define TAILQ_FIRST(head)	((head)->tqh_first)

/* Forward iteration; 'var' must not be removed inside the loop body. */
#define TAILQ_FOREACH(var, head, field)					\
	for ((var) = TAILQ_FIRST((head));				\
	    (var);							\
	    (var) = TAILQ_NEXT((var), field))

/* Removal-safe forward iteration ('tvar' caches the successor). */
#define TAILQ_FOREACH_SAFE(var, head, field, tvar)			\
	for ((var) = TAILQ_FIRST((head));				\
	    (var) && ((tvar) = TAILQ_NEXT((var), field), 1);		\
	    (var) = (tvar))

/* Reverse iteration, from the last element back to the first. */
#define TAILQ_FOREACH_REVERSE(var, head, headname, field)		\
	for ((var) = TAILQ_LAST((head), headname);			\
	    (var);							\
	    (var) = TAILQ_PREV((var), headname, field))

/* Removal-safe reverse iteration ('tvar' caches the predecessor). */
#define TAILQ_FOREACH_REVERSE_SAFE(var, head, headname, field, tvar)	\
	for ((var) = TAILQ_LAST((head), headname);			\
	    (var) && ((tvar) = TAILQ_PREV((var), headname, field), 1);	\
	    (var) = (tvar))

#if XNU_KERNEL_PRIVATE
/*
 * Can be used when the initialized HEAD was just bzeroed
 * Works around deficiencies in clang analysis of initialization patterns.
 * See: <rdar://problem/47939050>
 */
#define TAILQ_INIT_AFTER_BZERO(head) do {				\
	(head)->tqh_last = &TAILQ_FIRST((head));			\
} while (0)
#endif /* XNU_KERNEL_PRIVATE */

#define TAILQ_INIT(head) do {						\
	TAILQ_FIRST((head)) = NULL;					\
	(head)->tqh_last = &TAILQ_FIRST((head));			\
	QMD_TRACE_HEAD(head);						\
} while (0)


/* Link 'elm' immediately after 'listelm'; fix tqh_last if 'elm' becomes last. */
#define TAILQ_INSERT_AFTER(head, listelm, elm, field) do {		\
	TAILQ_CHECK_NEXT(listelm, field);				\
	if ((TAILQ_NEXT((elm), field) = TAILQ_NEXT((listelm), field)) != NULL)\
		TAILQ_NEXT((elm), field)->field.tqe_prev =		\
		    &TAILQ_NEXT((elm), field);				\
	else {								\
		(head)->tqh_last = &TAILQ_NEXT((elm), field);		\
		QMD_TRACE_HEAD(head);					\
	}								\
	TAILQ_NEXT((listelm), field) = (elm);				\
	(elm)->field.tqe_prev = &TAILQ_NEXT((listelm), field);		\
	QMD_TRACE_ELEM(&(elm)->field);					\
	QMD_TRACE_ELEM(&listelm->field);				\
} while (0)

/* Link 'elm' immediately before 'listelm' (no head needed). */
#define TAILQ_INSERT_BEFORE(listelm, elm, field) do {			\
	TAILQ_CHECK_PREV(listelm, field);				\
	(elm)->field.tqe_prev = (listelm)->field.tqe_prev;		\
	TAILQ_NEXT((elm), field) = (listelm);				\
	*(listelm)->field.tqe_prev = (elm);				\
	(listelm)->field.tqe_prev = &TAILQ_NEXT((elm), field);		\
	QMD_TRACE_ELEM(&(elm)->field);					\
	QMD_TRACE_ELEM(&listelm->field);				\
} while (0)

/* Link 'elm' as the new first element. */
#define TAILQ_INSERT_HEAD(head, elm, field) do {			\
	TAILQ_CHECK_HEAD(head, field);					\
	if ((TAILQ_NEXT((elm), field) = TAILQ_FIRST((head))) != NULL)	\
		TAILQ_FIRST((head))->field.tqe_prev =			\
		    &TAILQ_NEXT((elm), field);				\
	else								\
		(head)->tqh_last = &TAILQ_NEXT((elm), field);		\
	TAILQ_FIRST((head)) = (elm);					\
	(elm)->field.tqe_prev = &TAILQ_FIRST((head));			\
	QMD_TRACE_HEAD(head);						\
	QMD_TRACE_ELEM(&(elm)->field);					\
} while (0)

/* Link 'elm' as the new last element. */
#define TAILQ_INSERT_TAIL(head, elm, field) do {			\
	TAILQ_NEXT((elm), field) = NULL;				\
	(elm)->field.tqe_prev = (head)->tqh_last;			\
	*(head)->tqh_last = (elm);					\
	(head)->tqh_last = &TAILQ_NEXT((elm), field);			\
	QMD_TRACE_HEAD(head);						\
	QMD_TRACE_ELEM(&(elm)->field);					\
} while (0)

/*
 * Last element, or NULL for an empty queue.  Relies on the "trick" that
 * tqh_last / tqe_prev point at a link field laid out exactly like a head.
 */
#define TAILQ_LAST(head, headname)					\
__MISMATCH_TAGS_PUSH							\
__NULLABILITY_COMPLETENESS_PUSH						\
	(*(((struct headname *)((head)->tqh_last))->tqh_last))		\
__NULLABILITY_COMPLETENESS_POP						\
__MISMATCH_TAGS_POP

#define TAILQ_NEXT(elm, field)	((elm)->field.tqe_next)

/* Previous element, or NULL if 'elm' is first (same layout trick as TAILQ_LAST). */
#define TAILQ_PREV(elm, headname, field)				\
__MISMATCH_TAGS_PUSH							\
__NULLABILITY_COMPLETENESS_PUSH						\
	(*(((struct headname *)((elm)->field.tqe_prev))->tqh_last))	\
__NULLABILITY_COMPLETENESS_POP						\
__MISMATCH_TAGS_POP

/* O(1) removal; link fields of 'elm' are poisoned afterwards (TRASHIT). */
#define TAILQ_REMOVE(head, elm, field) do {				\
	TAILQ_CHECK_NEXT(elm, field);					\
	TAILQ_CHECK_PREV(elm, field);					\
	if ((TAILQ_NEXT((elm), field)) != NULL)				\
		TAILQ_NEXT((elm), field)->field.tqe_prev =		\
		    (elm)->field.tqe_prev;				\
	else {								\
		(head)->tqh_last = (elm)->field.tqe_prev;		\
		QMD_TRACE_HEAD(head);					\
	}								\
	*(elm)->field.tqe_prev = TAILQ_NEXT((elm), field);		\
	TRASHIT((elm)->field.tqe_next);					\
	TRASHIT((elm)->field.tqe_prev);					\
	QMD_TRACE_ELEM(&(elm)->field);					\
} while (0)
784
785 /*
786 * Why did they switch to spaces for this one macro?
787 */
/*
 * Exchange the contents of two tail queues.  After the raw swap, each
 * queue's first element (if any) gets its tqe_prev re-aimed at the new
 * head; an empty queue gets its tqh_last re-aimed at its own tqh_first.
 */
#define TAILQ_SWAP(head1, head2, type, field)				\
__MISMATCH_TAGS_PUSH							\
__NULLABILITY_COMPLETENESS_PUSH						\
do {									\
	struct type *swap_first = (head1)->tqh_first;			\
	struct type **swap_last = (head1)->tqh_last;			\
	(head1)->tqh_first = (head2)->tqh_first;			\
	(head1)->tqh_last = (head2)->tqh_last;				\
	(head2)->tqh_first = swap_first;				\
	(head2)->tqh_last = swap_last;					\
	if ((swap_first = (head1)->tqh_first) != NULL)			\
		swap_first->field.tqe_prev = &(head1)->tqh_first;	\
	else								\
		(head1)->tqh_last = &(head1)->tqh_first;		\
	if ((swap_first = (head2)->tqh_first) != NULL)			\
		swap_first->field.tqe_prev = &(head2)->tqh_first;	\
	else								\
		(head2)->tqh_last = &(head2)->tqh_first;		\
} while (0)								\
__NULLABILITY_COMPLETENESS_POP						\
__MISMATCH_TAGS_POP
809
810 /*
811 * Circular queue definitions.
812 */
/*
 * Declare a struct 'name' heading a circular queue of 'type'.
 * NOTE: in an empty CIRCLEQ both pointers refer to the head itself,
 * cast through (void *) — see CIRCLEQ_EMPTY/CIRCLEQ_INIT below.
 */
#define CIRCLEQ_HEAD(name, type)					\
__MISMATCH_TAGS_PUSH							\
__NULLABILITY_COMPLETENESS_PUSH						\
struct name {								\
	struct type *cqh_first;		/* first element */		\
	struct type *cqh_last;		/* last element */		\
}									\
__NULLABILITY_COMPLETENESS_POP						\
__MISMATCH_TAGS_POP

/* Per-element linkage: plain next/prev element pointers (the list is circular). */
#define CIRCLEQ_ENTRY(type)						\
__MISMATCH_TAGS_PUSH							\
__NULLABILITY_COMPLETENESS_PUSH						\
struct {								\
	struct type *cqe_next;		/* next element */		\
	struct type *cqe_prev;		/* previous element */		\
}									\
__NULLABILITY_COMPLETENESS_POP						\
__MISMATCH_TAGS_POP
832
833 /*
834 * Circular queue functions.
835 */
#ifdef KERNEL_PRIVATE
/*
 * Kernel-only linkage integrity checks for circular queues; the head
 * itself acts as the list terminator, hence the (void*)(head) compares.
 */
#define CIRCLEQ_CHECK_HEAD(head, field) do {				\
	if (__improbable(						\
	      CIRCLEQ_FIRST((head)) != ((void*)(head)) &&		\
	      CIRCLEQ_FIRST((head))->field.cqe_prev != ((void*)(head))))\
		ml_fatal_trap_invalid_list_linkage((uintptr_t)(head));	\
} while(0)
#define CIRCLEQ_CHECK_NEXT(head, elm, field) do {			\
	if (__improbable(						\
	      CIRCLEQ_NEXT((elm), field) != ((void*)(head)) &&		\
	      CIRCLEQ_NEXT((elm), field)->field.cqe_prev != (elm)))	\
		ml_fatal_trap_invalid_list_linkage((uintptr_t)(elm));	\
} while(0)
#define CIRCLEQ_CHECK_PREV(head, elm, field) do {			\
	if (__improbable(						\
	      CIRCLEQ_PREV((elm), field) != ((void*)(head)) &&		\
	      CIRCLEQ_PREV((elm), field)->field.cqe_next != (elm)))	\
		ml_fatal_trap_invalid_list_linkage((uintptr_t)(elm));	\
} while(0)
#else
/* Userspace: the linkage checks compile away to nothing. */
#define CIRCLEQ_CHECK_HEAD(head, field)
#define CIRCLEQ_CHECK_NEXT(head, elm, field)
#define CIRCLEQ_CHECK_PREV(head, elm, field)
#endif /* KERNEL_PRIVATE */
860
/* Empty when the first "element" is the head itself (the terminator). */
#define CIRCLEQ_EMPTY(head)	((head)->cqh_first == (void *)(head))

#define CIRCLEQ_FIRST(head)	((head)->cqh_first)

/* Forward iteration; terminates when the walk wraps back to the head. */
#define CIRCLEQ_FOREACH(var, head, field)				\
	for((var) = (head)->cqh_first;					\
	    (var) != (void *)(head);					\
	    (var) = (var)->field.cqe_next)

/* Initialize to empty: both head pointers refer to the head itself. */
#define CIRCLEQ_INIT(head) do {						\
	(head)->cqh_first = (void *)(head);				\
	(head)->cqh_last = (void *)(head);				\
} while (0)

/* Link 'elm' immediately after 'listelm'; update cqh_last if 'elm' becomes last. */
#define CIRCLEQ_INSERT_AFTER(head, listelm, elm, field) do {		\
	CIRCLEQ_CHECK_NEXT(head, listelm, field);			\
	(elm)->field.cqe_next = (listelm)->field.cqe_next;		\
	(elm)->field.cqe_prev = (listelm);				\
	if ((listelm)->field.cqe_next == (void *)(head))		\
		(head)->cqh_last = (elm);				\
	else								\
		(listelm)->field.cqe_next->field.cqe_prev = (elm);	\
	(listelm)->field.cqe_next = (elm);				\
} while (0)

/* Link 'elm' immediately before 'listelm'; update cqh_first if 'elm' becomes first. */
#define CIRCLEQ_INSERT_BEFORE(head, listelm, elm, field) do {		\
	CIRCLEQ_CHECK_PREV(head, listelm, field);			\
	(elm)->field.cqe_next = (listelm);				\
	(elm)->field.cqe_prev = (listelm)->field.cqe_prev;		\
	if ((listelm)->field.cqe_prev == (void *)(head))		\
		(head)->cqh_first = (elm);				\
	else								\
		(listelm)->field.cqe_prev->field.cqe_next = (elm);	\
	(listelm)->field.cqe_prev = (elm);				\
} while (0)

/* Link 'elm' as the new first element. */
#define CIRCLEQ_INSERT_HEAD(head, elm, field) do {			\
	CIRCLEQ_CHECK_HEAD(head, field);				\
	(elm)->field.cqe_next = (head)->cqh_first;			\
	(elm)->field.cqe_prev = (void *)(head);				\
	if ((head)->cqh_last == (void *)(head))				\
		(head)->cqh_last = (elm);				\
	else								\
		(head)->cqh_first->field.cqe_prev = (elm);		\
	(head)->cqh_first = (elm);					\
} while (0)

/* Link 'elm' as the new last element. */
#define CIRCLEQ_INSERT_TAIL(head, elm, field) do {			\
	(elm)->field.cqe_next = (void *)(head);				\
	(elm)->field.cqe_prev = (head)->cqh_last;			\
	if ((head)->cqh_first == (void *)(head))			\
		(head)->cqh_first = (elm);				\
	else								\
		(head)->cqh_last->field.cqe_next = (elm);		\
	(head)->cqh_last = (elm);					\
} while (0)

#define CIRCLEQ_LAST(head)	((head)->cqh_last)

#define CIRCLEQ_NEXT(elm, field)	((elm)->field.cqe_next)

#define CIRCLEQ_PREV(elm, field)	((elm)->field.cqe_prev)

/* O(1) removal; either neighbour may be the head (the terminator). */
#define CIRCLEQ_REMOVE(head, elm, field) do {				\
	CIRCLEQ_CHECK_NEXT(head, elm, field);				\
	CIRCLEQ_CHECK_PREV(head, elm, field);				\
	if ((elm)->field.cqe_next == (void *)(head))			\
		(head)->cqh_last = (elm)->field.cqe_prev;		\
	else								\
		(elm)->field.cqe_next->field.cqe_prev =			\
		    (elm)->field.cqe_prev;				\
	if ((elm)->field.cqe_prev == (void *)(head))			\
		(head)->cqh_first = (elm)->field.cqe_next;		\
	else								\
		(elm)->field.cqe_prev->field.cqe_next =			\
		    (elm)->field.cqe_next;				\
} while (0)
938
939 #ifdef _KERNEL
940
941 #if NOTFB31
942
943 /*
944 * XXX insque() and remque() are an old way of handling certain queues.
945 * They bogusly assumes that all queue heads look alike.
946 */
947
struct quehead {
	struct quehead *qh_link;	/* next element */
	struct quehead *qh_rlink;	/* previous element */
};

#ifdef __GNUC__
#ifdef KERNEL_PRIVATE
/* Trap fatally if the forward neighbour of 'a' does not point back at 'a'. */
static __inline void
chkquenext(void *a)
{
	struct quehead *e = (struct quehead *)a;

	if (__improbable(e->qh_link != NULL &&
	    e->qh_link->qh_rlink != e)) {
		ml_fatal_trap_invalid_list_linkage((uintptr_t)a);
	}
}

/* Trap fatally if the backward neighbour of 'a' does not point back at 'a'. */
static __inline void
chkqueprev(void *a)
{
	struct quehead *e = (struct quehead *)a;

	if (__improbable(e->qh_rlink != NULL &&
	    e->qh_rlink->qh_link != e)) {
		ml_fatal_trap_invalid_list_linkage((uintptr_t)a);
	}
}
#else /* !KERNEL_PRIVATE */
/* Userspace: linkage checks compile away to nothing. */
#define chkquenext(a)
#define chkqueprev(a)
#endif /* KERNEL_PRIVATE */

/* Insert element 'a' immediately after element/head 'b' in a circular queue. */
static __inline void
insque(void *a, void *b)
{
	struct quehead *elem = (struct quehead *)a;
	struct quehead *head = (struct quehead *)b;

	chkquenext(head);

	elem->qh_link = head->qh_link;
	elem->qh_rlink = head;
	head->qh_link = elem;
	elem->qh_link->qh_rlink = elem;
}

/* Unlink element 'a' from its circular queue; qh_rlink is zeroed afterwards. */
static __inline void
remque(void *a)
{
	struct quehead *elem = (struct quehead *)a;

	chkquenext(elem);
	chkqueprev(elem);

	elem->qh_link->qh_rlink = elem->qh_rlink;
	elem->qh_rlink->qh_link = elem->qh_link;
	elem->qh_rlink = 0;
}

#else /* !__GNUC__ */

void	insque(void *a, void *b);
void	remque(void *a);

#endif /* __GNUC__ */
1010
1011 #endif /* NOTFB31 */
1012 #endif /* _KERNEL */
1013
1014 #endif /* !_SYS_QUEUE_H_ */
1015