/*-
 * Copyright (c) 2010 Isilon Systems, Inc.
 * Copyright (c) 2010 iX Systems, Inc.
 * Copyright (c) 2010 Panasas, Inc.
 * Copyright (c) 2013-2017 Mellanox Technologies, Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/mutex.h>

#include <machine/stdarg.h>

#include <linux/bitmap.h>
#include <linux/kobject.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/err.h>

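/*
 * MAX_IDR_LEVEL is the number of idr_layer levels needed to cover
 * MAX_IDR_SHIFT bits of ID space at IDR_BITS bits per level;
 * MAX_IDR_FREE, twice that, is how many spare layers idr_preload()
 * keeps in the per-CPU cache.
 */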
#define	MAX_IDR_LEVEL	((MAX_IDR_SHIFT + IDR_BITS - 1) / IDR_BITS)
#define	MAX_IDR_FREE	(MAX_IDR_LEVEL * 2)

struct linux_idr_cache {
	spinlock_t lock;
	struct idr_layer *head;
	unsigned count;
};

DPCPU_DEFINE_STATIC(struct linux_idr_cache, linux_idr_cache);

/*
 * IDR Implementation.
 *
 * This is quick and dirty and not as re-entrant as the Linux version;
 * however, it should be fairly fast.  It is basically a radix tree with
 * a built-in bitmap for allocation.
 */
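/*
 * Layout sketch (assuming IDR_BITS is 5, i.e. IDR_SIZE == 32): each
 * idr_layer holds 32 child pointers in ary[] plus a bitmap in which a
 * set bit means there is free space somewhere below that slot.  An ID
 * is the concatenation of the ary[] indices taken on the way down, so
 * a two-layer tree covers IDs 0..1023.
 */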
static MALLOC_DEFINE(M_IDR, "idr", "Linux IDR compat");

static struct idr_layer *
idr_preload_dequeue_locked(struct linux_idr_cache *lic)
{
	struct idr_layer *retval;

	/* check if wrong thread is trying to dequeue */
	if (mtx_owned(&lic->lock) == 0)
		return (NULL);

	retval = lic->head;
	if (likely(retval != NULL)) {
		lic->head = retval->ary[0];
		lic->count--;
		retval->ary[0] = NULL;
	}
	return (retval);
}

static void
idr_preload_init(void *arg)
{
	int cpu;

	CPU_FOREACH(cpu) {
		struct linux_idr_cache *lic =
		    DPCPU_ID_PTR(cpu, linux_idr_cache);

		spin_lock_init(&lic->lock);
	}
}
SYSINIT(idr_preload_init, SI_SUB_CPU, SI_ORDER_ANY, idr_preload_init, NULL);

static void
idr_preload_uninit(void *arg)
{
	int cpu;

	CPU_FOREACH(cpu) {
		struct idr_layer *cacheval;
		struct linux_idr_cache *lic =
		    DPCPU_ID_PTR(cpu, linux_idr_cache);

		while (1) {
			spin_lock(&lic->lock);
			cacheval = idr_preload_dequeue_locked(lic);
			spin_unlock(&lic->lock);

			if (cacheval == NULL)
				break;
			free(cacheval, M_IDR);
		}
		spin_lock_destroy(&lic->lock);
	}
}
SYSUNINIT(idr_preload_uninit, SI_SUB_LOCK, SI_ORDER_FIRST, idr_preload_uninit, NULL);

void
idr_preload(gfp_t gfp_mask)
{
	struct linux_idr_cache *lic;
	struct idr_layer *cacheval;

	sched_pin();

	lic = &DPCPU_GET(linux_idr_cache);

	/* fill up cache */
	spin_lock(&lic->lock);
	while (lic->count < MAX_IDR_FREE) {
		spin_unlock(&lic->lock);
		cacheval = malloc(sizeof(*cacheval), M_IDR, M_ZERO | gfp_mask);
		spin_lock(&lic->lock);
		if (cacheval == NULL)
			break;
		cacheval->ary[0] = lic->head;
		lic->head = cacheval;
		lic->count++;
	}
}

void
idr_preload_end(void)
{
	struct linux_idr_cache *lic;

	lic = &DPCPU_GET(linux_idr_cache);
	spin_unlock(&lic->lock);
	sched_unpin();
}
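
/*
 * idr_preload()/idr_preload_end() bracket a non-sleeping allocation:
 * idr_preload() pins the current thread and returns with the per-CPU
 * cache lock held, so the caller must not sleep until idr_preload_end().
 * A minimal usage sketch (names other than the idr_* calls are
 * hypothetical):
 *
 *	idr_preload(GFP_KERNEL);
 *	spin_lock(&obj_lock);
 *	id = idr_alloc(&obj_idr, obj, 0, 0, GFP_NOWAIT);
 *	spin_unlock(&obj_lock);
 *	idr_preload_end();
 */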

static inline int
idr_max(struct idr *idr)
{
	return (1 << (idr->layers * IDR_BITS)) - 1;
}

static inline int
idr_pos(int id, int layer)
{
	return (id >> (IDR_BITS * layer)) & IDR_MASK;
}
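
/*
 * Example (assuming IDR_BITS is 5, IDR_MASK is 0x1f): for id 610 in a
 * two-layer tree, idr_pos(610, 1) == (610 >> 5) & 0x1f == 19 and
 * idr_pos(610, 0) == 610 & 0x1f == 2, i.e. top-level slot 19 and leaf
 * slot 2 (19 * 32 + 2 == 610).
 */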

void
idr_init(struct idr *idr)
{
	bzero(idr, sizeof(*idr));
	mtx_init(&idr->lock, "idr", NULL, MTX_DEF);
}

/* Only frees cached pages. */
void
idr_destroy(struct idr *idr)
{
	struct idr_layer *il, *iln;

	/*
	 * This idr can be reused, and this function might be called multiple
	 * times without an idr_init().  Check if this is the case.  If we do
	 * not do this, then the mutex will panic while asserting that it is
	 * valid.
	 */
	if (mtx_initialized(&idr->lock) == 0)
		return;

	idr_remove_all(idr);
	mtx_lock(&idr->lock);
	for (il = idr->free; il != NULL; il = iln) {
		iln = il->ary[0];
		free(il, M_IDR);
	}
	mtx_unlock(&idr->lock);
	mtx_destroy(&idr->lock);
}

static void
idr_remove_layer(struct idr_layer *il, int layer)
{
	int i;

	if (il == NULL)
		return;
	if (layer == 0) {
		free(il, M_IDR);
		return;
	}
	for (i = 0; i < IDR_SIZE; i++)
		if (il->ary[i])
			idr_remove_layer(il->ary[i], layer - 1);
}

void
idr_remove_all(struct idr *idr)
{

	mtx_lock(&idr->lock);
	idr_remove_layer(idr->top, idr->layers - 1);
	idr->top = NULL;
	idr->layers = 0;
	mtx_unlock(&idr->lock);
}

static void *
idr_remove_locked(struct idr *idr, int id)
{
	struct idr_layer *il;
	void *res;
	int layer;
	int idx;

	id &= MAX_ID_MASK;
	il = idr->top;
	layer = idr->layers - 1;
	if (il == NULL || id > idr_max(idr))
		return (NULL);
	/*
	 * Walk down the tree to this item, setting bitmaps along the way,
	 * as we know at least one item will be free along this path.
	 */
	while (layer && il) {
		idx = idr_pos(id, layer);
		il->bitmap |= 1 << idx;
		il = il->ary[idx];
		layer--;
	}
	idx = id & IDR_MASK;
	/*
	 * At this point we've set free space bitmaps up the whole tree.
	 * We could make this non-fatal and unwind, but Linux dumps a stack
	 * and a warning, so I don't think it's necessary.
	 */
	if (il == NULL || (il->bitmap & (1 << idx)) != 0)
		panic("idr_remove: Item %d not allocated (%p, %p)\n",
		    id, idr, il);
	res = il->ary[idx];
	il->ary[idx] = NULL;
	il->bitmap |= 1 << idx;

	return (res);
}

void *
idr_remove(struct idr *idr, int id)
{
	void *res;

	mtx_lock(&idr->lock);
	res = idr_remove_locked(idr, id);
	mtx_unlock(&idr->lock);

	return (res);
}

static inline struct idr_layer *
idr_find_layer_locked(struct idr *idr, int id)
{
	struct idr_layer *il;
	int layer;

	id &= MAX_ID_MASK;
	il = idr->top;
	layer = idr->layers - 1;
	if (il == NULL || id > idr_max(idr))
		return (NULL);
	while (layer && il) {
		il = il->ary[idr_pos(id, layer)];
		layer--;
	}
	return (il);
}

void *
idr_replace(struct idr *idr, void *ptr, int id)
{
	struct idr_layer *il;
	void *res;
	int idx;

	mtx_lock(&idr->lock);
	il = idr_find_layer_locked(idr, id);
	idx = id & IDR_MASK;

	/* Replace still returns an error if the item was not allocated. */
	if (il == NULL || (il->bitmap & (1 << idx))) {
		res = ERR_PTR(-ENOENT);
	} else {
		res = il->ary[idx];
		il->ary[idx] = ptr;
	}
	mtx_unlock(&idr->lock);
	return (res);
}
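
/*
 * Note that idr_replace() reports a missing ID through the ERR_PTR()
 * convention rather than NULL, so callers check the result with
 * IS_ERR(), e.g. (hypothetical names):
 *
 *	old = idr_replace(&obj_idr, new_obj, id);
 *	if (IS_ERR(old))
 *		return (PTR_ERR(old));
 */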

static inline void *
idr_find_locked(struct idr *idr, int id)
{
	struct idr_layer *il;
	void *res;

	mtx_assert(&idr->lock, MA_OWNED);
	il = idr_find_layer_locked(idr, id);
	if (il != NULL)
		res = il->ary[id & IDR_MASK];
	else
		res = NULL;
	return (res);
}

void *
idr_find(struct idr *idr, int id)
{
	void *res;

	mtx_lock(&idr->lock);
	res = idr_find_locked(idr, id);
	mtx_unlock(&idr->lock);
	return (res);
}

void *
idr_get_next(struct idr *idr, int *nextidp)
{
	void *res = NULL;
	int id = *nextidp;

	mtx_lock(&idr->lock);
	for (; id <= idr_max(idr); id++) {
		res = idr_find_locked(idr, id);
		if (res == NULL)
			continue;
		*nextidp = id;
		break;
	}
	mtx_unlock(&idr->lock);
	return (res);
}
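
/*
 * idr_get_next() returns the first allocated entry at or above *nextidp
 * and writes its ID back, so a full scan is simply (hypothetical names):
 *
 *	for (id = 0; (p = idr_get_next(&obj_idr, &id)) != NULL; id++)
 *		handle(id, p);
 */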

int
idr_pre_get(struct idr *idr, gfp_t gfp_mask)
{
	struct idr_layer *il, *iln;
	struct idr_layer *head;
	int need;

	mtx_lock(&idr->lock);
	for (;;) {
		need = idr->layers + 1;
		for (il = idr->free; il != NULL; il = il->ary[0])
			need--;
		mtx_unlock(&idr->lock);
		if (need <= 0)
			break;
		for (head = NULL; need; need--) {
			iln = malloc(sizeof(*il), M_IDR, M_ZERO | gfp_mask);
			if (iln == NULL)
				break;
			bitmap_fill(&iln->bitmap, IDR_SIZE);
			if (head != NULL) {
				il->ary[0] = iln;
				il = iln;
			} else
				head = il = iln;
		}
		if (head == NULL)
			return (0);
		mtx_lock(&idr->lock);
		il->ary[0] = idr->free;
		idr->free = head;
	}
	return (1);
}

static struct idr_layer *
idr_free_list_get(struct idr *idp)
{
	struct idr_layer *il;

	if ((il = idp->free) != NULL) {
		idp->free = il->ary[0];
		il->ary[0] = NULL;
	}
	return (il);
}

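/*
 * Grab a spare layer for the allocation path: first from the idr's own
 * free list, then from malloc(M_NOWAIT), and finally from the per-CPU
 * preload cache, which idr_preload_dequeue_locked() only hands out when
 * the calling thread still holds the cache lock from idr_preload().
 */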
static inline struct idr_layer *
idr_get(struct idr *idp)
{
	struct idr_layer *il;

	if ((il = idr_free_list_get(idp)) != NULL) {
		MPASS(il->bitmap != 0);
	} else if ((il = malloc(sizeof(*il), M_IDR, M_ZERO | M_NOWAIT)) != NULL) {
		bitmap_fill(&il->bitmap, IDR_SIZE);
	} else if ((il = idr_preload_dequeue_locked(&DPCPU_GET(linux_idr_cache))) != NULL) {
		bitmap_fill(&il->bitmap, IDR_SIZE);
	} else {
		return (NULL);
	}
	return (il);
}

/*
 * Could be implemented as get_new_above(idr, ptr, 0, idp) but written
 * first for simplicity's sake.
 */
static int
idr_get_new_locked(struct idr *idr, void *ptr, int *idp)
{
	struct idr_layer *stack[MAX_LEVEL];
	struct idr_layer *il;
	int error;
	int layer;
	int idx;
	int id;

	mtx_assert(&idr->lock, MA_OWNED);

	error = -EAGAIN;
	/*
	 * Expand the tree until there is free space.
	 */
	if (idr->top == NULL || idr->top->bitmap == 0) {
		if (idr->layers == MAX_LEVEL + 1) {
			error = -ENOSPC;
			goto out;
		}
		il = idr_get(idr);
		if (il == NULL)
			goto out;
		il->ary[0] = idr->top;
		if (idr->top)
			il->bitmap &= ~1;
		idr->top = il;
		idr->layers++;
	}
	il = idr->top;
	id = 0;
	/*
	 * Walk the tree following free bitmaps, record our path.
	 */
	for (layer = idr->layers - 1;; layer--) {
		stack[layer] = il;
		idx = ffsl(il->bitmap);
		if (idx == 0)
			panic("idr_get_new: Invalid leaf state (%p, %p)\n",
			    idr, il);
		idx--;
		id |= idx << (layer * IDR_BITS);
		if (layer == 0)
			break;
		if (il->ary[idx] == NULL) {
			il->ary[idx] = idr_get(idr);
			if (il->ary[idx] == NULL)
				goto out;
		}
		il = il->ary[idx];
	}
	/*
	 * Allocate the leaf to the consumer.
	 */
	il->bitmap &= ~(1 << idx);
	il->ary[idx] = ptr;
	*idp = id;
	/*
	 * Clear bitmaps potentially up to the root.
	 */
	while (il->bitmap == 0 && ++layer < idr->layers) {
		il = stack[layer];
		il->bitmap &= ~(1 << idr_pos(id, layer));
	}
	error = 0;
out:
#ifdef INVARIANTS
	if (error == 0 && idr_find_locked(idr, id) != ptr) {
		panic("idr_get_new: Failed for idr %p, id %d, ptr %p\n",
		    idr, id, ptr);
	}
#endif
	return (error);
}

int
idr_get_new(struct idr *idr, void *ptr, int *idp)
{
	int retval;

	mtx_lock(&idr->lock);
	retval = idr_get_new_locked(idr, ptr, idp);
	mtx_unlock(&idr->lock);
	return (retval);
}
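
/*
 * idr_pre_get()/idr_get_new() implement the legacy two-step Linux API:
 * preallocate with the caller's gfp flags, then allocate the ID and
 * retry on -EAGAIN.  A sketch (hypothetical names):
 *
 *	do {
 *		if (!idr_pre_get(&obj_idr, GFP_KERNEL))
 *			return (-ENOMEM);
 *		err = idr_get_new(&obj_idr, obj, &id);
 *	} while (err == -EAGAIN);
 */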

static int
idr_get_new_above_locked(struct idr *idr, void *ptr, int starting_id, int *idp)
{
	struct idr_layer *stack[MAX_LEVEL];
	struct idr_layer *il;
	int error;
	int layer;
	int idx, sidx;
	int id;

	mtx_assert(&idr->lock, MA_OWNED);

	error = -EAGAIN;
	/*
	 * Compute the layers required to support starting_id and the mask
	 * at the top layer.
	 */
restart:
	idx = starting_id;
	layer = 0;
	while (idx & ~IDR_MASK) {
		layer++;
		idx >>= IDR_BITS;
	}
	if (layer == MAX_LEVEL + 1) {
		error = -ENOSPC;
		goto out;
	}
	/*
	 * Expand the tree until there is free space at or beyond starting_id.
	 */
	while (idr->layers <= layer ||
	    idr->top->bitmap < (1 << idr_pos(starting_id, idr->layers - 1))) {
		if (idr->layers == MAX_LEVEL + 1) {
			error = -ENOSPC;
			goto out;
		}
		il = idr_get(idr);
		if (il == NULL)
			goto out;
		il->ary[0] = idr->top;
		if (idr->top && idr->top->bitmap == 0)
			il->bitmap &= ~1;
		idr->top = il;
		idr->layers++;
	}
	il = idr->top;
	id = 0;
	/*
	 * Walk the tree following free bitmaps, record our path.
	 */
	for (layer = idr->layers - 1;; layer--) {
		stack[layer] = il;
		sidx = idr_pos(starting_id, layer);
		/* Returns index numbered from 0 or size if none exists. */
		idx = find_next_bit(&il->bitmap, IDR_SIZE, sidx);
		if (idx == IDR_SIZE && sidx == 0)
			panic("idr_get_new: Invalid leaf state (%p, %p)\n",
			    idr, il);
		/*
		 * We may have walked a path where there was a free bit but
		 * it was lower than what we wanted.  Restart the search with
		 * a larger starting id.  id contains the progress we made so
		 * far.  Search the leaf one above this level.  This may
		 * restart as many as MAX_LEVEL times but that is expected
		 * to be rare.
		 */
		if (idx == IDR_SIZE) {
			starting_id = id + (1 << ((layer + 1) * IDR_BITS));
			goto restart;
		}
		if (idx > sidx)
			starting_id = 0;	/* Search the whole subtree. */
		id |= idx << (layer * IDR_BITS);
		if (layer == 0)
			break;
		if (il->ary[idx] == NULL) {
			il->ary[idx] = idr_get(idr);
			if (il->ary[idx] == NULL)
				goto out;
		}
		il = il->ary[idx];
	}
	/*
	 * Allocate the leaf to the consumer.
	 */
	il->bitmap &= ~(1 << idx);
	il->ary[idx] = ptr;
	*idp = id;
	/*
	 * Clear bitmaps potentially up to the root.
	 */
	while (il->bitmap == 0 && ++layer < idr->layers) {
		il = stack[layer];
		il->bitmap &= ~(1 << idr_pos(id, layer));
	}
	error = 0;
out:
#ifdef INVARIANTS
	if (error == 0 && idr_find_locked(idr, id) != ptr) {
		panic("idr_get_new_above: Failed for idr %p, id %d, ptr %p\n",
		    idr, id, ptr);
	}
#endif
	return (error);
}

int
idr_get_new_above(struct idr *idr, void *ptr, int starting_id, int *idp)
{
	int retval;

	mtx_lock(&idr->lock);
	retval = idr_get_new_above_locked(idr, ptr, starting_id, idp);
	mtx_unlock(&idr->lock);
	return (retval);
}

int
ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
{
	return (idr_get_new_above(&ida->idr, NULL, starting_id, p_id));
}

static int
idr_alloc_locked(struct idr *idr, void *ptr, int start, int end)
{
	int max = end > 0 ? end - 1 : INT_MAX;
	int error;
	int id;

	mtx_assert(&idr->lock, MA_OWNED);

	if (unlikely(start < 0))
		return (-EINVAL);
	if (unlikely(max < start))
		return (-ENOSPC);

	if (start == 0)
		error = idr_get_new_locked(idr, ptr, &id);
	else
		error = idr_get_new_above_locked(idr, ptr, start, &id);

	if (unlikely(error < 0))
		return (error);
	if (unlikely(id > max)) {
		idr_remove_locked(idr, id);
		return (-ENOSPC);
	}
	return (id);
}

int
idr_alloc(struct idr *idr, void *ptr, int start, int end, gfp_t gfp_mask)
{
	int retval;

	mtx_lock(&idr->lock);
	retval = idr_alloc_locked(idr, ptr, start, end);
	mtx_unlock(&idr->lock);
	return (retval);
}
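
/*
 * idr_alloc() is the modern interface: it returns the new ID on
 * success, -ENOSPC when the range [start, end) is exhausted, or another
 * negative errno.  With end == 0 the range is unbounded above, e.g.
 * (hypothetical names):
 *
 *	id = idr_alloc(&obj_idr, obj, 1, 0, GFP_KERNEL);
 *	if (id < 0)
 *		return (id);
 */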

int
idr_alloc_cyclic(struct idr *idr, void *ptr, int start, int end, gfp_t gfp_mask)
{
	int retval;

	mtx_lock(&idr->lock);
	retval = idr_alloc_locked(idr, ptr, max(start, idr->next_cyclic_id), end);
	if (unlikely(retval == -ENOSPC))
		retval = idr_alloc_locked(idr, ptr, start, end);
	if (likely(retval >= 0))
		idr->next_cyclic_id = retval + 1;
	mtx_unlock(&idr->lock);
	return (retval);
}
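
/*
 * idr_alloc_cyclic() hands out IDs in increasing order starting at
 * next_cyclic_id and only wraps back to 'start' once the upper part of
 * the range is exhausted, which keeps recently freed IDs from being
 * reused immediately.
 */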

static int
idr_for_each_layer(struct idr_layer *il, int offset, int layer,
    int (*f)(int id, void *p, void *data), void *data)
{
	int i, err;

	if (il == NULL)
		return (0);
	if (layer == 0) {
		for (i = 0; i < IDR_SIZE; i++) {
			if (il->ary[i] == NULL)
				continue;
			err = f(i + offset, il->ary[i], data);
			if (err)
				return (err);
		}
		return (0);
	}
	for (i = 0; i < IDR_SIZE; i++) {
		if (il->ary[i] == NULL)
			continue;
		err = idr_for_each_layer(il->ary[i],
		    (i + offset) * IDR_SIZE, layer - 1, f, data);
		if (err)
			return (err);
	}
	return (0);
}

/* NOTE: It is not allowed to modify the IDR tree while it is being iterated */
int
idr_for_each(struct idr *idp, int (*f)(int id, void *p, void *data), void *data)
{
	return (idr_for_each_layer(idp->top, 0, idp->layers - 1, f, data));
}
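
/*
 * The callback gets the ID, the stored pointer and the opaque 'data'
 * argument; returning non-zero stops the walk and that value is passed
 * back to the caller, e.g. (hypothetical names):
 *
 *	static int
 *	count_entries(int id, void *p, void *data)
 *	{
 *		(*(int *)data)++;
 *		return (0);
 *	}
 *	...
 *	n = 0;
 *	idr_for_each(&obj_idr, count_entries, &n);
 */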

static int
idr_has_entry(int id, void *p, void *data)
{

	return (1);
}

bool
idr_is_empty(struct idr *idp)
{

	return (idr_for_each(idp, idr_has_entry, NULL) == 0);
}

int
ida_pre_get(struct ida *ida, gfp_t flags)
{
	if (idr_pre_get(&ida->idr, flags) == 0)
		return (0);

	if (ida->free_bitmap == NULL) {
		ida->free_bitmap =
		    malloc(sizeof(struct ida_bitmap), M_IDR, flags);
	}
	return (ida->free_bitmap != NULL);
}

int
ida_simple_get(struct ida *ida, unsigned int start, unsigned int end,
    gfp_t flags)
{
	int ret, id;
	unsigned int max;

	MPASS((int)start >= 0);

	if ((int)end <= 0)
		max = INT_MAX;
	else {
		MPASS(end > start);
		max = end - 1;
	}
again:
	if (!ida_pre_get(ida, flags))
		return (-ENOMEM);

	if ((ret = ida_get_new_above(ida, start, &id)) == 0) {
		if (id > max) {
			ida_remove(ida, id);
			ret = -ENOSPC;
		} else {
			ret = id;
		}
	}
	if (__predict_false(ret == -EAGAIN))
		goto again;

	return (ret);
}
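
/*
 * ida_simple_get()/ida_simple_remove() are the usual pairing for plain
 * number allocation, e.g. allocating a unit number below 256
 * (hypothetical names):
 *
 *	unit = ida_simple_get(&unit_ida, 0, 256, GFP_KERNEL);
 *	if (unit < 0)
 *		return (unit);
 *	...
 *	ida_simple_remove(&unit_ida, unit);
 */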

void
ida_simple_remove(struct ida *ida, unsigned int id)
{
	idr_remove(&ida->idr, id);
}

void
ida_remove(struct ida *ida, int id)
{
	idr_remove(&ida->idr, id);
}

void
ida_init(struct ida *ida)
{
	idr_init(&ida->idr);
}

void
ida_destroy(struct ida *ida)
{
	idr_destroy(&ida->idr);
	free(ida->free_bitmap, M_IDR);
	ida->free_bitmap = NULL;
}