/*
 * include/linux/idr.h
 *
 * 2002-10-18  written by Jim Houston [email protected]
 *	Copyright (C) 2002 by Concurrent Computer Corporation
 *	Distributed under the GNU GPL license version 2.
 *
 * Small id-to-pointer translation service avoiding fixed-size
 * tables.
 */

#ifndef __IDR_H__
#define __IDR_H__

#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/init.h>
#include <linux/rcupdate.h>

/*
 * Using 6 bits at each layer allows us to allocate 7 layers out of each page.
 * 8 bits only gave us 3 layers out of every pair of pages, which is less
 * efficient except for trees with a largest element between 192-255 inclusive.
 */
#define IDR_BITS 6
#define IDR_SIZE (1 << IDR_BITS)
#define IDR_MASK ((1 << IDR_BITS)-1)

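/*
 * Rough arithmetic behind the comment above (a sketch, assuming a
 * 64-bit machine with 4KiB pages): with IDR_BITS = 6 an idr_layer
 * holds 64 child pointers (512 bytes) plus a few words of metadata,
 * so seven of them pack into one page.  With 8 bits the 256 pointers
 * alone take 2KiB, so only three layers fit in every pair of pages.
 */
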
struct idr_layer {
	int			prefix;	/* the ID prefix of this idr_layer */
	int			layer;	/* distance from leaf */
	struct idr_layer __rcu	*ary[1<<IDR_BITS];
	int			count;	/* When zero, we can release it */
	union {
		/* A zero bit means "space here" */
		DECLARE_BITMAP(bitmap, IDR_SIZE);
		struct rcu_head		rcu_head;
	};
};

struct idr {
	struct idr_layer __rcu	*hint;	/* the last layer allocated from */
	struct idr_layer __rcu	*top;
	int			layers;	/* only valid w/o concurrent changes */
	int			cur;	/* current pos for cyclic allocation */
	spinlock_t		lock;
	int			id_free_cnt;
	struct idr_layer	*id_free;
};

#define IDR_INIT(name)							\
{									\
	.lock			= __SPIN_LOCK_UNLOCKED(name.lock),	\
}
#define DEFINE_IDR(name)	struct idr name = IDR_INIT(name)

/**
 * idr_get_cursor - Return the current position of the cyclic allocator
 * @idr: idr handle
 *
 * The value returned is the ID that idr_alloc_cyclic() will return
 * next if it is free (otherwise the search will start from this
 * position).
 */
static inline unsigned int idr_get_cursor(struct idr *idr)
{
	return READ_ONCE(idr->cur);
}

/**
 * idr_set_cursor - Set the current position of the cyclic allocator
 * @idr: idr handle
 * @val: new position
 *
 * The next call to idr_alloc_cyclic() will return @val if it is free
 * (otherwise the search will start from this position).
 */
static inline void idr_set_cursor(struct idr *idr, unsigned int val)
{
	WRITE_ONCE(idr->cur, val);
}

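/*
 * Example (a sketch): saving and restoring the cyclic allocator's
 * position, e.g. across a checkpoint/restore cycle.  my_idr is a
 * hypothetical idr.
 *
 *	unsigned int pos = idr_get_cursor(&my_idr);
 *	...
 *	idr_set_cursor(&my_idr, pos);
 */
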
/**
 * DOC: idr sync
 * idr synchronization (stolen from radix-tree.h)
 *
 * idr_find() can be called locklessly, using RCU.  The caller must
 * ensure calls to this function are made within rcu_read_lock() regions.
 * Other readers (lock-free or otherwise) and modifications may be running
 * concurrently.
 *
 * It is still required that the caller manage the synchronization and
 * lifetimes of the items.  So if RCU lock-free lookups are used, typically
 * this would mean that the items have their own locks, or are amenable to
 * lock-free access; and that the items are freed by RCU (or only freed after
 * having been deleted from the idr tree *and* a synchronize_rcu() grace
 * period).
 */

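/*
 * Example (a sketch of the scheme described above): a lockless lookup
 * under RCU, assuming the stored objects are freed with kfree_rcu()
 * after idr_remove().  my_idr and struct my_obj are hypothetical.
 *
 *	struct my_obj *obj;
 *
 *	rcu_read_lock();
 *	obj = idr_find(&my_idr, id);
 *	if (obj)
 *		do_something(obj);
 *	rcu_read_unlock();
 */
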
/*
 * This is what we export.
 */

void *idr_find_slowpath(struct idr *idp, int id);
void idr_preload(gfp_t gfp_mask);
int idr_alloc(struct idr *idp, void *ptr, int start, int end, gfp_t gfp_mask);
int idr_alloc_cyclic(struct idr *idr, void *ptr, int start, int end, gfp_t gfp_mask);
int idr_for_each(struct idr *idp,
		 int (*fn)(int id, void *p, void *data), void *data);
void *idr_get_next(struct idr *idp, int *nextid);
void *idr_replace(struct idr *idp, void *ptr, int id);
void idr_remove(struct idr *idp, int id);
void idr_destroy(struct idr *idp);
void idr_init(struct idr *idp);
bool idr_is_empty(struct idr *idp);

/**
 * idr_preload_end - end preload section started with idr_preload()
 *
 * Each idr_preload() should be matched with an invocation of this
 * function.  See idr_preload() for details.
 */
static inline void idr_preload_end(void)
{
	preempt_enable();
}

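/*
 * Example (a sketch): allocating an ID under a spinlock.  idr_preload()
 * pre-allocates the layers idr_alloc() may need, so the allocation
 * itself can use GFP_NOWAIT inside the atomic section.  my_idr,
 * my_lock and obj are hypothetical.
 *
 *	int id;
 *
 *	idr_preload(GFP_KERNEL);
 *	spin_lock(&my_lock);
 *	id = idr_alloc(&my_idr, obj, 0, 0, GFP_NOWAIT);
 *	spin_unlock(&my_lock);
 *	idr_preload_end();
 *	if (id < 0)
 *		return id;
 */
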
/**
 * idr_find - return pointer for given id
 * @idr: idr handle
 * @id: lookup key
 *
 * Return the pointer given the id it has been registered with.  A %NULL
 * return indicates that @id is not valid or that %NULL was passed as the
 * pointer to idr_alloc().
 *
 * This function can be called under rcu_read_lock(), provided that the
 * leaf pointers' lifetimes are correctly managed.
 */
static inline void *idr_find(struct idr *idr, int id)
{
	struct idr_layer *hint = rcu_dereference_raw(idr->hint);

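	/*
	 * Fastpath: @hint caches the leaf idr_layer touched by the most
	 * recent allocation.  If @id falls within its IDR_SIZE slots,
	 * dereference the slot directly.
	 */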
	if (hint && (id & ~IDR_MASK) == hint->prefix)
		return rcu_dereference_raw(hint->ary[id & IDR_MASK]);

	return idr_find_slowpath(idr, id);
}

/**
 * idr_for_each_entry - iterate over an idr's elements of a given type
 * @idp:     idr handle
 * @entry:   the type * to use as cursor
 * @id:      id entry's key
 *
 * @entry and @id do not need to be initialized before the loop, and
 * after normal termination @entry is left with the value NULL.  This
 * is convenient for a "not found" value.
 */
#define idr_for_each_entry(idp, entry, id)			\
	for (id = 0; ((entry) = idr_get_next(idp, &(id))) != NULL; ++id)

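/*
 * Example (a sketch): visiting every registered object.  my_idr and
 * struct my_obj are hypothetical.
 *
 *	struct my_obj *obj;
 *	int id;
 *
 *	idr_for_each_entry(&my_idr, obj, id)
 *		pr_info("id %d -> %p\n", id, obj);
 */
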
/**
 * idr_for_each_entry_continue - continue iteration over an idr's elements of a given type
 * @idp:     idr handle
 * @entry:   the type * to use as cursor
 * @id:      id entry's key
 *
 * Continue to iterate over entries of the given type, continuing after
 * the current position.
 */
#define idr_for_each_entry_continue(idp, entry, id)			\
	for ((entry) = idr_get_next((idp), &(id));			\
	     entry;							\
	     ++id, (entry) = idr_get_next((idp), &(id)))

/*
 * IDA - IDR based id allocator, use when translation from id to
 * pointer isn't necessary.
 *
 * IDA_BITMAP_LONGS is calculated to be one less to accommodate
 * ida_bitmap->nr_busy so that the whole struct fits in 128 bytes.
 */
#define IDA_CHUNK_SIZE		128	/* 128 bytes per chunk */
#define IDA_BITMAP_LONGS	(IDA_CHUNK_SIZE / sizeof(long) - 1)
#define IDA_BITMAP_BITS 	(IDA_BITMAP_LONGS * sizeof(long) * 8)

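/*
 * Worked example (assuming a 64-bit machine): IDA_BITMAP_LONGS is
 * 128 / 8 - 1 = 15, so together with nr_busy the struct occupies
 * exactly 128 bytes, and each ida_bitmap tracks
 * IDA_BITMAP_BITS = 15 * 64 = 960 ids.
 */
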
struct ida_bitmap {
	long			nr_busy;
	unsigned long		bitmap[IDA_BITMAP_LONGS];
};

struct ida {
	struct idr		idr;
	struct ida_bitmap	*free_bitmap;
};

#define IDA_INIT(name)		{ .idr = IDR_INIT((name).idr), .free_bitmap = NULL, }
#define DEFINE_IDA(name)	struct ida name = IDA_INIT(name)

int ida_pre_get(struct ida *ida, gfp_t gfp_mask);
int ida_get_new_above(struct ida *ida, int starting_id, int *p_id);
void ida_remove(struct ida *ida, int id);
void ida_destroy(struct ida *ida);
void ida_init(struct ida *ida);

int ida_simple_get(struct ida *ida, unsigned int start, unsigned int end,
		   gfp_t gfp_mask);
void ida_simple_remove(struct ida *ida, unsigned int id);

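/*
 * Example (a sketch): grabbing and releasing a small unique number
 * with the simple interface.  my_ida is hypothetical.
 *
 *	int nr = ida_simple_get(&my_ida, 0, 0, GFP_KERNEL);
 *	if (nr < 0)
 *		return nr;
 *	...
 *	ida_simple_remove(&my_ida, nr);
 */
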
/**
 * ida_get_new - allocate new ID
 * @ida:	ida handle
 * @p_id:	pointer to the allocated handle
 *
 * Simple wrapper around ida_get_new_above() w/ @starting_id of zero.
 */
static inline int ida_get_new(struct ida *ida, int *p_id)
{
	return ida_get_new_above(ida, 0, p_id);
}

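/*
 * Example (a sketch): the classic two-step pattern for this interface.
 * ida_get_new() returns %-EAGAIN when it needs more memory, so the
 * caller preloads with ida_pre_get() and retries.  my_ida and my_lock
 * are hypothetical.
 *
 *	int id, ret;
 * again:
 *	if (!ida_pre_get(&my_ida, GFP_KERNEL))
 *		return -ENOMEM;
 *	spin_lock(&my_lock);
 *	ret = ida_get_new(&my_ida, &id);
 *	spin_unlock(&my_lock);
 *	if (ret == -EAGAIN)
 *		goto again;
 *	if (ret)
 *		return ret;
 */
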
static inline bool ida_is_empty(struct ida *ida)
{
	return idr_is_empty(&ida->idr);
}

void __init idr_init_cache(void);

#endif /* __IDR_H__ */