/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Fast and scalable bitmaps.
 *
 * Copyright (C) 2016 Facebook
 * Copyright (C) 2013-2014 Jens Axboe
 */

#ifndef __LINUX_SCALE_BITMAP_H
#define __LINUX_SCALE_BITMAP_H

#include <linux/kernel.h>
#include <linux/slab.h>

struct seq_file;

/**
 * struct sbitmap_word - Word in a &struct sbitmap.
 */
struct sbitmap_word {
	/**
	 * @depth: Number of bits being used in @word/@cleared
	 */
	unsigned long depth;

	/**
	 * @word: word holding free bits
	 */
	unsigned long word ____cacheline_aligned_in_smp;

	/**
	 * @cleared: word holding cleared bits
	 */
	unsigned long cleared ____cacheline_aligned_in_smp;
} ____cacheline_aligned_in_smp;

/**
 * struct sbitmap - Scalable bitmap.
 *
 * A &struct sbitmap is spread over multiple cachelines to avoid ping-pong. This
 * trades off higher memory usage for better scalability.
 */
struct sbitmap {
	/**
	 * @depth: Number of bits used in the whole bitmap.
	 */
	unsigned int depth;

	/**
	 * @shift: log2(number of bits used per word)
	 */
	unsigned int shift;

	/**
	 * @map_nr: Number of words (cachelines) being used for the bitmap.
	 */
	unsigned int map_nr;

	/**
	 * @round_robin: Allocate bits in strict round-robin order.
	 */
	bool round_robin;

	/**
	 * @map: Allocated bitmap.
	 */
	struct sbitmap_word *map;

	/**
	 * @alloc_hint: Cache of last successfully allocated or freed bit.
	 *
	 * This is per-cpu, which allows multiple users to stick to different
	 * cachelines until the map is exhausted.
	 */
	unsigned int __percpu *alloc_hint;
};

#define SBQ_WAIT_QUEUES 8
#define SBQ_WAKE_BATCH 8

/**
 * struct sbq_wait_state - Wait queue in a &struct sbitmap_queue.
 */
struct sbq_wait_state {
	/**
	 * @wait_cnt: Number of frees remaining before we wake up.
	 */
	atomic_t wait_cnt;

	/**
	 * @wait: Wait queue.
	 */
	wait_queue_head_t wait;
} ____cacheline_aligned_in_smp;

/**
 * struct sbitmap_queue - Scalable bitmap with the added ability to wait on free
 * bits.
 *
 * A &struct sbitmap_queue uses multiple wait queues and rolling wakeups to
 * avoid contention on the wait queue spinlock. This ensures that we don't hit a
 * scalability wall when we run out of free bits and have to start putting tasks
 * to sleep.
 */
struct sbitmap_queue {
	/**
	 * @sb: Scalable bitmap.
	 */
	struct sbitmap sb;

	/**
	 * @wake_batch: Number of bits which must be freed before we wake up any
	 * waiters.
	 */
	unsigned int wake_batch;

	/**
	 * @wake_index: Next wait queue in @ws to wake up.
	 */
	atomic_t wake_index;

	/**
	 * @ws: Wait queues.
	 */
	struct sbq_wait_state *ws;

	/**
	 * @ws_active: Count of currently active @ws waitqueues.
	 */
	atomic_t ws_active;

	/**
	 * @min_shallow_depth: The minimum shallow depth which may be passed to
	 * sbitmap_queue_get_shallow() or __sbitmap_queue_get_shallow().
	 */
	unsigned int min_shallow_depth;
};

/**
 * sbitmap_init_node() - Initialize a &struct sbitmap on a specific memory node.
 * @sb: Bitmap to initialize.
 * @depth: Number of bits to allocate.
 * @shift: Use 2^@shift bits per word in the bitmap; if a negative number is
 *         given, a good default is chosen.
 * @flags: Allocation flags.
 * @node: Memory node to allocate on.
 * @round_robin: If true, be stricter about allocation order; always allocate
 *               starting from the last allocated bit. This is less efficient
 *               than the default behavior (false).
 * @alloc_hint: If true, apply percpu hint for where to start searching for
 *              a free bit.
 *
 * Return: Zero on success or negative errno on failure.
 */
int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift,
		      gfp_t flags, int node, bool round_robin, bool alloc_hint);
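
/*
 * Example (an illustrative sketch, not part of this API): initialize a
 * 128-bit map with an automatically chosen word size on any NUMA node,
 * with the percpu allocation hint enabled:
 *
 *	struct sbitmap sb;
 *	int ret;
 *
 *	ret = sbitmap_init_node(&sb, 128, -1, GFP_KERNEL, NUMA_NO_NODE,
 *				false, true);
 *	if (ret)
 *		return ret;
 *	...
 *	sbitmap_free(&sb);
 */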

/**
 * sbitmap_free() - Free memory used by a &struct sbitmap.
 * @sb: Bitmap to free.
 */
static inline void sbitmap_free(struct sbitmap *sb)
{
	free_percpu(sb->alloc_hint);
	kfree(sb->map);
	sb->map = NULL;
}

/**
 * sbitmap_resize() - Resize a &struct sbitmap.
 * @sb: Bitmap to resize.
 * @depth: New number of bits to resize to.
 *
 * Doesn't reallocate anything. It's up to the caller to ensure that the new
 * depth doesn't exceed the depth that the sb was initialized with.
 */
void sbitmap_resize(struct sbitmap *sb, unsigned int depth);

/**
 * sbitmap_get() - Try to allocate a free bit from a &struct sbitmap.
 * @sb: Bitmap to allocate from.
 *
 * This operation provides acquire barrier semantics if it succeeds.
 *
 * Return: Non-negative allocated bit number if successful, -1 otherwise.
 */
int sbitmap_get(struct sbitmap *sb);

/**
 * sbitmap_get_shallow() - Try to allocate a free bit from a &struct sbitmap,
 * limiting the depth used from each word.
 * @sb: Bitmap to allocate from.
 * @shallow_depth: The maximum number of bits to allocate from a single word.
 *
 * This rather specific operation allows for having multiple users with
 * different allocation limits. E.g., there can be a high-priority class that
 * uses sbitmap_get() and a low-priority class that uses sbitmap_get_shallow()
 * with a @shallow_depth of (1 << (@sb->shift - 1)). Then, the low-priority
 * class can only allocate half of the total bits in the bitmap, preventing it
 * from starving out the high-priority class.
 *
 * Return: Non-negative allocated bit number if successful, -1 otherwise.
 */
int sbitmap_get_shallow(struct sbitmap *sb, unsigned long shallow_depth);
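
/*
 * Example (an illustrative sketch): the two-class scheme described above,
 * where low-priority users may take at most half of each word. "high_prio"
 * is a hypothetical flag belonging to the caller:
 *
 *	unsigned long shallow = 1UL << (sb->shift - 1);
 *	int nr;
 *
 *	nr = high_prio ? sbitmap_get(sb) : sbitmap_get_shallow(sb, shallow);
 *	if (nr < 0)
 *		return -EBUSY;
 */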

/**
 * sbitmap_any_bit_set() - Check for a set bit in a &struct sbitmap.
 * @sb: Bitmap to check.
 *
 * Return: true if any bit in the bitmap is set, false otherwise.
 */
bool sbitmap_any_bit_set(const struct sbitmap *sb);

#define SB_NR_TO_INDEX(sb, bitnr) ((bitnr) >> (sb)->shift)
#define SB_NR_TO_BIT(sb, bitnr) ((bitnr) & ((1U << (sb)->shift) - 1U))
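
/*
 * For example, with @shift == 6 (64-bit words), bit number 130 maps to
 * word index 2 (SB_NR_TO_INDEX) and bit 2 within that word (SB_NR_TO_BIT),
 * since 130 >> 6 == 2 and 130 & 63 == 2.
 */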

typedef bool (*sb_for_each_fn)(struct sbitmap *, unsigned int, void *);

/**
 * __sbitmap_for_each_set() - Iterate over each set bit in a &struct sbitmap.
 * @sb: Bitmap to iterate over.
 * @start: Where to start the iteration.
 * @fn: Callback. Should return true to continue or false to break early.
 * @data: Pointer to pass to callback.
 *
 * This is inline even though it's non-trivial so that the function calls to the
 * callback will hopefully get optimized away.
 */
static inline void __sbitmap_for_each_set(struct sbitmap *sb,
					  unsigned int start,
					  sb_for_each_fn fn, void *data)
{
	unsigned int index;
	unsigned int nr;
	unsigned int scanned = 0;

	if (start >= sb->depth)
		start = 0;
	index = SB_NR_TO_INDEX(sb, start);
	nr = SB_NR_TO_BIT(sb, start);

	while (scanned < sb->depth) {
		unsigned long word;
		unsigned int depth = min_t(unsigned int,
					   sb->map[index].depth - nr,
					   sb->depth - scanned);

		scanned += depth;
		word = sb->map[index].word & ~sb->map[index].cleared;
		if (!word)
			goto next;

		/*
		 * On the first iteration of the outer loop, we need to add the
		 * bit offset back to the size of the word for find_next_bit().
		 * On all other iterations, nr is zero, so this is a noop.
		 */
		depth += nr;
		while (1) {
			nr = find_next_bit(&word, depth, nr);
			if (nr >= depth)
				break;
			if (!fn(sb, (index << sb->shift) + nr, data))
				return;

			nr++;
		}
next:
		nr = 0;
		if (++index >= sb->map_nr)
			index = 0;
	}
}

/**
 * sbitmap_for_each_set() - Iterate over each set bit in a &struct sbitmap.
 * @sb: Bitmap to iterate over.
 * @fn: Callback. Should return true to continue or false to break early.
 * @data: Pointer to pass to callback.
 */
static inline void sbitmap_for_each_set(struct sbitmap *sb, sb_for_each_fn fn,
					void *data)
{
	__sbitmap_for_each_set(sb, 0, fn, data);
}
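
/*
 * Example (an illustrative sketch): count the set bits with a callback.
 * Returning true from the callback keeps the iteration going:
 *
 *	static bool count_fn(struct sbitmap *sb, unsigned int bitnr,
 *			     void *data)
 *	{
 *		unsigned int *count = data;
 *
 *		(*count)++;
 *		return true;
 *	}
 *
 *	unsigned int count = 0;
 *
 *	sbitmap_for_each_set(sb, count_fn, &count);
 */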

static inline unsigned long *__sbitmap_word(struct sbitmap *sb,
					    unsigned int bitnr)
{
	return &sb->map[SB_NR_TO_INDEX(sb, bitnr)].word;
}

/* Helpers equivalent to the operations in asm/bitops.h and linux/bitmap.h */

static inline void sbitmap_set_bit(struct sbitmap *sb, unsigned int bitnr)
{
	set_bit(SB_NR_TO_BIT(sb, bitnr), __sbitmap_word(sb, bitnr));
}

static inline void sbitmap_clear_bit(struct sbitmap *sb, unsigned int bitnr)
{
	clear_bit(SB_NR_TO_BIT(sb, bitnr), __sbitmap_word(sb, bitnr));
}

/*
 * This one is special: it doesn't actually clear the bit, but rather sets
 * the corresponding bit in the ->cleared mask. It pairs with the caller
 * doing sbitmap_deferred_clear() if a given index is full, which will
 * clear the previously freed entries in the corresponding ->word.
 */
static inline void sbitmap_deferred_clear_bit(struct sbitmap *sb, unsigned int bitnr)
{
	unsigned long *addr = &sb->map[SB_NR_TO_INDEX(sb, bitnr)].cleared;

	set_bit(SB_NR_TO_BIT(sb, bitnr), addr);
}

/*
 * Pairs with sbitmap_get(); this one applies both the deferred clear and
 * the allocation hint.
 */
static inline void sbitmap_put(struct sbitmap *sb, unsigned int bitnr)
{
	sbitmap_deferred_clear_bit(sb, bitnr);

	if (likely(sb->alloc_hint && !sb->round_robin && bitnr < sb->depth))
		*this_cpu_ptr(sb->alloc_hint) = bitnr;
}
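
/*
 * Example (an illustrative sketch): a simple get/put cycle. The put
 * defers the actual clearing and updates this CPU's allocation hint:
 *
 *	int nr = sbitmap_get(sb);
 *
 *	if (nr >= 0) {
 *		...
 *		sbitmap_put(sb, nr);
 *	}
 */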

static inline int sbitmap_test_bit(struct sbitmap *sb, unsigned int bitnr)
{
	return test_bit(SB_NR_TO_BIT(sb, bitnr), __sbitmap_word(sb, bitnr));
}

/**
 * sbitmap_show() - Dump &struct sbitmap information to a &struct seq_file.
 * @sb: Bitmap to show.
 * @m: struct seq_file to write to.
 *
 * This is intended for debugging. The format may change at any time.
 */
void sbitmap_show(struct sbitmap *sb, struct seq_file *m);

/**
 * sbitmap_bitmap_show() - Write a hex dump of a &struct sbitmap to a &struct
 * seq_file.
 * @sb: Bitmap to show.
 * @m: struct seq_file to write to.
 *
 * This is intended for debugging. The output isn't guaranteed to be internally
 * consistent.
 */
void sbitmap_bitmap_show(struct sbitmap *sb, struct seq_file *m);

/**
 * sbitmap_queue_init_node() - Initialize a &struct sbitmap_queue on a specific
 * memory node.
 * @sbq: Bitmap queue to initialize.
 * @depth: See sbitmap_init_node().
 * @shift: See sbitmap_init_node().
 * @round_robin: See sbitmap_init_node().
 * @flags: Allocation flags.
 * @node: Memory node to allocate on.
 *
 * Return: Zero on success or negative errno on failure.
 */
int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth,
			    int shift, bool round_robin, gfp_t flags, int node);
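
/*
 * Example (an illustrative sketch): a 256-tag queue with the default
 * word size on any NUMA node:
 *
 *	struct sbitmap_queue sbq;
 *	int ret;
 *
 *	ret = sbitmap_queue_init_node(&sbq, 256, -1, false, GFP_KERNEL,
 *				      NUMA_NO_NODE);
 *	if (ret)
 *		return ret;
 *	...
 *	sbitmap_queue_free(&sbq);
 */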

/**
 * sbitmap_queue_free() - Free memory used by a &struct sbitmap_queue.
 * @sbq: Bitmap queue to free.
 */
static inline void sbitmap_queue_free(struct sbitmap_queue *sbq)
{
	kfree(sbq->ws);
	sbitmap_free(&sbq->sb);
}

/**
 * sbitmap_queue_resize() - Resize a &struct sbitmap_queue.
 * @sbq: Bitmap queue to resize.
 * @depth: New number of bits to resize to.
 *
 * Like sbitmap_resize(), this doesn't reallocate anything. It has to do
 * some extra work on the &struct sbitmap_queue, so it's not safe to just
 * resize the underlying &struct sbitmap.
 */
void sbitmap_queue_resize(struct sbitmap_queue *sbq, unsigned int depth);

/**
 * __sbitmap_queue_get() - Try to allocate a free bit from a &struct
 * sbitmap_queue with preemption already disabled.
 * @sbq: Bitmap queue to allocate from.
 *
 * Return: Non-negative allocated bit number if successful, -1 otherwise.
 */
int __sbitmap_queue_get(struct sbitmap_queue *sbq);

/**
 * __sbitmap_queue_get_shallow() - Try to allocate a free bit from a &struct
 * sbitmap_queue, limiting the depth used from each word, with preemption
 * already disabled.
 * @sbq: Bitmap queue to allocate from.
 * @shallow_depth: The maximum number of bits to allocate from a single word.
 * See sbitmap_get_shallow().
 *
 * If you call this, make sure to call sbitmap_queue_min_shallow_depth() after
 * initializing @sbq.
 *
 * Return: Non-negative allocated bit number if successful, -1 otherwise.
 */
int __sbitmap_queue_get_shallow(struct sbitmap_queue *sbq,
				unsigned int shallow_depth);

/**
 * sbitmap_queue_get() - Try to allocate a free bit from a &struct
 * sbitmap_queue.
 * @sbq: Bitmap queue to allocate from.
 * @cpu: Output parameter; will contain the CPU we ran on (e.g., to be passed to
 *       sbitmap_queue_clear()).
 *
 * Return: Non-negative allocated bit number if successful, -1 otherwise.
 */
static inline int sbitmap_queue_get(struct sbitmap_queue *sbq,
				    unsigned int *cpu)
{
	int nr;

	*cpu = get_cpu();
	nr = __sbitmap_queue_get(sbq);
	put_cpu();
	return nr;
}

/**
 * sbitmap_queue_get_shallow() - Try to allocate a free bit from a &struct
 * sbitmap_queue, limiting the depth used from each word.
 * @sbq: Bitmap queue to allocate from.
 * @cpu: Output parameter; will contain the CPU we ran on (e.g., to be passed to
 *       sbitmap_queue_clear()).
 * @shallow_depth: The maximum number of bits to allocate from a single word.
 * See sbitmap_get_shallow().
 *
 * If you call this, make sure to call sbitmap_queue_min_shallow_depth() after
 * initializing @sbq.
 *
 * Return: Non-negative allocated bit number if successful, -1 otherwise.
 */
static inline int sbitmap_queue_get_shallow(struct sbitmap_queue *sbq,
					    unsigned int *cpu,
					    unsigned int shallow_depth)
{
	int nr;

	*cpu = get_cpu();
	nr = __sbitmap_queue_get_shallow(sbq, shallow_depth);
	put_cpu();
	return nr;
}

/**
 * sbitmap_queue_min_shallow_depth() - Inform a &struct sbitmap_queue of the
 * minimum shallow depth that will be used.
 * @sbq: Bitmap queue in question.
 * @min_shallow_depth: The minimum shallow depth that will be passed to
 * sbitmap_queue_get_shallow() or __sbitmap_queue_get_shallow().
 *
 * sbitmap_queue_clear() batches wakeups as an optimization. The batch size
 * depends on the depth of the bitmap. Since the shallow allocation functions
 * effectively operate with a different depth, the shallow depth must be taken
 * into account when calculating the batch size. This function must be called
 * with the minimum shallow depth that will be used. Failure to do so can result
 * in missed wakeups.
 */
void sbitmap_queue_min_shallow_depth(struct sbitmap_queue *sbq,
				     unsigned int min_shallow_depth);
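
/*
 * Example (an illustrative sketch): if no shallow allocation will ever
 * use a depth smaller than 8, announce that once after initialization so
 * the wake batch stays correct:
 *
 *	sbitmap_queue_min_shallow_depth(sbq, 8);
 *	...
 *	nr = sbitmap_queue_get_shallow(sbq, &cpu, 8);
 */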

/**
 * sbitmap_queue_clear() - Free an allocated bit and wake up waiters on a
 * &struct sbitmap_queue.
 * @sbq: Bitmap to free from.
 * @nr: Bit number to free.
 * @cpu: CPU the bit was allocated on.
 */
void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr,
			 unsigned int cpu);
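
/*
 * Example (an illustrative sketch): a non-blocking allocate/free pair.
 * The CPU returned by sbitmap_queue_get() is handed back to
 * sbitmap_queue_clear() so the free can update that CPU's hint:
 *
 *	unsigned int cpu;
 *	int nr;
 *
 *	nr = sbitmap_queue_get(sbq, &cpu);
 *	if (nr < 0)
 *		return -EBUSY;
 *	...
 *	sbitmap_queue_clear(sbq, nr, cpu);
 */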

/* Advance a wait queue index, wrapping around SBQ_WAIT_QUEUES. */
static inline int sbq_index_inc(int index)
{
	return (index + 1) & (SBQ_WAIT_QUEUES - 1);
}

/*
 * As above, but on an atomic_t. The update is best-effort: if another CPU
 * raced ahead, the failed cmpxchg is simply dropped.
 */
static inline void sbq_index_atomic_inc(atomic_t *index)
{
	int old = atomic_read(index);
	int new = sbq_index_inc(old);
	atomic_cmpxchg(index, old, new);
}

/**
 * sbq_wait_ptr() - Get the next wait queue to use for a &struct
 * sbitmap_queue.
 * @sbq: Bitmap queue to wait on.
 * @wait_index: A counter per "user" of @sbq.
 */
static inline struct sbq_wait_state *sbq_wait_ptr(struct sbitmap_queue *sbq,
						  atomic_t *wait_index)
{
	struct sbq_wait_state *ws;

	ws = &sbq->ws[atomic_read(wait_index)];
	sbq_index_atomic_inc(wait_index);
	return ws;
}

/**
 * sbitmap_queue_wake_all() - Wake up everything waiting on a &struct
 * sbitmap_queue.
 * @sbq: Bitmap queue to wake up.
 */
void sbitmap_queue_wake_all(struct sbitmap_queue *sbq);

/**
 * sbitmap_queue_wake_up() - Wake up some of the waiters in one waitqueue
 * on a &struct sbitmap_queue.
 * @sbq: Bitmap queue to wake up.
 */
void sbitmap_queue_wake_up(struct sbitmap_queue *sbq);

/**
 * sbitmap_queue_show() - Dump &struct sbitmap_queue information to a &struct
 * seq_file.
 * @sbq: Bitmap queue to show.
 * @m: struct seq_file to write to.
 *
 * This is intended for debugging. The format may change at any time.
 */
void sbitmap_queue_show(struct sbitmap_queue *sbq, struct seq_file *m);

struct sbq_wait {
	struct sbitmap_queue *sbq;	/* if set, sbq_wait is accounted */
	struct wait_queue_entry wait;
};

#define DEFINE_SBQ_WAIT(name)							\
	struct sbq_wait name = {						\
		.sbq = NULL,							\
		.wait = {							\
			.private	= current,				\
			.func		= autoremove_wake_function,		\
			.entry		= LIST_HEAD_INIT((name).wait.entry),	\
		}								\
	}

/*
 * Wrapper around prepare_to_wait_exclusive(), which maintains some extra
 * internal state.
 */
void sbitmap_prepare_to_wait(struct sbitmap_queue *sbq,
				struct sbq_wait_state *ws,
				struct sbq_wait *sbq_wait, int state);

/*
 * Must be paired with sbitmap_prepare_to_wait().
 */
void sbitmap_finish_wait(struct sbitmap_queue *sbq, struct sbq_wait_state *ws,
				struct sbq_wait *sbq_wait);
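
/*
 * Example (an illustrative sketch of the blocking allocation pattern;
 * signal handling is omitted). "wait_index" is the caller's per-user
 * counter described at sbq_wait_ptr():
 *
 *	DEFINE_SBQ_WAIT(wait);
 *	struct sbq_wait_state *ws = sbq_wait_ptr(sbq, &wait_index);
 *	unsigned int cpu;
 *	int nr;
 *
 *	for (;;) {
 *		sbitmap_prepare_to_wait(sbq, ws, &wait, TASK_UNINTERRUPTIBLE);
 *		nr = sbitmap_queue_get(sbq, &cpu);
 *		if (nr >= 0)
 *			break;
 *		io_schedule();
 *	}
 *	sbitmap_finish_wait(sbq, ws, &wait);
 */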

/*
 * Wrapper around add_wait_queue(), which maintains some extra internal
 * state.
 */
void sbitmap_add_wait_queue(struct sbitmap_queue *sbq,
			    struct sbq_wait_state *ws,
			    struct sbq_wait *sbq_wait);

/*
 * Must be paired with sbitmap_add_wait_queue().
 */
void sbitmap_del_wait_queue(struct sbq_wait *sbq_wait);

#endif /* __LINUX_SCALE_BITMAP_H */