/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Fast and scalable bitmaps.
 *
 * Copyright (C) 2016 Facebook
 * Copyright (C) 2013-2014 Jens Axboe
 */

#ifndef __LINUX_SCALE_BITMAP_H
#define __LINUX_SCALE_BITMAP_H

#include <linux/kernel.h>
#include <linux/slab.h>

struct seq_file;

/**
 * struct sbitmap_word - Word in a &struct sbitmap.
 */
struct sbitmap_word {
	/**
	 * @depth: Number of bits being used in @word/@cleared
	 */
	unsigned long depth;

	/**
	 * @word: word holding free bits
	 */
	unsigned long word ____cacheline_aligned_in_smp;

	/**
	 * @cleared: word holding cleared bits
	 */
	unsigned long cleared ____cacheline_aligned_in_smp;
} ____cacheline_aligned_in_smp;

/**
 * struct sbitmap - Scalable bitmap.
 *
 * A &struct sbitmap is spread over multiple cachelines to avoid ping-pong. This
 * trades off higher memory usage for better scalability.
 */
struct sbitmap {
	/**
	 * @depth: Number of bits used in the whole bitmap.
	 */
	unsigned int depth;

	/**
	 * @shift: log2(number of bits used per word)
	 */
	unsigned int shift;

	/**
	 * @map_nr: Number of words (cachelines) being used for the bitmap.
	 */
	unsigned int map_nr;

	/**
	 * @round_robin: Allocate bits in strict round-robin order.
	 */
	bool round_robin;

	/**
	 * @map: Allocated bitmap.
	 */
	struct sbitmap_word *map;

	/**
	 * @alloc_hint: Cache of last successfully allocated or freed bit.
	 *
	 * This is per-cpu, which allows multiple users to stick to different
	 * cachelines until the map is exhausted.
	 */
	unsigned int __percpu *alloc_hint;
};

#define SBQ_WAIT_QUEUES 8
#define SBQ_WAKE_BATCH 8

/**
 * struct sbq_wait_state - Wait queue in a &struct sbitmap_queue.
 */
struct sbq_wait_state {
	/**
	 * @wait_cnt: Number of frees remaining before we wake up.
	 */
	atomic_t wait_cnt;

	/**
	 * @wait: Wait queue.
	 */
	wait_queue_head_t wait;
} ____cacheline_aligned_in_smp;

/**
 * struct sbitmap_queue - Scalable bitmap with the added ability to wait on free
 * bits.
 *
 * A &struct sbitmap_queue uses multiple wait queues and rolling wakeups to
 * avoid contention on the wait queue spinlock. This ensures that we don't hit a
 * scalability wall when we run out of free bits and have to start putting tasks
 * to sleep.
 */
struct sbitmap_queue {
	/**
	 * @sb: Scalable bitmap.
	 */
	struct sbitmap sb;

	/**
	 * @wake_batch: Number of bits which must be freed before we wake up any
	 * waiters.
	 */
	unsigned int wake_batch;

	/**
	 * @wake_index: Next wait queue in @ws to wake up.
	 */
	atomic_t wake_index;

	/**
	 * @ws: Wait queues.
	 */
	struct sbq_wait_state *ws;

	/**
	 * @ws_active: Count of currently active @ws waitqueues.
	 */
	atomic_t ws_active;

	/**
	 * @min_shallow_depth: The minimum shallow depth which may be passed to
	 * sbitmap_queue_get_shallow() or __sbitmap_queue_get_shallow().
	 */
	unsigned int min_shallow_depth;
};

/**
 * sbitmap_init_node() - Initialize a &struct sbitmap on a specific memory node.
 * @sb: Bitmap to initialize.
 * @depth: Number of bits to allocate.
 * @shift: Use 2^@shift bits per word in the bitmap; if a negative number is
 *         given, a good default is chosen.
 * @flags: Allocation flags.
 * @node: Memory node to allocate on.
 * @round_robin: If true, be stricter about allocation order; always allocate
 *               starting from the last allocated bit. This is less efficient
 *               than the default behavior (false).
 * @alloc_hint: If true, apply percpu hint for where to start searching for
 *              a free bit.
 *
 * Return: Zero on success or negative errno on failure.
 */
int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift,
		      gfp_t flags, int node, bool round_robin, bool alloc_hint);

/**
 * sbitmap_free() - Free memory used by a &struct sbitmap.
 * @sb: Bitmap to free.
 */
static inline void sbitmap_free(struct sbitmap *sb)
{
	free_percpu(sb->alloc_hint);
	kfree(sb->map);
	sb->map = NULL;
}
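
/*
 * Example (hypothetical caller, not part of this header): a typical
 * init/free pairing. The "tags" name and the surrounding error handling
 * are illustrative only.
 *
 *	struct sbitmap tags;
 *	int ret;
 *
 *	ret = sbitmap_init_node(&tags, 128, -1, GFP_KERNEL, NUMA_NO_NODE,
 *				false, true);
 *	if (ret)
 *		return ret;
 *	...
 *	sbitmap_free(&tags);
 */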

/**
 * sbitmap_resize() - Resize a &struct sbitmap.
 * @sb: Bitmap to resize.
 * @depth: New number of bits to resize to.
 *
 * Doesn't reallocate anything. It's up to the caller to ensure that the new
 * depth doesn't exceed the depth that the sb was initialized with.
 */
void sbitmap_resize(struct sbitmap *sb, unsigned int depth);

/**
 * sbitmap_get() - Try to allocate a free bit from a &struct sbitmap.
 * @sb: Bitmap to allocate from.
 *
 * This operation provides acquire barrier semantics if it succeeds.
 *
 * Return: Non-negative allocated bit number if successful, -1 otherwise.
 */
int sbitmap_get(struct sbitmap *sb);

/**
 * sbitmap_get_shallow() - Try to allocate a free bit from a &struct sbitmap,
 * limiting the depth used from each word.
 * @sb: Bitmap to allocate from.
 * @shallow_depth: The maximum number of bits to allocate from a single word.
 *
 * This rather specific operation allows for having multiple users with
 * different allocation limits. E.g., there can be a high-priority class that
 * uses sbitmap_get() and a low-priority class that uses sbitmap_get_shallow()
 * with a @shallow_depth of (1 << (@sb->shift - 1)). Then, the low-priority
 * class can only allocate half of the total bits in the bitmap, preventing it
 * from starving out the high-priority class.
 *
 * Return: Non-negative allocated bit number if successful, -1 otherwise.
 */
int sbitmap_get_shallow(struct sbitmap *sb, unsigned long shallow_depth);
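
/*
 * A sketch of the two-class scheme described above (hypothetical caller
 * code). With 64-bit words (sb->shift == 6), the low-priority class is
 * limited to 32 bits per word:
 *
 *	int hi_tag = sbitmap_get(sb);
 *	int lo_tag = sbitmap_get_shallow(sb, 1UL << (sb->shift - 1));
 */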

/**
 * sbitmap_any_bit_set() - Check for a set bit in a &struct sbitmap.
 * @sb: Bitmap to check.
 *
 * Return: true if any bit in the bitmap is set, false otherwise.
 */
bool sbitmap_any_bit_set(const struct sbitmap *sb);

#define SB_NR_TO_INDEX(sb, bitnr) ((bitnr) >> (sb)->shift)
#define SB_NR_TO_BIT(sb, bitnr) ((bitnr) & ((1U << (sb)->shift) - 1U))
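
/*
 * For example, with 64-bit words (shift == 6), bit number 70 lives in
 * word 1 at bit offset 6: SB_NR_TO_INDEX() yields 70 >> 6 == 1 and
 * SB_NR_TO_BIT() yields 70 & 63 == 6.
 */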

typedef bool (*sb_for_each_fn)(struct sbitmap *, unsigned int, void *);

/**
 * __sbitmap_for_each_set() - Iterate over each set bit in a &struct sbitmap.
 * @sb: Bitmap to iterate over.
 * @start: Where to start the iteration.
 * @fn: Callback. Should return true to continue or false to break early.
 * @data: Pointer to pass to callback.
 *
 * This is inline even though it's non-trivial so that the function calls to the
 * callback will hopefully get optimized away.
 */
static inline void __sbitmap_for_each_set(struct sbitmap *sb,
					  unsigned int start,
					  sb_for_each_fn fn, void *data)
{
	unsigned int index;
	unsigned int nr;
	unsigned int scanned = 0;

	if (start >= sb->depth)
		start = 0;
	index = SB_NR_TO_INDEX(sb, start);
	nr = SB_NR_TO_BIT(sb, start);

	while (scanned < sb->depth) {
		unsigned long word;
		unsigned int depth = min_t(unsigned int,
					   sb->map[index].depth - nr,
					   sb->depth - scanned);

		scanned += depth;
		word = sb->map[index].word & ~sb->map[index].cleared;
		if (!word)
			goto next;

		/*
		 * On the first iteration of the outer loop, we need to add the
		 * bit offset back to the size of the word for find_next_bit().
		 * On all other iterations, nr is zero, so this is a noop.
		 */
		depth += nr;
		while (1) {
			nr = find_next_bit(&word, depth, nr);
			if (nr >= depth)
				break;
			if (!fn(sb, (index << sb->shift) + nr, data))
				return;

			nr++;
		}
next:
		nr = 0;
		if (++index >= sb->map_nr)
			index = 0;
	}
}

/**
 * sbitmap_for_each_set() - Iterate over each set bit in a &struct sbitmap.
 * @sb: Bitmap to iterate over.
 * @fn: Callback. Should return true to continue or false to break early.
 * @data: Pointer to pass to callback.
 */
static inline void sbitmap_for_each_set(struct sbitmap *sb, sb_for_each_fn fn,
					void *data)
{
	__sbitmap_for_each_set(sb, 0, fn, data);
}
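
/*
 * A minimal iteration sketch (hypothetical caller code): count the set,
 * non-cleared bits by passing a callback that always continues.
 *
 *	static bool count_fn(struct sbitmap *sb, unsigned int bitnr,
 *			     void *data)
 *	{
 *		(*(unsigned int *)data)++;
 *		return true;
 *	}
 *
 *	unsigned int count = 0;
 *
 *	sbitmap_for_each_set(sb, count_fn, &count);
 */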

static inline unsigned long *__sbitmap_word(struct sbitmap *sb,
					    unsigned int bitnr)
{
	return &sb->map[SB_NR_TO_INDEX(sb, bitnr)].word;
}

/* Helpers equivalent to the operations in asm/bitops.h and linux/bitmap.h */

static inline void sbitmap_set_bit(struct sbitmap *sb, unsigned int bitnr)
{
	set_bit(SB_NR_TO_BIT(sb, bitnr), __sbitmap_word(sb, bitnr));
}

static inline void sbitmap_clear_bit(struct sbitmap *sb, unsigned int bitnr)
{
	clear_bit(SB_NR_TO_BIT(sb, bitnr), __sbitmap_word(sb, bitnr));
}

/*
 * This one is special: it doesn't actually clear the bit, but rather sets
 * the corresponding bit in the ->cleared mask instead. It pairs with the
 * caller doing sbitmap_deferred_clear() when a given word is full, which
 * clears the previously freed entries in the corresponding ->word.
 */
static inline void sbitmap_deferred_clear_bit(struct sbitmap *sb, unsigned int bitnr)
{
	unsigned long *addr = &sb->map[SB_NR_TO_INDEX(sb, bitnr)].cleared;

	set_bit(SB_NR_TO_BIT(sb, bitnr), addr);
}

/*
 * Pairs with sbitmap_get(): this frees the bit via the deferred-clear
 * mechanism and also updates the per-cpu allocation hint.
 */
static inline void sbitmap_put(struct sbitmap *sb, unsigned int bitnr)
{
	sbitmap_deferred_clear_bit(sb, bitnr);

	if (likely(sb->alloc_hint && !sb->round_robin && bitnr < sb->depth))
		*this_cpu_ptr(sb->alloc_hint) = bitnr;
}
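
/*
 * A typical allocate/free cycle (hypothetical caller code):
 *
 *	int nr = sbitmap_get(sb);
 *
 *	if (nr >= 0) {
 *		... use bit nr ...
 *		sbitmap_put(sb, nr);
 *	}
 */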

static inline int sbitmap_test_bit(struct sbitmap *sb, unsigned int bitnr)
{
	return test_bit(SB_NR_TO_BIT(sb, bitnr), __sbitmap_word(sb, bitnr));
}

/**
 * sbitmap_show() - Dump &struct sbitmap information to a &struct seq_file.
 * @sb: Bitmap to show.
 * @m: struct seq_file to write to.
 *
 * This is intended for debugging. The format may change at any time.
 */
void sbitmap_show(struct sbitmap *sb, struct seq_file *m);

/**
 * sbitmap_weight() - Return the number of set, non-cleared bits in a &struct
 * sbitmap.
 * @sb: Bitmap to check.
 *
 * Return: The number of bits which are set and not cleared.
 */
unsigned int sbitmap_weight(const struct sbitmap *sb);

/**
 * sbitmap_bitmap_show() - Write a hex dump of a &struct sbitmap to a &struct
 * seq_file.
 * @sb: Bitmap to show.
 * @m: struct seq_file to write to.
 *
 * This is intended for debugging. The output isn't guaranteed to be internally
 * consistent.
 */
void sbitmap_bitmap_show(struct sbitmap *sb, struct seq_file *m);

/**
 * sbitmap_queue_init_node() - Initialize a &struct sbitmap_queue on a specific
 * memory node.
 * @sbq: Bitmap queue to initialize.
 * @depth: See sbitmap_init_node().
 * @shift: See sbitmap_init_node().
 * @round_robin: See sbitmap_init_node().
 * @flags: Allocation flags.
 * @node: Memory node to allocate on.
 *
 * Return: Zero on success or negative errno on failure.
 */
int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth,
			    int shift, bool round_robin, gfp_t flags, int node);
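
/*
 * Example (hypothetical caller code): setting up a 256-bit queue with the
 * default word size, then tearing it down. The "tag_queue" name is
 * illustrative only.
 *
 *	struct sbitmap_queue tag_queue;
 *	int ret;
 *
 *	ret = sbitmap_queue_init_node(&tag_queue, 256, -1, false,
 *				      GFP_KERNEL, NUMA_NO_NODE);
 *	if (ret)
 *		return ret;
 *	...
 *	sbitmap_queue_free(&tag_queue);
 */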

/**
 * sbitmap_queue_free() - Free memory used by a &struct sbitmap_queue.
 * @sbq: Bitmap queue to free.
 */
static inline void sbitmap_queue_free(struct sbitmap_queue *sbq)
{
	kfree(sbq->ws);
	sbitmap_free(&sbq->sb);
}

/**
 * sbitmap_queue_resize() - Resize a &struct sbitmap_queue.
 * @sbq: Bitmap queue to resize.
 * @depth: New number of bits to resize to.
 *
 * Like sbitmap_resize(), this doesn't reallocate anything. It has to do
 * some extra work on the &struct sbitmap_queue, so it's not safe to just
 * resize the underlying &struct sbitmap.
 */
void sbitmap_queue_resize(struct sbitmap_queue *sbq, unsigned int depth);

/**
 * __sbitmap_queue_get() - Try to allocate a free bit from a &struct
 * sbitmap_queue with preemption already disabled.
 * @sbq: Bitmap queue to allocate from.
 *
 * Return: Non-negative allocated bit number if successful, -1 otherwise.
 */
int __sbitmap_queue_get(struct sbitmap_queue *sbq);

/**
 * __sbitmap_queue_get_shallow() - Try to allocate a free bit from a &struct
 * sbitmap_queue, limiting the depth used from each word, with preemption
 * already disabled.
 * @sbq: Bitmap queue to allocate from.
 * @shallow_depth: The maximum number of bits to allocate from a single word.
 * See sbitmap_get_shallow().
 *
 * If you call this, make sure to call sbitmap_queue_min_shallow_depth() after
 * initializing @sbq.
 *
 * Return: Non-negative allocated bit number if successful, -1 otherwise.
 */
int __sbitmap_queue_get_shallow(struct sbitmap_queue *sbq,
				unsigned int shallow_depth);

/**
 * sbitmap_queue_get() - Try to allocate a free bit from a &struct
 * sbitmap_queue.
 * @sbq: Bitmap queue to allocate from.
 * @cpu: Output parameter; will contain the CPU we ran on (e.g., to be passed to
 *       sbitmap_queue_clear()).
 *
 * Return: Non-negative allocated bit number if successful, -1 otherwise.
 */
static inline int sbitmap_queue_get(struct sbitmap_queue *sbq,
				    unsigned int *cpu)
{
	int nr;

	*cpu = get_cpu();
	nr = __sbitmap_queue_get(sbq);
	put_cpu();
	return nr;
}
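
/*
 * The usual pairing (hypothetical caller code): the CPU returned by
 * sbitmap_queue_get() must be handed back to sbitmap_queue_clear().
 *
 *	unsigned int cpu;
 *	int nr = sbitmap_queue_get(sbq, &cpu);
 *
 *	if (nr >= 0) {
 *		... use bit nr ...
 *		sbitmap_queue_clear(sbq, nr, cpu);
 *	}
 */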

/**
 * sbitmap_queue_get_shallow() - Try to allocate a free bit from a &struct
 * sbitmap_queue, limiting the depth used from each word.
 * @sbq: Bitmap queue to allocate from.
 * @cpu: Output parameter; will contain the CPU we ran on (e.g., to be passed to
 *       sbitmap_queue_clear()).
 * @shallow_depth: The maximum number of bits to allocate from a single word.
 * See sbitmap_get_shallow().
 *
 * If you call this, make sure to call sbitmap_queue_min_shallow_depth() after
 * initializing @sbq.
 *
 * Return: Non-negative allocated bit number if successful, -1 otherwise.
 */
static inline int sbitmap_queue_get_shallow(struct sbitmap_queue *sbq,
					    unsigned int *cpu,
					    unsigned int shallow_depth)
{
	int nr;

	*cpu = get_cpu();
	nr = __sbitmap_queue_get_shallow(sbq, shallow_depth);
	put_cpu();
	return nr;
}

/**
 * sbitmap_queue_min_shallow_depth() - Inform a &struct sbitmap_queue of the
 * minimum shallow depth that will be used.
 * @sbq: Bitmap queue in question.
 * @min_shallow_depth: The minimum shallow depth that will be passed to
 * sbitmap_queue_get_shallow() or __sbitmap_queue_get_shallow().
 *
 * sbitmap_queue_clear() batches wakeups as an optimization. The batch size
 * depends on the depth of the bitmap. Since the shallow allocation functions
 * effectively operate with a different depth, the shallow depth must be taken
 * into account when calculating the batch size. This function must be called
 * with the minimum shallow depth that will be used. Failure to do so can result
 * in missed wakeups.
 */
void sbitmap_queue_min_shallow_depth(struct sbitmap_queue *sbq,
				     unsigned int min_shallow_depth);
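
/*
 * A sketch of the required call order (hypothetical caller code; the depth
 * of 16 is illustrative): declare the smallest shallow depth up front,
 * after initialization but before any shallow allocation.
 *
 *	sbitmap_queue_min_shallow_depth(sbq, 16);
 *	...
 *	nr = sbitmap_queue_get_shallow(sbq, &cpu, 16);
 */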

/**
 * sbitmap_queue_clear() - Free an allocated bit and wake up waiters on a
 * &struct sbitmap_queue.
 * @sbq: Bitmap to free from.
 * @nr: Bit number to free.
 * @cpu: CPU the bit was allocated on.
 */
void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr,
			 unsigned int cpu);

static inline int sbq_index_inc(int index)
{
	return (index + 1) & (SBQ_WAIT_QUEUES - 1);
}
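
/*
 * The mask above relies on SBQ_WAIT_QUEUES being a power of two: with
 * eight queues, (7 + 1) & 7 == 0, so the index wraps around without a
 * modulo.
 */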

static inline void sbq_index_atomic_inc(atomic_t *index)
{
	int old = atomic_read(index);
	int new = sbq_index_inc(old);
	atomic_cmpxchg(index, old, new);
}

/**
 * sbq_wait_ptr() - Get the next wait queue to use for a &struct
 * sbitmap_queue.
 * @sbq: Bitmap queue to wait on.
 * @wait_index: A counter per "user" of @sbq.
 */
static inline struct sbq_wait_state *sbq_wait_ptr(struct sbitmap_queue *sbq,
						  atomic_t *wait_index)
{
	struct sbq_wait_state *ws;

	ws = &sbq->ws[atomic_read(wait_index)];
	sbq_index_atomic_inc(wait_index);
	return ws;
}

/**
 * sbitmap_queue_wake_all() - Wake up everything waiting on a &struct
 * sbitmap_queue.
 * @sbq: Bitmap queue to wake up.
 */
void sbitmap_queue_wake_all(struct sbitmap_queue *sbq);

/**
 * sbitmap_queue_wake_up() - Wake up some of the waiters in one waitqueue
 * on a &struct sbitmap_queue.
 * @sbq: Bitmap queue to wake up.
 */
void sbitmap_queue_wake_up(struct sbitmap_queue *sbq);

/**
 * sbitmap_queue_show() - Dump &struct sbitmap_queue information to a &struct
 * seq_file.
 * @sbq: Bitmap queue to show.
 * @m: struct seq_file to write to.
 *
 * This is intended for debugging. The format may change at any time.
 */
void sbitmap_queue_show(struct sbitmap_queue *sbq, struct seq_file *m);

struct sbq_wait {
	struct sbitmap_queue *sbq;	/* if set, sbq_wait is accounted */
	struct wait_queue_entry wait;
};

#define DEFINE_SBQ_WAIT(name)							\
	struct sbq_wait name = {						\
		.sbq = NULL,							\
		.wait = {							\
			.private	= current,				\
			.func		= autoremove_wake_function,		\
			.entry		= LIST_HEAD_INIT((name).wait.entry),	\
		}								\
	}

/*
 * Wrapper around prepare_to_wait_exclusive(), which maintains some extra
 * internal state.
 */
void sbitmap_prepare_to_wait(struct sbitmap_queue *sbq,
				struct sbq_wait_state *ws,
				struct sbq_wait *sbq_wait, int state);

/*
 * Must be paired with sbitmap_prepare_to_wait().
 */
void sbitmap_finish_wait(struct sbitmap_queue *sbq, struct sbq_wait_state *ws,
				struct sbq_wait *sbq_wait);

/*
 * Wrapper around add_wait_queue(), which maintains some extra internal state.
 */
void sbitmap_add_wait_queue(struct sbitmap_queue *sbq,
			    struct sbq_wait_state *ws,
			    struct sbq_wait *sbq_wait);

/*
 * Must be paired with sbitmap_add_wait_queue().
 */
void sbitmap_del_wait_queue(struct sbq_wait *sbq_wait);

#endif /* __LINUX_SCALE_BITMAP_H */