/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Fast and scalable bitmaps.
 *
 * Copyright (C) 2016 Facebook
 * Copyright (C) 2013-2014 Jens Axboe
 */

#ifndef __LINUX_SCALE_BITMAP_H
#define __LINUX_SCALE_BITMAP_H

#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/cache.h>
#include <linux/list.h>
#include <linux/log2.h>
#include <linux/minmax.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/types.h>
#include <linux/wait.h>

struct seq_file;

/**
 * struct sbitmap_word - Word in a &struct sbitmap.
 */
struct sbitmap_word {
	/**
	 * @word: word holding free bits
	 */
	unsigned long word;

	/**
	 * @cleared: word holding cleared bits
	 */
	unsigned long cleared ____cacheline_aligned_in_smp;

	/**
	 * @swap_lock: serializes simultaneous updates of ->word and ->cleared
	 */
	raw_spinlock_t swap_lock;
} ____cacheline_aligned_in_smp;

/**
 * struct sbitmap - Scalable bitmap.
 *
 * A &struct sbitmap is spread over multiple cachelines to avoid ping-pong. This
 * trades off higher memory usage for better scalability.
 */
struct sbitmap {
	/**
	 * @depth: Number of bits used in the whole bitmap.
	 */
	unsigned int depth;

	/**
	 * @shift: log2(number of bits used per word)
	 */
	unsigned int shift;

	/**
	 * @map_nr: Number of words (cachelines) being used for the bitmap.
	 */
	unsigned int map_nr;

	/**
	 * @round_robin: Allocate bits in strict round-robin order.
	 */
	bool round_robin;

	/**
	 * @map: Allocated bitmap.
	 */
	struct sbitmap_word *map;

	/*
	 * @alloc_hint: Cache of last successfully allocated or freed bit.
	 *
	 * This is per-cpu, which allows multiple users to stick to different
	 * cachelines until the map is exhausted.
	 */
	unsigned int __percpu *alloc_hint;
};

#define SBQ_WAIT_QUEUES 8
#define SBQ_WAKE_BATCH 8

/**
 * struct sbq_wait_state - Wait queue in a &struct sbitmap_queue.
 */
struct sbq_wait_state {
	/**
	 * @wait: Wait queue.
	 */
	wait_queue_head_t wait;
} ____cacheline_aligned_in_smp;

/**
 * struct sbitmap_queue - Scalable bitmap with the added ability to wait on free
 * bits.
 *
 * A &struct sbitmap_queue uses multiple wait queues and rolling wakeups to
 * avoid contention on the wait queue spinlock. This ensures that we don't hit a
 * scalability wall when we run out of free bits and have to start putting tasks
 * to sleep.
 */
struct sbitmap_queue {
	/**
	 * @sb: Scalable bitmap.
	 */
	struct sbitmap sb;

	/**
	 * @wake_batch: Number of bits which must be freed before we wake up any
	 * waiters.
	 */
	unsigned int wake_batch;

	/**
	 * @wake_index: Next wait queue in @ws to wake up.
	 */
	atomic_t wake_index;

	/**
	 * @ws: Wait queues.
	 */
	struct sbq_wait_state *ws;

	/*
	 * @ws_active: count of currently active ws waitqueues
	 */
	atomic_t ws_active;

	/**
	 * @min_shallow_depth: The minimum shallow depth which may be passed to
	 * sbitmap_queue_get_shallow()
	 */
	unsigned int min_shallow_depth;

	/**
	 * @completion_cnt: Number of cleared bits passed to the
	 * wakeup function.
	 */
	atomic_t completion_cnt;

	/**
	 * @wakeup_cnt: Number of thread wakeups issued.
	 */
	atomic_t wakeup_cnt;
};

/**
 * sbitmap_init_node() - Initialize a &struct sbitmap on a specific memory node.
 * @sb: Bitmap to initialize.
 * @depth: Number of bits to allocate.
 * @shift: Use 2^@shift bits per word in the bitmap; if a negative number is
 *         given, a good default is chosen.
 * @flags: Allocation flags.
 * @node: Memory node to allocate on.
 * @round_robin: If true, be stricter about allocation order; always allocate
 *               starting from the last allocated bit. This is less efficient
 *               than the default behavior (false).
 * @alloc_hint: If true, apply percpu hint for where to start searching for
 *              a free bit.
 *
 * Return: Zero on success or negative errno on failure.
 */
int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift,
		      gfp_t flags, int node, bool round_robin, bool alloc_hint);

/* sbitmap internal helper */
static inline unsigned int __map_depth(const struct sbitmap *sb, int index)
{
	if (index == sb->map_nr - 1)
		return sb->depth - (index << sb->shift);
	return 1U << sb->shift;
}

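/*
 * Illustrative example for __map_depth() (numbers chosen for this comment
 * only): with sb->depth == 100 and sb->shift == 6 (64 bits per word),
 * map_nr is 2; __map_depth() returns 64 for index 0 and 100 - 64 = 36 for
 * the last, partially used word.
 */
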
/**
 * sbitmap_free() - Free memory used by a &struct sbitmap.
 * @sb: Bitmap to free.
 */
static inline void sbitmap_free(struct sbitmap *sb)
{
	free_percpu(sb->alloc_hint);
	kvfree(sb->map);
	sb->map = NULL;
}

/**
 * sbitmap_resize() - Resize a &struct sbitmap.
 * @sb: Bitmap to resize.
 * @depth: New number of bits to resize to.
 *
 * Doesn't reallocate anything. It's up to the caller to ensure that the new
 * depth doesn't exceed the depth that the sb was initialized with.
 */
void sbitmap_resize(struct sbitmap *sb, unsigned int depth);

/**
 * sbitmap_get() - Try to allocate a free bit from a &struct sbitmap.
 * @sb: Bitmap to allocate from.
 *
 * This operation provides acquire barrier semantics if it succeeds.
 *
 * Return: Non-negative allocated bit number if successful, -1 otherwise.
 */
int sbitmap_get(struct sbitmap *sb);

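/*
 * Illustrative sketch, not part of the API: a typical lifecycle of a plain
 * &struct sbitmap.  The depth, GFP flags and NUMA node below are arbitrary
 * example values; sbitmap_put() is defined further down in this header.
 *
 *	struct sbitmap sb;
 *	int ret, nr;
 *
 *	ret = sbitmap_init_node(&sb, 128, -1, GFP_KERNEL, NUMA_NO_NODE,
 *				false, true);
 *	if (ret)
 *		return ret;
 *
 *	nr = sbitmap_get(&sb);
 *	if (nr >= 0) {
 *		... use bit 'nr' as a tag or index ...
 *		sbitmap_put(&sb, nr);
 *	}
 *
 *	sbitmap_free(&sb);
 */
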
/**
 * sbitmap_get_shallow() - Try to allocate a free bit from a &struct sbitmap,
 * limiting the depth used from each word.
 * @sb: Bitmap to allocate from.
 * @shallow_depth: The maximum number of bits to allocate from a single word.
 *
 * This rather specific operation allows for having multiple users with
 * different allocation limits. E.g., there can be a high-priority class that
 * uses sbitmap_get() and a low-priority class that uses sbitmap_get_shallow()
 * with a @shallow_depth of (1 << (@sb->shift - 1)). Then, the low-priority
 * class can only allocate half of the total bits in the bitmap, preventing it
 * from starving out the high-priority class.
 *
 * Return: Non-negative allocated bit number if successful, -1 otherwise.
 */
int sbitmap_get_shallow(struct sbitmap *sb, unsigned long shallow_depth);

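/*
 * Illustrative sketch, not part of the API, of the two-class setup described
 * above.  The half-of-each-word split is just an example value.
 *
 *	High-priority path:
 *		nr = sbitmap_get(sb);
 *
 *	Low-priority path, limited to half of each word:
 *		nr = sbitmap_get_shallow(sb, 1UL << (sb->shift - 1));
 */
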
/**
 * sbitmap_any_bit_set() - Check for a set bit in a &struct sbitmap.
 * @sb: Bitmap to check.
 *
 * Return: true if any bit in the bitmap is set, false otherwise.
 */
bool sbitmap_any_bit_set(const struct sbitmap *sb);

#define SB_NR_TO_INDEX(sb, bitnr) ((bitnr) >> (sb)->shift)
#define SB_NR_TO_BIT(sb, bitnr) ((bitnr) & ((1U << (sb)->shift) - 1U))

typedef bool (*sb_for_each_fn)(struct sbitmap *, unsigned int, void *);

/**
 * __sbitmap_for_each_set() - Iterate over each set bit in a &struct sbitmap.
 * @start: Where to start the iteration.
 * @sb: Bitmap to iterate over.
 * @fn: Callback. Should return true to continue or false to break early.
 * @data: Pointer to pass to callback.
 *
 * This is inline even though it's non-trivial so that the function calls to the
 * callback will hopefully get optimized away.
 */
static inline void __sbitmap_for_each_set(struct sbitmap *sb,
					  unsigned int start,
					  sb_for_each_fn fn, void *data)
{
	unsigned int index;
	unsigned int nr;
	unsigned int scanned = 0;

	if (start >= sb->depth)
		start = 0;
	index = SB_NR_TO_INDEX(sb, start);
	nr = SB_NR_TO_BIT(sb, start);

	while (scanned < sb->depth) {
		unsigned long word;
		unsigned int depth = min_t(unsigned int,
					   __map_depth(sb, index) - nr,
					   sb->depth - scanned);

		scanned += depth;
		word = sb->map[index].word & ~sb->map[index].cleared;
		if (!word)
			goto next;

		/*
		 * On the first iteration of the outer loop, we need to add the
		 * bit offset back to the size of the word for find_next_bit().
		 * On all other iterations, nr is zero, so this is a noop.
		 */
		depth += nr;
		while (1) {
			nr = find_next_bit(&word, depth, nr);
			if (nr >= depth)
				break;
			if (!fn(sb, (index << sb->shift) + nr, data))
				return;

			nr++;
		}
next:
		nr = 0;
		if (++index >= sb->map_nr)
			index = 0;
	}
}

/**
 * sbitmap_for_each_set() - Iterate over each set bit in a &struct sbitmap.
 * @sb: Bitmap to iterate over.
 * @fn: Callback. Should return true to continue or false to break early.
 * @data: Pointer to pass to callback.
 */
static inline void sbitmap_for_each_set(struct sbitmap *sb, sb_for_each_fn fn,
					void *data)
{
	__sbitmap_for_each_set(sb, 0, fn, data);
}

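/*
 * Illustrative sketch, not part of the API: counting the set (and not
 * cleared) bits with sbitmap_for_each_set().  The callback returns true to
 * keep iterating.
 *
 *	static bool count_set_bit(struct sbitmap *sb, unsigned int bitnr,
 *				  void *data)
 *	{
 *		unsigned int *count = data;
 *
 *		(*count)++;
 *		return true;
 *	}
 *
 *	unsigned int count = 0;
 *
 *	sbitmap_for_each_set(sb, count_set_bit, &count);
 */
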
static inline unsigned long *__sbitmap_word(struct sbitmap *sb,
					    unsigned int bitnr)
{
	return &sb->map[SB_NR_TO_INDEX(sb, bitnr)].word;
}

/* Helpers equivalent to the operations in asm/bitops.h and linux/bitmap.h */

static inline void sbitmap_set_bit(struct sbitmap *sb, unsigned int bitnr)
{
	set_bit(SB_NR_TO_BIT(sb, bitnr), __sbitmap_word(sb, bitnr));
}

static inline void sbitmap_clear_bit(struct sbitmap *sb, unsigned int bitnr)
{
	clear_bit(SB_NR_TO_BIT(sb, bitnr), __sbitmap_word(sb, bitnr));
}

/*
 * This one is special, since it doesn't actually clear the bit, rather it
 * sets the corresponding bit in the ->cleared mask instead. Paired with
 * the caller doing sbitmap_deferred_clear() if a given index is full, which
 * will clear the previously freed entries in the corresponding ->word.
 */
static inline void sbitmap_deferred_clear_bit(struct sbitmap *sb, unsigned int bitnr)
{
	unsigned long *addr = &sb->map[SB_NR_TO_INDEX(sb, bitnr)].cleared;

	set_bit(SB_NR_TO_BIT(sb, bitnr), addr);
}

/*
 * Pairs with sbitmap_get(): this one both defers clearing the bit (via
 * sbitmap_deferred_clear_bit()) and updates the allocation hint.
 */
static inline void sbitmap_put(struct sbitmap *sb, unsigned int bitnr)
{
	sbitmap_deferred_clear_bit(sb, bitnr);

	if (likely(sb->alloc_hint && !sb->round_robin && bitnr < sb->depth))
		*raw_cpu_ptr(sb->alloc_hint) = bitnr;
}

static inline int sbitmap_test_bit(struct sbitmap *sb, unsigned int bitnr)
{
	return test_bit(SB_NR_TO_BIT(sb, bitnr), __sbitmap_word(sb, bitnr));
}

static inline int sbitmap_calculate_shift(unsigned int depth)
{
	int shift = ilog2(BITS_PER_LONG);

	/*
	 * If the bitmap is small, shrink the number of bits per word so
	 * we spread over a few cachelines, at least. If less than 4
	 * bits, just forget about it, it's not going to work optimally
	 * anyway.
	 */
	if (depth >= 4) {
		while ((4U << shift) > depth)
			shift--;
	}

	return shift;
}

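/*
 * Illustrative example for sbitmap_calculate_shift() (numbers chosen for
 * this comment only): on a 64-bit machine the shift starts at
 * ilog2(64) == 6.  For depth == 16 the loop lowers it to 2, i.e. 4 bits per
 * word spread across 4 words; for any depth >= 256 the full 6 (64 bits per
 * word) is kept.
 */
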
/**
 * sbitmap_show() - Dump &struct sbitmap information to a &struct seq_file.
 * @sb: Bitmap to show.
 * @m: struct seq_file to write to.
 *
 * This is intended for debugging. The format may change at any time.
 */
void sbitmap_show(struct sbitmap *sb, struct seq_file *m);

/**
 * sbitmap_weight() - Return how many bits are set and not cleared in a
 * &struct sbitmap.
 * @sb: Bitmap to check.
 *
 * Return: Number of bits that are set and not cleared.
 */
unsigned int sbitmap_weight(const struct sbitmap *sb);

/**
 * sbitmap_bitmap_show() - Write a hex dump of a &struct sbitmap to a &struct
 * seq_file.
 * @sb: Bitmap to show.
 * @m: struct seq_file to write to.
 *
 * This is intended for debugging. The output isn't guaranteed to be internally
 * consistent.
 */
void sbitmap_bitmap_show(struct sbitmap *sb, struct seq_file *m);

/**
 * sbitmap_queue_init_node() - Initialize a &struct sbitmap_queue on a specific
 * memory node.
 * @sbq: Bitmap queue to initialize.
 * @depth: See sbitmap_init_node().
 * @shift: See sbitmap_init_node().
 * @round_robin: See sbitmap_get().
 * @flags: Allocation flags.
 * @node: Memory node to allocate on.
 *
 * Return: Zero on success or negative errno on failure.
 */
int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth,
			    int shift, bool round_robin, gfp_t flags, int node);

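/*
 * Illustrative sketch, not part of the API: setting up and tearing down a
 * &struct sbitmap_queue.  The depth, GFP flags and NUMA node are arbitrary
 * example values.
 *
 *	struct sbitmap_queue sbq;
 *	int ret;
 *
 *	ret = sbitmap_queue_init_node(&sbq, 256, -1, false, GFP_KERNEL,
 *				      NUMA_NO_NODE);
 *	if (ret)
 *		return ret;
 *	...
 *	sbitmap_queue_free(&sbq);
 */
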
/**
 * sbitmap_queue_free() - Free memory used by a &struct sbitmap_queue.
 *
 * @sbq: Bitmap queue to free.
 */
static inline void sbitmap_queue_free(struct sbitmap_queue *sbq)
{
	kfree(sbq->ws);
	sbitmap_free(&sbq->sb);
}

/**
 * sbitmap_queue_recalculate_wake_batch() - Recalculate wake batch
 * @sbq: Bitmap queue to recalculate wake batch.
 * @users: Number of shares.
 *
 * Like sbitmap_queue_update_wake_batch(), this will calculate wake batch
 * by depth. This interface is for HCTX shared tags or queue shared tags.
 */
void sbitmap_queue_recalculate_wake_batch(struct sbitmap_queue *sbq,
					  unsigned int users);

/**
 * sbitmap_queue_resize() - Resize a &struct sbitmap_queue.
 * @sbq: Bitmap queue to resize.
 * @depth: New number of bits to resize to.
 *
 * Like sbitmap_resize(), this doesn't reallocate anything. It has to do
 * some extra work on the &struct sbitmap_queue, so it's not safe to just
 * resize the underlying &struct sbitmap.
 */
void sbitmap_queue_resize(struct sbitmap_queue *sbq, unsigned int depth);

/**
 * __sbitmap_queue_get() - Try to allocate a free bit from a &struct
 * sbitmap_queue with preemption already disabled.
 * @sbq: Bitmap queue to allocate from.
 *
 * Return: Non-negative allocated bit number if successful, -1 otherwise.
 */
int __sbitmap_queue_get(struct sbitmap_queue *sbq);

/**
 * __sbitmap_queue_get_batch() - Try to allocate a batch of free bits
 * @sbq: Bitmap queue to allocate from.
 * @nr_tags: number of tags requested
 * @offset: offset to add to returned bits
 *
 * Return: Mask of allocated tags, 0 if none are found. Each tag allocated is
 * a bit in the mask returned, and the caller must add @offset to the value to
 * get the absolute tag value.
 */
unsigned long __sbitmap_queue_get_batch(struct sbitmap_queue *sbq, int nr_tags,
					unsigned int *offset);

/**
 * sbitmap_queue_get_shallow() - Try to allocate a free bit from a &struct
 * sbitmap_queue, limiting the depth used from each word, with preemption
 * already disabled.
 * @sbq: Bitmap queue to allocate from.
 * @shallow_depth: The maximum number of bits to allocate from a single word.
 * See sbitmap_get_shallow().
 *
 * If you call this, make sure to call sbitmap_queue_min_shallow_depth() after
 * initializing @sbq.
 *
 * Return: Non-negative allocated bit number if successful, -1 otherwise.
 */
int sbitmap_queue_get_shallow(struct sbitmap_queue *sbq,
			      unsigned int shallow_depth);

/**
 * sbitmap_queue_get() - Try to allocate a free bit from a &struct
 * sbitmap_queue.
 * @sbq: Bitmap queue to allocate from.
 * @cpu: Output parameter; will contain the CPU we ran on (e.g., to be passed to
 *       sbitmap_queue_clear()).
 *
 * Return: Non-negative allocated bit number if successful, -1 otherwise.
 */
static inline int sbitmap_queue_get(struct sbitmap_queue *sbq,
				    unsigned int *cpu)
{
	int nr;

	*cpu = get_cpu();
	nr = __sbitmap_queue_get(sbq);
	put_cpu();
	return nr;
}

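/*
 * Illustrative sketch, not part of the API: pairing sbitmap_queue_get() with
 * sbitmap_queue_clear() (declared below).  The CPU reported by the get side
 * is handed back on the clear side so the per-cpu allocation hint stays
 * useful.  -EBUSY is an arbitrary example for the "no bit free" case.
 *
 *	unsigned int cpu;
 *	int nr;
 *
 *	nr = sbitmap_queue_get(sbq, &cpu);
 *	if (nr < 0)
 *		return -EBUSY;
 *	... use bit 'nr' ...
 *	sbitmap_queue_clear(sbq, nr, cpu);
 */
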
51240aabb67SOmar Sandoval /**
513a3275539SOmar Sandoval * sbitmap_queue_min_shallow_depth() - Inform a &struct sbitmap_queue of the
514a3275539SOmar Sandoval * minimum shallow depth that will be used.
515a3275539SOmar Sandoval * @sbq: Bitmap queue in question.
516a3275539SOmar Sandoval * @min_shallow_depth: The minimum shallow depth that will be passed to
517a3275539SOmar Sandoval * sbitmap_queue_get_shallow() or __sbitmap_queue_get_shallow().
518a3275539SOmar Sandoval *
519a3275539SOmar Sandoval * sbitmap_queue_clear() batches wakeups as an optimization. The batch size
520a3275539SOmar Sandoval * depends on the depth of the bitmap. Since the shallow allocation functions
521a3275539SOmar Sandoval * effectively operate with a different depth, the shallow depth must be taken
522a3275539SOmar Sandoval * into account when calculating the batch size. This function must be called
523a3275539SOmar Sandoval * with the minimum shallow depth that will be used. Failure to do so can result
524a3275539SOmar Sandoval * in missed wakeups.
525a3275539SOmar Sandoval */
526a3275539SOmar Sandoval void sbitmap_queue_min_shallow_depth(struct sbitmap_queue *sbq,
527a3275539SOmar Sandoval unsigned int min_shallow_depth);
528a3275539SOmar Sandoval
529a3275539SOmar Sandoval /**
53088459642SOmar Sandoval * sbitmap_queue_clear() - Free an allocated bit and wake up waiters on a
53188459642SOmar Sandoval * &struct sbitmap_queue.
53288459642SOmar Sandoval * @sbq: Bitmap to free from.
53388459642SOmar Sandoval * @nr: Bit number to free.
53440aabb67SOmar Sandoval * @cpu: CPU the bit was allocated on.
53588459642SOmar Sandoval */
53640aabb67SOmar Sandoval void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr,
537f4a644dbSOmar Sandoval unsigned int cpu);
53888459642SOmar Sandoval
5391aec5e4aSJens Axboe /**
 * sbitmap_queue_clear_batch() - Free a batch of allocated bits in a
 * &struct sbitmap_queue.
 * @sbq: Bitmap to free from.
 * @offset: offset for each tag in array
 * @tags: array of tags
 * @nr_tags: number of tags in array
 */
void sbitmap_queue_clear_batch(struct sbitmap_queue *sbq, int offset,
			       int *tags, int nr_tags);

static inline int sbq_index_inc(int index)
{
	return (index + 1) & (SBQ_WAIT_QUEUES - 1);
}

static inline void sbq_index_atomic_inc(atomic_t *index)
{
	int old = atomic_read(index);
	int new = sbq_index_inc(old);
	atomic_cmpxchg(index, old, new);
}

/**
 * sbq_wait_ptr() - Get the next wait queue to use for a &struct
 * sbitmap_queue.
 * @sbq: Bitmap queue to wait on.
 * @wait_index: A counter per "user" of @sbq.
 */
static inline struct sbq_wait_state *sbq_wait_ptr(struct sbitmap_queue *sbq,
						  atomic_t *wait_index)
{
	struct sbq_wait_state *ws;

	ws = &sbq->ws[atomic_read(wait_index)];
	sbq_index_atomic_inc(wait_index);
	return ws;
}

/**
 * sbitmap_queue_wake_all() - Wake up everything waiting on a &struct
 * sbitmap_queue.
 * @sbq: Bitmap queue to wake up.
 */
void sbitmap_queue_wake_all(struct sbitmap_queue *sbq);

/**
 * sbitmap_queue_wake_up() - Wake up some of the waiters in one waitqueue
 * on a &struct sbitmap_queue.
 * @sbq: Bitmap queue to wake up.
 * @nr: Number of bits cleared.
 */
void sbitmap_queue_wake_up(struct sbitmap_queue *sbq, int nr);

/**
 * sbitmap_queue_show() - Dump &struct sbitmap_queue information to a &struct
 * seq_file.
 * @sbq: Bitmap queue to show.
 * @m: struct seq_file to write to.
 *
 * This is intended for debugging. The format may change at any time.
 */
void sbitmap_queue_show(struct sbitmap_queue *sbq, struct seq_file *m);

struct sbq_wait {
	struct sbitmap_queue *sbq;	/* if set, sbq_wait is accounted */
	struct wait_queue_entry wait;
};

#define DEFINE_SBQ_WAIT(name)						\
	struct sbq_wait name = {					\
		.sbq = NULL,						\
		.wait = {						\
			.private	= current,			\
			.func		= autoremove_wake_function,	\
			.entry		= LIST_HEAD_INIT((name).wait.entry), \
		}							\
	}

/*
 * Wrapper around prepare_to_wait_exclusive(), which maintains some extra
 * internal state.
 */
void sbitmap_prepare_to_wait(struct sbitmap_queue *sbq,
			     struct sbq_wait_state *ws,
			     struct sbq_wait *sbq_wait, int state);

/*
 * Must be paired with sbitmap_prepare_to_wait().
 */
void sbitmap_finish_wait(struct sbitmap_queue *sbq, struct sbq_wait_state *ws,
			 struct sbq_wait *sbq_wait);

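/*
 * Illustrative sketch, not part of the API: sleeping until a bit becomes
 * free, roughly in the style of blk-mq tag allocation.  It assumes the
 * caller keeps a per-user atomic_t wait_index (see sbq_wait_ptr()) and
 * omits error and signal handling.
 *
 *	DEFINE_SBQ_WAIT(wait);
 *	struct sbq_wait_state *ws;
 *	int nr;
 *
 *	do {
 *		ws = sbq_wait_ptr(sbq, &wait_index);
 *		sbitmap_prepare_to_wait(sbq, ws, &wait, TASK_UNINTERRUPTIBLE);
 *		nr = __sbitmap_queue_get(sbq);
 *		if (nr >= 0)
 *			break;
 *		io_schedule();
 *	} while (1);
 *	sbitmap_finish_wait(sbq, ws, &wait);
 */
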
/*
 * Wrapper around add_wait_queue(), which maintains some extra internal state.
 */
void sbitmap_add_wait_queue(struct sbitmap_queue *sbq,
			    struct sbq_wait_state *ws,
			    struct sbq_wait *sbq_wait);

/*
 * Must be paired with sbitmap_add_wait_queue().
 */
void sbitmap_del_wait_queue(struct sbq_wait *sbq_wait);

#endif /* __LINUX_SCALE_BITMAP_H */