// SPDX-License-Identifier: GPL-2.0
/*
 * Primary bucket allocation code
 *
 * Copyright 2012 Google, Inc.
 *
 * Allocation in bcache is done in terms of buckets:
 *
 * Each bucket has an associated 8 bit gen; this gen corresponds to the gen in
 * btree pointers - they must match for the pointer to be considered valid.
 *
 * Thus (assuming a bucket has no dirty data or metadata in it) we can reuse a
 * bucket simply by incrementing its gen.
 *
 * The gens (along with the priorities; it's really the gens that are important
 * but the code is named as if it's the priorities) are written in an arbitrary
 * list of buckets on disk, with a pointer to them in the journal header.
 *
 * When we invalidate a bucket, we have to write its new gen to disk and wait
 * for that write to complete before we use it - otherwise after a crash we
 * could have pointers that appeared to be good but pointed to data that had
 * been overwritten.
 *
 * Since the gens and priorities are all stored contiguously on disk, we can
 * batch this up: We fill up the free_inc list with freshly invalidated buckets,
 * call prio_write(), and when prio_write() finishes we pull buckets off the
 * free_inc list and optionally discard them.
 *
 * free_inc isn't the only freelist - if it was, we'd often have to sleep while
 * priorities and gens were being written before we could allocate. c->free is a
 * smaller freelist, and buckets on that list are always ready to be used.
 *
 * If we've got discards enabled, that happens when a bucket moves from the
 * free_inc list to the free list.
 *
 * There is another freelist, because sometimes we have buckets that we know
 * have nothing pointing into them - these we can reuse without waiting for
 * priorities to be rewritten. These come from freed btree nodes and buckets
 * that garbage collection discovered no longer had valid keys pointing into
 * them (because they were overwritten). That's the unused list - buckets on the
 * unused list move to the free list, optionally being discarded in the process.
 *
 * It's also important to ensure that gens don't wrap around - with respect to
 * either the oldest gen in the btree or the gen on disk. This is quite
 * difficult to do in practice, but we explicitly guard against it anyways - if
 * a bucket is in danger of wrapping around we simply skip invalidating it that
 * time around, and we garbage collect or rewrite the priorities sooner than we
 * would have otherwise.
 *
 * bch_bucket_alloc() allocates a single bucket from a specific cache.
 *
 * bch_bucket_alloc_set() allocates one bucket from different caches
 * out of a cache set.
 *
 * free_some_buckets() drives all the processes described above. It's called
 * from bch_bucket_alloc() and a few other places that need to make sure free
 * buckets are ready.
 *
 * invalidate_buckets_(lru|fifo)() find buckets that are available to be
 * invalidated, and then invalidate them and stick them on the free_inc list -
 * in either lru or fifo order.
 */

#include "bcache.h"
#include "btree.h"

#include <linux/blkdev.h>
#include <linux/kthread.h>
#include <linux/random.h>
#include <trace/events/bcache.h>

#define MAX_OPEN_BUCKETS 128

/* Bucket heap / gen */

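/*
 * Bump a bucket's gen, invalidating any existing pointers into it, and track
 * how far gens have advanced since the last GC (bucket_gc_gen()) so the cache
 * set knows when garbage collection is needed.
 */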
uint8_t bch_inc_gen(struct cache *ca, struct bucket *b)
{
	uint8_t ret = ++b->gen;

	ca->set->need_gc = max(ca->set->need_gc, bucket_gc_gen(b));
	WARN_ON_ONCE(ca->set->need_gc > BUCKET_GC_GEN_MAX);

	return ret;
}

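/*
 * Called as sectors are written to the cache: once enough sectors have gone by
 * (tracked in c->rescale), decay the priority of every bucket that isn't btree
 * metadata or pinned, so recently written buckets stay relatively hotter than
 * old ones.
 */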
void bch_rescale_priorities(struct cache_set *c, int sectors)
{
	struct cache *ca;
	struct bucket *b;
	unsigned long next = c->nbuckets * c->cache->sb.bucket_size / 1024;
	int r;

	atomic_sub(sectors, &c->rescale);

	do {
		r = atomic_read(&c->rescale);

		if (r >= 0)
			return;
	} while (atomic_cmpxchg(&c->rescale, r, r + next) != r);

	mutex_lock(&c->bucket_lock);

	c->min_prio = USHRT_MAX;

	ca = c->cache;
	for_each_bucket(b, ca)
		if (b->prio &&
		    b->prio != BTREE_PRIO &&
		    !atomic_read(&b->pin)) {
			b->prio--;
			c->min_prio = min(c->min_prio, b->prio);
		}

	mutex_unlock(&c->bucket_lock);
}

/*
 * Background allocation thread: scans for buckets to be invalidated,
 * invalidates them, rewrites prios/gens (marking them as invalidated on disk),
 * then optionally issues discard commands to the newly free buckets, then puts
 * them on the various freelists.
 */

static inline bool can_inc_bucket_gen(struct bucket *b)
{
	return bucket_gc_gen(b) < BUCKET_GC_GEN_MAX;
}

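/*
 * A bucket may be invalidated only when GC marks are currently valid (or GC
 * already flagged it reclaimable), it holds no live data or metadata, it isn't
 * pinned, and its gen can still be incremented without risking gen wraparound.
 */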
bool bch_can_invalidate_bucket(struct cache *ca, struct bucket *b)
{
	return (ca->set->gc_mark_valid || b->reclaimable_in_gc) &&
	       ((!GC_MARK(b) || GC_MARK(b) == GC_MARK_RECLAIMABLE) &&
		!atomic_read(&b->pin) && can_inc_bucket_gen(b));
}

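/*
 * Invalidate a bucket in memory: bump its gen, reset its priority and pin it
 * until the new gen has made it to disk. Caller must hold bucket_lock.
 */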
void __bch_invalidate_one_bucket(struct cache *ca, struct bucket *b)
{
	lockdep_assert_held(&ca->set->bucket_lock);
	BUG_ON(GC_MARK(b) && GC_MARK(b) != GC_MARK_RECLAIMABLE);

	if (GC_SECTORS_USED(b))
		trace_bcache_invalidate(ca, b - ca->buckets);

	bch_inc_gen(ca, b);
	b->prio = INITIAL_PRIO;
	atomic_inc(&b->pin);
	b->reclaimable_in_gc = 0;
}

static void bch_invalidate_one_bucket(struct cache *ca, struct bucket *b)
{
	__bch_invalidate_one_bucket(ca, b);

	fifo_push(&ca->free_inc, b - ca->buckets);
}

/*
 * Determines what order we're going to reuse buckets, smallest bucket_prio()
 * first: we also take into account the number of sectors of live data in that
 * bucket, and in order for that multiply to make sense we have to scale the
 * bucket priorities first.
 *
 * Thus, we scale the bucket priorities so that the bucket with the smallest
 * prio is worth 1/8th of what INITIAL_PRIO is worth.
 */

static inline unsigned int new_bucket_prio(struct cache *ca, struct bucket *b)
{
	unsigned int min_prio = (INITIAL_PRIO - ca->set->min_prio) / 8;

	return (b->prio - ca->set->min_prio + min_prio) * GC_SECTORS_USED(b);
}

static inline bool new_bucket_max_cmp(const void *l, const void *r, void *args)
{
	struct bucket **lhs = (struct bucket **)l;
	struct bucket **rhs = (struct bucket **)r;
	struct cache *ca = args;

	return new_bucket_prio(ca, *lhs) > new_bucket_prio(ca, *rhs);
}

static inline bool new_bucket_min_cmp(const void *l, const void *r, void *args)
{
	struct bucket **lhs = (struct bucket **)l;
	struct bucket **rhs = (struct bucket **)r;
	struct cache *ca = args;

	return new_bucket_prio(ca, *lhs) < new_bucket_prio(ca, *rhs);
}

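/*
 * LRU-ish invalidation: collect the heap-size best candidates (smallest
 * new_bucket_prio()) using a max-heap, then re-heapify as a min-heap and
 * invalidate them in ascending priority order until free_inc is full. If we
 * run out of candidates, ask for garbage collection and try again later.
 */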
static void invalidate_buckets_lru(struct cache *ca)
{
	struct bucket *b;
	const struct min_heap_callbacks bucket_max_cmp_callback = {
		.less = new_bucket_max_cmp,
		.swp = NULL,
	};
	const struct min_heap_callbacks bucket_min_cmp_callback = {
		.less = new_bucket_min_cmp,
		.swp = NULL,
	};

	ca->heap.nr = 0;

	for_each_bucket(b, ca) {
		if (!bch_can_invalidate_bucket(ca, b))
			continue;

		if (!min_heap_full(&ca->heap))
			min_heap_push(&ca->heap, &b, &bucket_max_cmp_callback, ca);
		else if (!new_bucket_max_cmp(&b, min_heap_peek(&ca->heap), ca)) {
			ca->heap.data[0] = b;
			min_heap_sift_down(&ca->heap, 0, &bucket_max_cmp_callback, ca);
		}
	}

	min_heapify_all(&ca->heap, &bucket_min_cmp_callback, ca);

	while (!fifo_full(&ca->free_inc)) {
		if (!ca->heap.nr) {
			/*
			 * We don't want to be calling invalidate_buckets()
			 * multiple times when it can't do anything
			 */
			ca->invalidate_needs_gc = 1;
			wake_up_gc(ca->set);
			return;
		}
		b = min_heap_peek(&ca->heap)[0];
		min_heap_pop(&ca->heap, &bucket_min_cmp_callback, ca);

		bch_invalidate_one_bucket(ca, b);
	}
}

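/*
 * FIFO invalidation: walk the buckets in order, wrapping around at the end of
 * the device, and invalidate whatever is reclaimable until free_inc is full or
 * every bucket has been checked.
 */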
static void invalidate_buckets_fifo(struct cache *ca)
{
	struct bucket *b;
	size_t checked = 0;

	while (!fifo_full(&ca->free_inc)) {
		if (ca->fifo_last_bucket < ca->sb.first_bucket ||
		    ca->fifo_last_bucket >= ca->sb.nbuckets)
			ca->fifo_last_bucket = ca->sb.first_bucket;

		b = ca->buckets + ca->fifo_last_bucket++;

		if (bch_can_invalidate_bucket(ca, b))
			bch_invalidate_one_bucket(ca, b);

		if (++checked >= ca->sb.nbuckets) {
			ca->invalidate_needs_gc = 1;
			wake_up_gc(ca->set);
			return;
		}
	}
}

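/*
 * Random invalidation: probe buckets at random offsets, invalidating the
 * reclaimable ones, and give up (waking GC) after checking half the buckets
 * without filling free_inc.
 */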
static void invalidate_buckets_random(struct cache *ca)
{
	struct bucket *b;
	size_t checked = 0;

	while (!fifo_full(&ca->free_inc)) {
		size_t n;

		get_random_bytes(&n, sizeof(n));

		n %= (size_t) (ca->sb.nbuckets - ca->sb.first_bucket);
		n += ca->sb.first_bucket;

		b = ca->buckets + n;

		if (bch_can_invalidate_bucket(ca, b))
			bch_invalidate_one_bucket(ca, b);

		if (++checked >= ca->sb.nbuckets / 2) {
			ca->invalidate_needs_gc = 1;
			wake_up_gc(ca->set);
			return;
		}
	}
}

static void invalidate_buckets(struct cache *ca)
{
	BUG_ON(ca->invalidate_needs_gc);

	switch (CACHE_REPLACEMENT(&ca->sb)) {
	case CACHE_REPLACEMENT_LRU:
		invalidate_buckets_lru(ca);
		break;
	case CACHE_REPLACEMENT_FIFO:
		invalidate_buckets_fifo(ca);
		break;
	case CACHE_REPLACEMENT_RANDOM:
		invalidate_buckets_random(ca);
		break;
	}
}

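/*
 * Sleep until @cond becomes true, dropping bucket_lock while asleep. If the
 * allocator thread is asked to stop (or the cache set's IO is disabled), bail
 * out to the "out" label in bch_allocator_thread() instead.
 */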
#define allocator_wait(ca, cond)					\
do {									\
	while (1) {							\
		set_current_state(TASK_INTERRUPTIBLE);			\
		if (cond)						\
			break;						\
									\
		mutex_unlock(&(ca)->set->bucket_lock);			\
		if (kthread_should_stop() ||				\
		    test_bit(CACHE_SET_IO_DISABLE, &ca->set->flags)) {	\
			set_current_state(TASK_RUNNING);		\
			goto out;					\
		}							\
									\
		schedule();						\
		mutex_lock(&(ca)->set->bucket_lock);			\
	}								\
	__set_current_state(TASK_RUNNING);				\
} while (0)

static int bch_allocator_push(struct cache *ca, long bucket)
{
	unsigned int i;

	/* Prios/gens are actually the most important reserve */
	if (fifo_push(&ca->free[RESERVE_PRIO], bucket))
		return true;

	for (i = 0; i < RESERVE_NR; i++)
		if (fifo_push(&ca->free[i], bucket))
			return true;

	return false;
}

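/*
 * Main loop of the background allocator: move buckets from free_inc to the
 * free lists (issuing discards if enabled), then invalidate more buckets, then
 * write out the updated prios/gens before they are reused.
 */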
static int bch_allocator_thread(void *arg)
{
	struct cache *ca = arg;

	mutex_lock(&ca->set->bucket_lock);

	while (1) {
		/*
		 * First, we pull buckets off of the unused and free_inc lists,
		 * possibly issue discards to them, then we add the bucket to
		 * the free list:
		 */
		while (1) {
			long bucket;

			if (!fifo_pop(&ca->free_inc, bucket))
				break;

			if (ca->discard) {
				mutex_unlock(&ca->set->bucket_lock);
				blkdev_issue_discard(ca->bdev,
					bucket_to_sector(ca->set, bucket),
					ca->sb.bucket_size, GFP_KERNEL);
				mutex_lock(&ca->set->bucket_lock);
			}

			allocator_wait(ca, bch_allocator_push(ca, bucket));
			wake_up(&ca->set->btree_cache_wait);
			wake_up(&ca->set->bucket_wait);
		}

		/*
		 * We've run out of free buckets, we need to find some buckets
		 * we can invalidate. First, invalidate them in memory and add
		 * them to the free_inc list:
		 */

retry_invalidate:
		allocator_wait(ca, !ca->invalidate_needs_gc);
		invalidate_buckets(ca);

		/*
		 * Now, we write their new gens to disk so we can start writing
		 * new stuff to them:
		 */
		allocator_wait(ca, !atomic_read(&ca->set->prio_blocked));
		if (CACHE_SYNC(&ca->sb)) {
			/*
			 * This could deadlock if an allocation with a btree
			 * node locked ever blocked - having the btree node
			 * locked would block garbage collection, but here we're
			 * waiting on garbage collection before we invalidate
			 * and free anything.
			 *
			 * But this should be safe since the btree code always
			 * uses btree_check_reserve() before allocating now, and
			 * if it fails it blocks without btree nodes locked.
			 */
			if (!fifo_full(&ca->free_inc))
				goto retry_invalidate;

			if (bch_prio_write(ca, false) < 0) {
				ca->invalidate_needs_gc = 1;
				wake_up_gc(ca->set);
			}
		}
	}
out:
	wait_for_kthread_stop();
	return 0;
}

/* Allocation */

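/*
 * Allocate a single bucket from @ca, trying the RESERVE_NONE freelist first
 * and then the requested reserve; if @wait is true, sleep on bucket_wait until
 * the allocator thread has refilled the freelists.
 */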
long bch_bucket_alloc(struct cache *ca, unsigned int reserve, bool wait)
{
	DEFINE_WAIT(w);
	struct bucket *b;
	long r;

	/* No allocation if CACHE_SET_IO_DISABLE bit is set */
	if (unlikely(test_bit(CACHE_SET_IO_DISABLE, &ca->set->flags)))
		return -1;

	/* fastpath */
	if (fifo_pop(&ca->free[RESERVE_NONE], r) ||
	    fifo_pop(&ca->free[reserve], r))
		goto out;

	if (!wait) {
		trace_bcache_alloc_fail(ca, reserve);
		return -1;
	}

	do {
		prepare_to_wait(&ca->set->bucket_wait, &w,
				TASK_UNINTERRUPTIBLE);

		mutex_unlock(&ca->set->bucket_lock);
		schedule();
		mutex_lock(&ca->set->bucket_lock);
	} while (!fifo_pop(&ca->free[RESERVE_NONE], r) &&
		 !fifo_pop(&ca->free[reserve], r));

	finish_wait(&ca->set->bucket_wait, &w);
out:
	if (ca->alloc_thread)
		wake_up_process(ca->alloc_thread);

	trace_bcache_alloc(ca, reserve);

	if (expensive_debug_checks(ca->set)) {
		size_t iter;
		long i;
		unsigned int j;

		for (iter = 0; iter < prio_buckets(ca) * 2; iter++)
			BUG_ON(ca->prio_buckets[iter] == (uint64_t) r);

		for (j = 0; j < RESERVE_NR; j++)
			fifo_for_each(i, &ca->free[j], iter)
				BUG_ON(i == r);
		fifo_for_each(i, &ca->free_inc, iter)
			BUG_ON(i == r);
	}

	b = ca->buckets + r;

	BUG_ON(atomic_read(&b->pin) != 1);

	SET_GC_SECTORS_USED(b, ca->sb.bucket_size);

	if (reserve <= RESERVE_PRIO) {
		SET_GC_MARK(b, GC_MARK_METADATA);
		SET_GC_MOVE(b, 0);
		b->prio = BTREE_PRIO;
	} else {
		SET_GC_MARK(b, GC_MARK_RECLAIMABLE);
		SET_GC_MOVE(b, 0);
		b->prio = INITIAL_PRIO;
	}

	if (ca->set->avail_nbuckets > 0) {
		ca->set->avail_nbuckets--;
		bch_update_bucket_in_use(ca->set, &ca->set->gc_stats);
	}

	return r;
}

void __bch_bucket_free(struct cache *ca, struct bucket *b)
{
	SET_GC_MARK(b, 0);
	SET_GC_SECTORS_USED(b, 0);

	if (ca->set->avail_nbuckets < ca->set->nbuckets) {
		ca->set->avail_nbuckets++;
		bch_update_bucket_in_use(ca->set, &ca->set->gc_stats);
	}
}

void bch_bucket_free(struct cache_set *c, struct bkey *k)
{
	unsigned int i;

	for (i = 0; i < KEY_PTRS(k); i++)
		__bch_bucket_free(c->cache, PTR_BUCKET(c, k, i));
}

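/*
 * Allocate one bucket and initialize @k as a single-pointer key referencing
 * it. Caller must hold bucket_lock; returns 0 on success, -1 on failure.
 */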
int __bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
			   struct bkey *k, bool wait)
{
	struct cache *ca;
	long b;

	/* No allocation if CACHE_SET_IO_DISABLE bit is set */
	if (unlikely(test_bit(CACHE_SET_IO_DISABLE, &c->flags)))
		return -1;

	lockdep_assert_held(&c->bucket_lock);

	bkey_init(k);

	ca = c->cache;
	b = bch_bucket_alloc(ca, reserve, wait);
	if (b < 0)
		return -1;

	k->ptr[0] = MAKE_PTR(ca->buckets[b].gen,
			     bucket_to_sector(c, b),
			     ca->sb.nr_this_dev);

	SET_KEY_PTRS(k, 1);

	return 0;
}

int bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
			 struct bkey *k, bool wait)
{
	int ret;

	mutex_lock(&c->bucket_lock);
	ret = __bch_bucket_alloc_set(c, reserve, k, wait);
	mutex_unlock(&c->bucket_lock);
	return ret;
}

/* Sector allocator */

struct open_bucket {
	struct list_head	list;
	unsigned int		last_write_point;
	unsigned int		sectors_free;
	BKEY_PADDED(key);
};

/*
 * We keep multiple buckets open for writes, and try to segregate different
 * write streams for better cache utilization: first we try to segregate flash
 * only volume write streams from cached devices, secondly we look for a bucket
 * where the last write to it was sequential with the current write, and
 * failing that we look for a bucket that was last used by the same task.
 *
 * The idea is that if you've got multiple tasks pulling data into the cache at
 * the same time, you'll get better cache utilization if you try to segregate
 * their data and preserve locality.
 *
 * For example, dirty sectors of a flash only volume are not reclaimable; if
 * they are mixed with dirty sectors of a cached device, such buckets will be
 * marked as dirty and won't be reclaimed, even though the dirty data of the
 * cached device has already been written back to the backing device.
 *
 * And say you're starting Firefox at the same time you're copying a
 * bunch of files. Firefox will likely end up being fairly hot and stay in the
 * cache awhile, but the data you copied might not be; if you wrote all that
 * data to the same buckets it'd get invalidated at the same time.
 *
 * Both of those tasks will be doing fairly random IO so we can't rely on
 * detecting sequential IO to segregate their data, but going off of the task
 * should be a sane heuristic.
 */
static struct open_bucket *pick_data_bucket(struct cache_set *c,
					    const struct bkey *search,
					    unsigned int write_point,
					    struct bkey *alloc)
{
	struct open_bucket *ret, *ret_task = NULL;

	list_for_each_entry_reverse(ret, &c->data_buckets, list)
		if (UUID_FLASH_ONLY(&c->uuids[KEY_INODE(&ret->key)]) !=
		    UUID_FLASH_ONLY(&c->uuids[KEY_INODE(search)]))
			continue;
		else if (!bkey_cmp(&ret->key, search))
			goto found;
		else if (ret->last_write_point == write_point)
			ret_task = ret;

	ret = ret_task ?: list_first_entry(&c->data_buckets,
					   struct open_bucket, list);
found:
	if (!ret->sectors_free && KEY_PTRS(alloc)) {
		ret->sectors_free = c->cache->sb.bucket_size;
		bkey_copy(&ret->key, alloc);
		bkey_init(alloc);
	}

	if (!ret->sectors_free)
		ret = NULL;

	return ret;
}

/*
 * Allocates some space in the cache to write to, sets k to point to the newly
 * allocated space, and updates KEY_SIZE(k) and KEY_OFFSET(k) (to point to the
 * end of the newly allocated space).
 *
 * May allocate fewer sectors than @sectors, KEY_SIZE(k) indicates how many
 * sectors were actually allocated.
 *
 * If s->writeback is true, will not fail.
 */
bool bch_alloc_sectors(struct cache_set *c,
		       struct bkey *k,
		       unsigned int sectors,
		       unsigned int write_point,
		       unsigned int write_prio,
		       bool wait)
{
	struct open_bucket *b;
	BKEY_PADDED(key) alloc;
	unsigned int i;

	/*
	 * We might have to allocate a new bucket, which we can't do with a
	 * spinlock held. So if we have to allocate, we drop the lock, allocate
	 * and then retry. KEY_PTRS() indicates whether alloc points to
	 * allocated bucket(s).
	 */

	bkey_init(&alloc.key);
	spin_lock(&c->data_bucket_lock);

	while (!(b = pick_data_bucket(c, k, write_point, &alloc.key))) {
		unsigned int watermark = write_prio
			? RESERVE_MOVINGGC
			: RESERVE_NONE;

		spin_unlock(&c->data_bucket_lock);

		if (bch_bucket_alloc_set(c, watermark, &alloc.key, wait))
			return false;

		spin_lock(&c->data_bucket_lock);
	}

	/*
	 * If we had to allocate, we might race and not need to allocate the
	 * second time we call pick_data_bucket(). If we allocated a bucket but
	 * didn't use it, drop the refcount bch_bucket_alloc_set() took:
	 */
	if (KEY_PTRS(&alloc.key))
		bkey_put(c, &alloc.key);

	for (i = 0; i < KEY_PTRS(&b->key); i++)
		EBUG_ON(ptr_stale(c, &b->key, i));

	/* Set up the pointer to the space we're allocating: */

	for (i = 0; i < KEY_PTRS(&b->key); i++)
		k->ptr[i] = b->key.ptr[i];

	sectors = min(sectors, b->sectors_free);

	SET_KEY_OFFSET(k, KEY_OFFSET(k) + sectors);
	SET_KEY_SIZE(k, sectors);
	SET_KEY_PTRS(k, KEY_PTRS(&b->key));

	/*
	 * Move b to the end of the lru, and keep track of what this bucket was
	 * last used for:
	 */
	list_move_tail(&b->list, &c->data_buckets);
	bkey_copy_key(&b->key, k);
	b->last_write_point = write_point;

	b->sectors_free	-= sectors;

	for (i = 0; i < KEY_PTRS(&b->key); i++) {
		SET_PTR_OFFSET(&b->key, i, PTR_OFFSET(&b->key, i) + sectors);

		atomic_long_add(sectors,
				&c->cache->sectors_written);
	}

	if (b->sectors_free < c->cache->sb.block_size)
		b->sectors_free = 0;

	/*
	 * k takes refcounts on the buckets it points to until it's inserted
	 * into the btree, but if we're done with this bucket we just transfer
	 * get_data_bucket()'s refcount.
	 */
	if (b->sectors_free)
		for (i = 0; i < KEY_PTRS(&b->key); i++)
			atomic_inc(&PTR_BUCKET(c, &b->key, i)->pin);

	spin_unlock(&c->data_bucket_lock);
	return true;
}

/* Init */

void bch_open_buckets_free(struct cache_set *c)
{
	struct open_bucket *b;

	while (!list_empty(&c->data_buckets)) {
		b = list_first_entry(&c->data_buckets,
				     struct open_bucket, list);
		list_del(&b->list);
		kfree(b);
	}
}

int bch_open_buckets_alloc(struct cache_set *c)
{
	int i;

	spin_lock_init(&c->data_bucket_lock);

	for (i = 0; i < MAX_OPEN_BUCKETS; i++) {
		struct open_bucket *b = kzalloc(sizeof(*b), GFP_KERNEL);

		if (!b)
			return -ENOMEM;

		list_add(&b->list, &c->data_buckets);
	}

	return 0;
}

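/*
 * Start the per-cache allocator kthread; it is stashed in ca->alloc_thread so
 * bch_bucket_alloc() can wake it when the freelists run low.
 */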
int bch_cache_allocator_start(struct cache *ca)
{
	struct task_struct *k = kthread_run(bch_allocator_thread,
					    ca, "bcache_allocator");
	if (IS_ERR(k))
		return PTR_ERR(k);

	ca->alloc_thread = k;
	return 0;
}