/* xref: /linux-6.15/drivers/md/bcache/request.h (revision 2c1953e2) */
1 #ifndef _BCACHE_REQUEST_H_
2 #define _BCACHE_REQUEST_H_
3 
4 #include <linux/cgroup.h>
5 
6 struct search {
7 	/* Stack frame for bio_complete */
8 	struct closure		cl;
9 
10 	struct bcache_device	*d;
11 	struct task_struct	*task;
12 
13 	struct bbio		bio;
14 	struct bio		*orig_bio;
15 	struct bio		*cache_miss;
16 	unsigned		cache_bio_sectors;
17 
18 	unsigned		recoverable:1;
19 	unsigned		unaligned_bvec:1;
20 
21 	unsigned		write:1;
22 	unsigned		writeback:1;
23 
24 	/* IO error returned to s->bio */
25 	short			error;
26 	unsigned long		start_time;
27 
28 	struct btree_op		op;
29 
30 	/* Anything past this point won't get zeroed in search_alloc() */
31 	struct keylist		insert_keys;
32 };
33 
34 unsigned bch_get_congested(struct cache_set *);
35 void bch_data_insert(struct closure *cl);
36 
37 void bch_open_buckets_free(struct cache_set *);
38 int bch_open_buckets_alloc(struct cache_set *);
39 
40 void bch_cached_dev_request_init(struct cached_dev *dc);
41 void bch_flash_dev_request_init(struct bcache_device *d);
42 
43 extern struct kmem_cache *bch_search_cache, *bch_passthrough_cache;
44 
45 struct bch_cgroup {
46 #ifdef CONFIG_CGROUP_BCACHE
47 	struct cgroup_subsys_state	css;
48 #endif
49 	/*
50 	 * We subtract one from the index into bch_cache_modes[], so that
51 	 * default == -1; this makes it so the rest match up with d->cache_mode,
52 	 * and we use d->cache_mode if cgrp->cache_mode < 0
53 	 */
54 	short				cache_mode;
55 	bool				verify;
56 	struct cache_stat_collector	stats;
57 };
58 
59 struct bch_cgroup *bch_bio_to_cgroup(struct bio *bio);
60 
61 #endif /* _BCACHE_REQUEST_H_ */
62