xref: /linux-6.15/include/linux/blk-cgroup.h (revision e4df2d5e)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _BLK_CGROUP_H
3 #define _BLK_CGROUP_H
4 /*
5  * Common Block IO controller cgroup interface
6  *
7  * Based on ideas and code from CFQ, CFS and BFQ:
8  * Copyright (C) 2003 Jens Axboe <[email protected]>
9  *
10  * Copyright (C) 2008 Fabio Checconi <[email protected]>
11  *		      Paolo Valente <[email protected]>
12  *
13  * Copyright (C) 2009 Vivek Goyal <[email protected]>
14  * 	              Nauman Rafique <[email protected]>
15  */
16 
17 #include <linux/cgroup.h>
18 #include <linux/percpu.h>
19 #include <linux/percpu_counter.h>
20 #include <linux/u64_stats_sync.h>
21 #include <linux/seq_file.h>
22 #include <linux/radix-tree.h>
23 #include <linux/blkdev.h>
24 #include <linux/atomic.h>
25 #include <linux/kthread.h>
26 #include <linux/fs.h>
27 
28 #define FC_APPID_LEN              129
29 
30 #ifdef CONFIG_BLK_CGROUP
31 
/* stat categories tracked per blkg, one slot per request direction */
enum blkg_iostat_type {
	BLKG_IOSTAT_READ,
	BLKG_IOSTAT_WRITE,
	BLKG_IOSTAT_DISCARD,

	BLKG_IOSTAT_NR,		/* number of categories; sizes the arrays in blkg_iostat */
};
39 
40 struct blkcg_gq;
41 struct blkg_policy_data;
42 
/*
 * Per-cgroup state of the block IO controller.  Per-device state hangs off
 * this as blkcg_gq's, reachable via both the radix tree and the hlist.
 */
struct blkcg {
	struct cgroup_subsys_state	css;
	spinlock_t			lock;		/* guards blkg creation/teardown on this blkcg */
	refcount_t			online_pin;	/* holds the blkcg online; see blkcg_pin_online() */

	struct radix_tree_root		blkg_tree;	/* this blkcg's blkgs, for lookup */
	struct blkcg_gq	__rcu		*blkg_hint;	/* most recently hit blkg, RCU-protected cache */
	struct hlist_head		blkg_list;	/* all blkgs belonging to this blkcg */

	struct blkcg_policy_data	*cpd[BLKCG_MAX_POLS];	/* per-policy per-cgroup data */

	struct list_head		all_blkcgs_node;	/* node on the global blkcg list */
#ifdef CONFIG_BLK_CGROUP_FC_APPID
	char                            fc_app_id[FC_APPID_LEN];	/* FC application identifier, see blkcg_set_fc_appid() */
#endif
#ifdef CONFIG_CGROUP_WRITEBACK
	struct list_head		cgwb_list;	/* cgroup writeback state associated with this blkcg */
#endif
};
62 
/* byte and IO counters, indexed by enum blkg_iostat_type */
struct blkg_iostat {
	u64				bytes[BLKG_IOSTAT_NR];
	u64				ios[BLKG_IOSTAT_NR];
};
67 
/* a blkg_iostat plus the synchronization needed to read it consistently */
struct blkg_iostat_set {
	struct u64_stats_sync		sync;	/* protects consistent reads of cur/last */
	struct blkg_iostat		cur;	/* current counter values */
	struct blkg_iostat		last;	/* NOTE(review): presumably values at last flush/propagation — confirm in blk-cgroup.c */
};
73 
74 /* association between a blk cgroup and a request queue */
/* association between a blk cgroup and a request queue */
struct blkcg_gq {
	/* Pointer to the associated request_queue */
	struct request_queue		*q;
	struct list_head		q_node;		/* node on the queue's blkg list */
	struct hlist_node		blkcg_node;	/* node on blkcg->blkg_list */
	struct blkcg			*blkcg;		/* owning blkcg */

	/* all non-root blkcg_gq's are guaranteed to have access to parent */
	struct blkcg_gq			*parent;

	/* reference count */
	struct percpu_ref		refcnt;

	/* is this blkg online? protected by both blkcg and q locks */
	bool				online;

	struct blkg_iostat_set __percpu	*iostat_cpu;	/* per-cpu stat updates */
	struct blkg_iostat_set		iostat;		/* aggregated stats */

	struct blkg_policy_data		*pd[BLKCG_MAX_POLS];	/* per-policy per-blkg data */

	/* bios punted for async submission; list guarded by async_bio_lock */
	spinlock_t			async_bio_lock;
	struct bio_list			async_bios;
	/* a blkg is either submitting async bios or being freed, never both */
	union {
		struct work_struct	async_bio_work;
		struct work_struct	free_work;
	};

	/* delay-throttling bookkeeping (atomic: updated from hot paths) */
	atomic_t			use_delay;
	atomic64_t			delay_nsec;
	atomic64_t			delay_start;
	u64				last_delay;
	int				last_use;

	struct rcu_head			rcu_head;	/* deferred free after RCU grace period */
};
111 
112 extern struct cgroup_subsys_state * const blkcg_root_css;
113 
114 void blkcg_destroy_blkgs(struct blkcg *blkcg);
115 void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay);
116 void blkcg_maybe_throttle_current(void);
117 
118 static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css)
119 {
120 	return css ? container_of(css, struct blkcg, css) : NULL;
121 }
122 
123 /**
124  * bio_blkcg - grab the blkcg associated with a bio
125  * @bio: target bio
126  *
127  * This returns the blkcg associated with a bio, %NULL if not associated.
128  * Callers are expected to either handle %NULL or know association has been
129  * done prior to calling this.
130  */
131 static inline struct blkcg *bio_blkcg(struct bio *bio)
132 {
133 	if (bio && bio->bi_blkg)
134 		return bio->bi_blkg->blkcg;
135 	return NULL;
136 }
137 
138 static inline bool blk_cgroup_congested(void)
139 {
140 	struct cgroup_subsys_state *css;
141 	bool ret = false;
142 
143 	rcu_read_lock();
144 	css = kthread_blkcg();
145 	if (!css)
146 		css = task_css(current, io_cgrp_id);
147 	while (css) {
148 		if (atomic_read(&css->cgroup->congestion_count)) {
149 			ret = true;
150 			break;
151 		}
152 		css = css->parent;
153 	}
154 	rcu_read_unlock();
155 	return ret;
156 }
157 
158 /**
159  * blkcg_parent - get the parent of a blkcg
160  * @blkcg: blkcg of interest
161  *
162  * Return the parent blkcg of @blkcg.  Can be called anytime.
163  */
164 static inline struct blkcg *blkcg_parent(struct blkcg *blkcg)
165 {
166 	return css_to_blkcg(blkcg->css.parent);
167 }
168 
169 /**
170  * blkcg_pin_online - pin online state
171  * @blkcg: blkcg of interest
172  *
173  * While pinned, a blkcg is kept online.  This is primarily used to
174  * impedance-match blkg and cgwb lifetimes so that blkg doesn't go offline
175  * while an associated cgwb is still active.
176  */
static inline void blkcg_pin_online(struct blkcg *blkcg)
{
	/* paired with blkcg_unpin_online() */
	refcount_inc(&blkcg->online_pin);
}
181 
182 /**
183  * blkcg_unpin_online - unpin online state
184  * @blkcg: blkcg of interest
185  *
186  * This is primarily used to impedance-match blkg and cgwb lifetimes so
187  * that blkg doesn't go offline while an associated cgwb is still active.
188  * When this count goes to zero, all active cgwbs have finished so the
189  * blkcg can continue destruction by calling blkcg_destroy_blkgs().
190  */
static inline void blkcg_unpin_online(struct blkcg *blkcg)
{
	do {
		/* stop as soon as someone still holds this blkcg online */
		if (!refcount_dec_and_test(&blkcg->online_pin))
			break;
		/*
		 * Last pin dropped: release this blkcg's blkgs and walk up.
		 * NOTE(review): the parent iteration presumes each child holds
		 * a pin on its parent's online_pin — confirm where online_pin
		 * is incremented for parents (e.g. css online path).
		 */
		blkcg_destroy_blkgs(blkcg);
		blkcg = blkcg_parent(blkcg);
	} while (blkcg);
}
200 
201 #else	/* CONFIG_BLK_CGROUP */
202 
/* Stubbed-out definitions for builds without CONFIG_BLK_CGROUP. */
struct blkcg {
};

struct blkcg_gq {
};

/* no root css exists when the controller is compiled out */
#define blkcg_root_css	((struct cgroup_subsys_state *)ERR_PTR(-EINVAL))

static inline void blkcg_maybe_throttle_current(void) { }
static inline bool blk_cgroup_congested(void) { return false; }

#ifdef CONFIG_BLOCK
static inline void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay) { }
static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; }
#endif /* CONFIG_BLOCK */
218 
219 #endif	/* CONFIG_BLK_CGROUP */
220 
221 #ifdef CONFIG_BLK_CGROUP_FC_APPID
222 /*
 * Sets the fc_app_id field associated with the blkcg
224  * @app_id: application identifier
225  * @cgrp_id: cgroup id
226  * @app_id_len: size of application identifier
227  */
228 static inline int blkcg_set_fc_appid(char *app_id, u64 cgrp_id, size_t app_id_len)
229 {
230 	struct cgroup *cgrp;
231 	struct cgroup_subsys_state *css;
232 	struct blkcg *blkcg;
233 	int ret  = 0;
234 
235 	if (app_id_len > FC_APPID_LEN)
236 		return -EINVAL;
237 
238 	cgrp = cgroup_get_from_id(cgrp_id);
239 	if (!cgrp)
240 		return -ENOENT;
241 	css = cgroup_get_e_css(cgrp, &io_cgrp_subsys);
242 	if (!css) {
243 		ret = -ENOENT;
244 		goto out_cgrp_put;
245 	}
246 	blkcg = css_to_blkcg(css);
247 	/*
248 	 * There is a slight race condition on setting the appid.
249 	 * Worst case an I/O may not find the right id.
250 	 * This is no different from the I/O we let pass while obtaining
251 	 * the vmid from the fabric.
252 	 * Adding the overhead of a lock is not necessary.
253 	 */
254 	strlcpy(blkcg->fc_app_id, app_id, app_id_len);
255 	css_put(css);
256 out_cgrp_put:
257 	cgroup_put(cgrp);
258 	return ret;
259 }
260 
261 /**
262  * blkcg_get_fc_appid - get the fc app identifier associated with a bio
263  * @bio: target bio
264  *
265  * On success return the fc_app_id, on failure return NULL
266  */
267 static inline char *blkcg_get_fc_appid(struct bio *bio)
268 {
269 	if (bio && bio->bi_blkg &&
270 		(bio->bi_blkg->blkcg->fc_app_id[0] != '\0'))
271 		return bio->bi_blkg->blkcg->fc_app_id;
272 	return NULL;
273 }
274 #else
/* FC appid support compiled out: setting always fails, lookups find nothing */
static inline int blkcg_set_fc_appid(char *buf, u64 id, size_t len) { return -EINVAL; }
static inline char *blkcg_get_fc_appid(struct bio *bio) { return NULL; }
277 #endif /*CONFIG_BLK_CGROUP_FC_APPID*/
278 #endif	/* _BLK_CGROUP_H */
279