xref: /linux-6.15/include/linux/blk-cgroup.h (revision 8ce40a2f)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _BLK_CGROUP_H
3 #define _BLK_CGROUP_H
4 /*
5  * Common Block IO controller cgroup interface
6  *
7  * Based on ideas and code from CFQ, CFS and BFQ:
8  * Copyright (C) 2003 Jens Axboe <[email protected]>
9  *
10  * Copyright (C) 2008 Fabio Checconi <[email protected]>
11  *		      Paolo Valente <[email protected]>
12  *
13  * Copyright (C) 2009 Vivek Goyal <[email protected]>
14  * 	              Nauman Rafique <[email protected]>
15  */
16 
17 #include <linux/cgroup.h>
18 #include <linux/percpu.h>
19 #include <linux/percpu_counter.h>
20 #include <linux/u64_stats_sync.h>
21 #include <linux/seq_file.h>
22 #include <linux/radix-tree.h>
23 #include <linux/blkdev.h>
24 #include <linux/atomic.h>
25 #include <linux/kthread.h>
26 #include <linux/fs.h>
27 
28 #define FC_APPID_LEN              129
29 
30 #ifdef CONFIG_BLK_CGROUP
31 
/* Index into the per-blkg I/O statistic arrays, one slot per op type. */
enum blkg_iostat_type {
	BLKG_IOSTAT_READ,
	BLKG_IOSTAT_WRITE,
	BLKG_IOSTAT_DISCARD,

	BLKG_IOSTAT_NR,		/* number of stat types above */
};
39 
40 struct blkcg_gq;
41 struct blkg_policy_data;
42 
/*
 * Per-cgroup state of the block IO controller.  Per-device state hangs
 * off this as blkcg_gq's (see struct blkcg_gq below).
 */
struct blkcg {
	struct cgroup_subsys_state	css;		/* cgroup core linkage */
	spinlock_t			lock;
	refcount_t			online_pin;	/* see blkcg_pin_online() */

	struct radix_tree_root		blkg_tree;	/* blkgs of this blkcg */
	struct blkcg_gq	__rcu		*blkg_hint;	/* hint for blkg lookup — presumably last hit; confirm at lookup site */
	struct hlist_head		blkg_list;	/* all blkgs of this blkcg */

	/* per-policy private data, indexed by policy id */
	struct blkcg_policy_data	*cpd[BLKCG_MAX_POLS];

	struct list_head		all_blkcgs_node;	/* node on global blkcg list */
#ifdef CONFIG_BLK_CGROUP_FC_APPID
	char                            fc_app_id[FC_APPID_LEN];	/* FC application identifier */
#endif
#ifdef CONFIG_CGROUP_WRITEBACK
	struct list_head		cgwb_list;	/* cgroup writeback attached to this blkcg */
#endif
};
62 
/* I/O statistics counters, indexed by enum blkg_iostat_type. */
struct blkg_iostat {
	u64				bytes[BLKG_IOSTAT_NR];	/* bytes transferred */
	u64				ios[BLKG_IOSTAT_NR];	/* number of IOs */
};
67 
/* A stat set with a seqcount-style sync for tear-free 64bit reads. */
struct blkg_iostat_set {
	struct u64_stats_sync		sync;	/* guards cur/last reads */
	struct blkg_iostat		cur;	/* current counters */
	struct blkg_iostat		last;	/* snapshot from last flush — TODO confirm against stat updater */
};
73 
/* association between a blk cgroup and a request queue */
struct blkcg_gq {
	/* Pointer to the associated request_queue */
	struct request_queue		*q;
	struct list_head		q_node;		/* node on q's blkg list */
	struct hlist_node		blkcg_node;	/* node on blkcg->blkg_list */
	struct blkcg			*blkcg;		/* owning blkcg */

	/* all non-root blkcg_gq's are guaranteed to have access to parent */
	struct blkcg_gq			*parent;

	/* reference count */
	struct percpu_ref		refcnt;

	/* is this blkg online? protected by both blkcg and q locks */
	bool				online;

	struct blkg_iostat_set __percpu	*iostat_cpu;	/* per-cpu stat deltas */
	struct blkg_iostat_set		iostat;		/* aggregated stats */

	/* per-policy private data, indexed by policy id */
	struct blkg_policy_data		*pd[BLKCG_MAX_POLS];

	/* bios punted to a workqueue for async submission */
	spinlock_t			async_bio_lock;	/* protects async_bios */
	struct bio_list			async_bios;
	struct work_struct		async_bio_work;

	/* io-delay throttling state */
	atomic_t			use_delay;
	atomic64_t			delay_nsec;
	atomic64_t			delay_start;
	u64				last_delay;
	int				last_use;

	struct rcu_head			rcu_head;	/* deferred free via RCU */
};
108 
109 extern struct cgroup_subsys_state * const blkcg_root_css;
110 
111 void blkcg_destroy_blkgs(struct blkcg *blkcg);
112 void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay);
113 void blkcg_maybe_throttle_current(void);
114 
115 static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css)
116 {
117 	return css ? container_of(css, struct blkcg, css) : NULL;
118 }
119 
120 /**
121  * bio_blkcg - grab the blkcg associated with a bio
122  * @bio: target bio
123  *
124  * This returns the blkcg associated with a bio, %NULL if not associated.
125  * Callers are expected to either handle %NULL or know association has been
126  * done prior to calling this.
127  */
128 static inline struct blkcg *bio_blkcg(struct bio *bio)
129 {
130 	if (bio && bio->bi_blkg)
131 		return bio->bi_blkg->blkcg;
132 	return NULL;
133 }
134 
135 static inline bool blk_cgroup_congested(void)
136 {
137 	struct cgroup_subsys_state *css;
138 	bool ret = false;
139 
140 	rcu_read_lock();
141 	css = kthread_blkcg();
142 	if (!css)
143 		css = task_css(current, io_cgrp_id);
144 	while (css) {
145 		if (atomic_read(&css->cgroup->congestion_count)) {
146 			ret = true;
147 			break;
148 		}
149 		css = css->parent;
150 	}
151 	rcu_read_unlock();
152 	return ret;
153 }
154 
155 /**
156  * blkcg_parent - get the parent of a blkcg
157  * @blkcg: blkcg of interest
158  *
159  * Return the parent blkcg of @blkcg.  Can be called anytime.
160  */
161 static inline struct blkcg *blkcg_parent(struct blkcg *blkcg)
162 {
163 	return css_to_blkcg(blkcg->css.parent);
164 }
165 
/**
 * blkcg_pin_online - pin online state
 * @blkcg: blkcg of interest
 *
 * While pinned, a blkcg is kept online.  This is primarily used to
 * impedance-match blkg and cgwb lifetimes so that blkg doesn't go offline
 * while an associated cgwb is still active.  Paired with
 * blkcg_unpin_online().
 */
static inline void blkcg_pin_online(struct blkcg *blkcg)
{
	refcount_inc(&blkcg->online_pin);
}
178 
179 /**
180  * blkcg_unpin_online - unpin online state
181  * @blkcg: blkcg of interest
182  *
183  * This is primarily used to impedance-match blkg and cgwb lifetimes so
184  * that blkg doesn't go offline while an associated cgwb is still active.
185  * When this count goes to zero, all active cgwbs have finished so the
186  * blkcg can continue destruction by calling blkcg_destroy_blkgs().
187  */
188 static inline void blkcg_unpin_online(struct blkcg *blkcg)
189 {
190 	do {
191 		if (!refcount_dec_and_test(&blkcg->online_pin))
192 			break;
193 		blkcg_destroy_blkgs(blkcg);
194 		blkcg = blkcg_parent(blkcg);
195 	} while (blkcg);
196 }
197 
198 #else	/* CONFIG_BLK_CGROUP */
199 
/* Stub definitions for builds with the block IO controller disabled. */
struct blkcg {
};

struct blkcg_gq {
};

/* No root css exists; presumably only compared, never dereferenced — TODO confirm callers */
#define blkcg_root_css	((struct cgroup_subsys_state *)ERR_PTR(-EINVAL))

static inline void blkcg_maybe_throttle_current(void) { }
static inline bool blk_cgroup_congested(void) { return false; }

#ifdef CONFIG_BLOCK
static inline void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay) { }
static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; }
#endif /* CONFIG_BLOCK */
215 
216 #endif	/* CONFIG_BLK_CGROUP */
217 
218 #ifdef CONFIG_BLK_CGROUP_FC_APPID
219 /*
220  * Sets the fc_app_id field associted to blkcg
221  * @app_id: application identifier
222  * @cgrp_id: cgroup id
223  * @app_id_len: size of application identifier
224  */
225 static inline int blkcg_set_fc_appid(char *app_id, u64 cgrp_id, size_t app_id_len)
226 {
227 	struct cgroup *cgrp;
228 	struct cgroup_subsys_state *css;
229 	struct blkcg *blkcg;
230 	int ret  = 0;
231 
232 	if (app_id_len > FC_APPID_LEN)
233 		return -EINVAL;
234 
235 	cgrp = cgroup_get_from_id(cgrp_id);
236 	if (!cgrp)
237 		return -ENOENT;
238 	css = cgroup_get_e_css(cgrp, &io_cgrp_subsys);
239 	if (!css) {
240 		ret = -ENOENT;
241 		goto out_cgrp_put;
242 	}
243 	blkcg = css_to_blkcg(css);
244 	/*
245 	 * There is a slight race condition on setting the appid.
246 	 * Worst case an I/O may not find the right id.
247 	 * This is no different from the I/O we let pass while obtaining
248 	 * the vmid from the fabric.
249 	 * Adding the overhead of a lock is not necessary.
250 	 */
251 	strlcpy(blkcg->fc_app_id, app_id, app_id_len);
252 	css_put(css);
253 out_cgrp_put:
254 	cgroup_put(cgrp);
255 	return ret;
256 }
257 
258 /**
259  * blkcg_get_fc_appid - get the fc app identifier associated with a bio
260  * @bio: target bio
261  *
262  * On success return the fc_app_id, on failure return NULL
263  */
264 static inline char *blkcg_get_fc_appid(struct bio *bio)
265 {
266 	if (bio && bio->bi_blkg &&
267 		(bio->bi_blkg->blkcg->fc_app_id[0] != '\0'))
268 		return bio->bi_blkg->blkcg->fc_app_id;
269 	return NULL;
270 }
271 #else
/* Stubs: without CONFIG_BLK_CGROUP_FC_APPID there is no appid to manage. */
static inline int blkcg_set_fc_appid(char *buf, u64 id, size_t len) { return -EINVAL; }
static inline char *blkcg_get_fc_appid(struct bio *bio) { return NULL; }
274 #endif /*CONFIG_BLK_CGROUP_FC_APPID*/
275 #endif	/* _BLK_CGROUP_H */
276