xref: /linux-6.15/include/linux/blk-cgroup.h (revision a37c2efc)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _BLK_CGROUP_H
3 #define _BLK_CGROUP_H
4 /*
5  * Common Block IO controller cgroup interface
6  *
7  * Based on ideas and code from CFQ, CFS and BFQ:
8  * Copyright (C) 2003 Jens Axboe <[email protected]>
9  *
10  * Copyright (C) 2008 Fabio Checconi <[email protected]>
11  *		      Paolo Valente <[email protected]>
12  *
13  * Copyright (C) 2009 Vivek Goyal <[email protected]>
14  * 	              Nauman Rafique <[email protected]>
15  */
16 
17 #include <linux/cgroup.h>
18 #include <linux/percpu_counter.h>
19 #include <linux/seq_file.h>
20 #include <linux/radix-tree.h>
21 #include <linux/blkdev.h>
22 #include <linux/atomic.h>
23 #include <linux/kthread.h>
24 #include <linux/fs.h>
25 
26 /* percpu_counter batch for blkg_[rw]stats, per-cpu drift doesn't matter */
27 #define BLKG_STAT_CPU_BATCH	(INT_MAX / 2)
28 
29 /* Max limits for throttle policy */
30 #define THROTL_IOPS_MAX		UINT_MAX
31 
32 #ifdef CONFIG_BLK_CGROUP
33 
34 enum blkg_rwstat_type {
35 	BLKG_RWSTAT_READ,
36 	BLKG_RWSTAT_WRITE,
37 	BLKG_RWSTAT_SYNC,
38 	BLKG_RWSTAT_ASYNC,
39 	BLKG_RWSTAT_DISCARD,
40 
41 	BLKG_RWSTAT_NR,
42 	BLKG_RWSTAT_TOTAL = BLKG_RWSTAT_NR,
43 };
44 
45 struct blkcg_gq;
46 
47 struct blkcg {
48 	struct cgroup_subsys_state	css;
49 	spinlock_t			lock;
50 
51 	struct radix_tree_root		blkg_tree;
52 	struct blkcg_gq	__rcu		*blkg_hint;
53 	struct hlist_head		blkg_list;
54 
55 	struct blkcg_policy_data	*cpd[BLKCG_MAX_POLS];
56 
57 	struct list_head		all_blkcgs_node;
58 #ifdef CONFIG_CGROUP_WRITEBACK
59 	struct list_head		cgwb_list;
60 	refcount_t			cgwb_refcnt;
61 #endif
62 };
63 
64 /*
65  * blkg_[rw]stat->aux_cnt is excluded from local stats but included in
66  * recursive ones.  Used to carry over the stats of dead children.
67  */
68 struct blkg_rwstat {
69 	struct percpu_counter		cpu_cnt[BLKG_RWSTAT_NR];
70 	atomic64_t			aux_cnt[BLKG_RWSTAT_NR];
71 };
72 
73 struct blkg_rwstat_sample {
74 	u64				cnt[BLKG_RWSTAT_NR];
75 };
76 
77 /*
78  * A blkcg_gq (blkg) is an association between a block cgroup (blkcg) and a
79  * request_queue (q).  This is used by blkcg policies which need to track
80  * information per blkcg - q pair.
81  *
82  * There can be multiple active blkcg policies and each blkg:policy pair is
83  * represented by a blkg_policy_data which is allocated and freed by each
84  * policy's pd_alloc/free_fn() methods.  A policy can allocate a private
85  * data area by allocating a larger data structure which embeds
86  * blkg_policy_data at the beginning.
87  */
88 struct blkg_policy_data {
89 	/* the blkg and policy id this per-policy data belongs to */
90 	struct blkcg_gq			*blkg;
91 	int				plid;
92 };
93 
94 /*
95  * Policies that need to keep per-blkcg data which is independent of any
96  * request_queue associated with it should implement cpd_alloc/free_fn()
97  * methods.  A policy can allocate a private data area by allocating a
98  * larger data structure which embeds blkcg_policy_data at the beginning.
99  * cpd_init() is invoked to let each policy handle per-blkcg data.
100  */
101 struct blkcg_policy_data {
102 	/* the blkcg and policy id this per-policy data belongs to */
103 	struct blkcg			*blkcg;
104 	int				plid;
105 };
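
/*
 * Illustrative sketch (not part of this header, names my_blkg_data and
 * my_blkcg_data are hypothetical): a policy that wants per-blkg and
 * per-blkcg private data embeds the generic structs as the first member
 * of its own, larger structs and converts back with container_of().
 *
 *      struct my_blkg_data {
 *              struct blkg_policy_data pd;
 *              u64 bytes_dispatched;
 *      };
 *
 *      struct my_blkcg_data {
 *              struct blkcg_policy_data cpd;
 *              unsigned int weight;
 *      };
 *
 *      static struct my_blkg_data *pd_to_my(struct blkg_policy_data *pd)
 *      {
 *              return pd ? container_of(pd, struct my_blkg_data, pd) : NULL;
 *      }
 */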
106 
107 /* association between a blk cgroup and a request queue */
108 struct blkcg_gq {
109 	/* Pointer to the associated request_queue */
110 	struct request_queue		*q;
111 	struct list_head		q_node;
112 	struct hlist_node		blkcg_node;
113 	struct blkcg			*blkcg;
114 
115 	/*
116 	 * Each blkg gets congested separately and the congestion state is
117 	 * propagated to the matching bdi_writeback_congested.
118 	 */
119 	struct bdi_writeback_congested	*wb_congested;
120 
121 	/* all non-root blkcg_gq's are guaranteed to have access to parent */
122 	struct blkcg_gq			*parent;
123 
124 	/* reference count */
125 	struct percpu_ref		refcnt;
126 
127 	/* is this blkg online? protected by both blkcg and q locks */
128 	bool				online;
129 
130 	struct blkg_rwstat		stat_bytes;
131 	struct blkg_rwstat		stat_ios;
132 
133 	struct blkg_policy_data		*pd[BLKCG_MAX_POLS];
134 
135 	spinlock_t			async_bio_lock;
136 	struct bio_list			async_bios;
137 	struct work_struct		async_bio_work;
138 
139 	atomic_t			use_delay;
140 	atomic64_t			delay_nsec;
141 	atomic64_t			delay_start;
142 	u64				last_delay;
143 	int				last_use;
144 
145 	struct rcu_head			rcu_head;
146 };
147 
148 typedef struct blkcg_policy_data *(blkcg_pol_alloc_cpd_fn)(gfp_t gfp);
149 typedef void (blkcg_pol_init_cpd_fn)(struct blkcg_policy_data *cpd);
150 typedef void (blkcg_pol_free_cpd_fn)(struct blkcg_policy_data *cpd);
151 typedef void (blkcg_pol_bind_cpd_fn)(struct blkcg_policy_data *cpd);
152 typedef struct blkg_policy_data *(blkcg_pol_alloc_pd_fn)(gfp_t gfp, int node);
153 typedef void (blkcg_pol_init_pd_fn)(struct blkg_policy_data *pd);
154 typedef void (blkcg_pol_online_pd_fn)(struct blkg_policy_data *pd);
155 typedef void (blkcg_pol_offline_pd_fn)(struct blkg_policy_data *pd);
156 typedef void (blkcg_pol_free_pd_fn)(struct blkg_policy_data *pd);
157 typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkg_policy_data *pd);
158 typedef size_t (blkcg_pol_stat_pd_fn)(struct blkg_policy_data *pd, char *buf,
159 				      size_t size);
160 
161 struct blkcg_policy {
162 	int				plid;
163 	/* cgroup files for the policy */
164 	struct cftype			*dfl_cftypes;
165 	struct cftype			*legacy_cftypes;
166 
167 	/* operations */
168 	blkcg_pol_alloc_cpd_fn		*cpd_alloc_fn;
169 	blkcg_pol_init_cpd_fn		*cpd_init_fn;
170 	blkcg_pol_free_cpd_fn		*cpd_free_fn;
171 	blkcg_pol_bind_cpd_fn		*cpd_bind_fn;
172 
173 	blkcg_pol_alloc_pd_fn		*pd_alloc_fn;
174 	blkcg_pol_init_pd_fn		*pd_init_fn;
175 	blkcg_pol_online_pd_fn		*pd_online_fn;
176 	blkcg_pol_offline_pd_fn		*pd_offline_fn;
177 	blkcg_pol_free_pd_fn		*pd_free_fn;
178 	blkcg_pol_reset_pd_stats_fn	*pd_reset_stats_fn;
179 	blkcg_pol_stat_pd_fn		*pd_stat_fn;
180 };
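
/*
 * Illustrative sketch (hypothetical names, reusing the my_blkg_data
 * struct from the sketch further up): only pd_alloc_fn/pd_free_fn are
 * needed for per-blkg data; all other callbacks are optional.
 *
 *      static struct blkg_policy_data *my_pd_alloc(gfp_t gfp, int node)
 *      {
 *              struct my_blkg_data *d = kzalloc_node(sizeof(*d), gfp, node);
 *
 *              return d ? &d->pd : NULL;
 *      }
 *
 *      static void my_pd_free(struct blkg_policy_data *pd)
 *      {
 *              kfree(container_of(pd, struct my_blkg_data, pd));
 *      }
 *
 *      static struct blkcg_policy my_policy = {
 *              .pd_alloc_fn    = my_pd_alloc,
 *              .pd_free_fn     = my_pd_free,
 *      };
 *
 * The policy is made known with blkcg_policy_register(&my_policy), which
 * assigns ->plid, and is enabled on a queue with
 * blkcg_activate_policy(q, &my_policy).
 */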
181 
182 extern struct blkcg blkcg_root;
183 extern struct cgroup_subsys_state * const blkcg_root_css;
184 extern bool blkcg_debug_stats;
185 
186 struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg,
187 				      struct request_queue *q, bool update_hint);
188 struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg,
189 				      struct request_queue *q);
190 struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
191 				    struct request_queue *q);
192 int blkcg_init_queue(struct request_queue *q);
193 void blkcg_drain_queue(struct request_queue *q);
194 void blkcg_exit_queue(struct request_queue *q);
195 
196 /* Blkio controller policy registration */
197 int blkcg_policy_register(struct blkcg_policy *pol);
198 void blkcg_policy_unregister(struct blkcg_policy *pol);
199 int blkcg_activate_policy(struct request_queue *q,
200 			  const struct blkcg_policy *pol);
201 void blkcg_deactivate_policy(struct request_queue *q,
202 			     const struct blkcg_policy *pol);
203 
204 static inline u64 blkg_rwstat_read_counter(struct blkg_rwstat *rwstat,
205 		unsigned int idx)
206 {
207 	return atomic64_read(&rwstat->aux_cnt[idx]) +
208 		percpu_counter_sum_positive(&rwstat->cpu_cnt[idx]);
209 }
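
/*
 * Example (sketch): the combined byte count a blkg has seen in both
 * directions, including anything carried over in the aux counters:
 *
 *      u64 total = blkg_rwstat_read_counter(&blkg->stat_bytes, BLKG_RWSTAT_READ) +
 *                  blkg_rwstat_read_counter(&blkg->stat_bytes, BLKG_RWSTAT_WRITE);
 */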
210 
211 const char *blkg_dev_name(struct blkcg_gq *blkg);
212 void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
213 		       u64 (*prfill)(struct seq_file *,
214 				     struct blkg_policy_data *, int),
215 		       const struct blkcg_policy *pol, int data,
216 		       bool show_total);
217 u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v);
218 u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
219 			 const struct blkg_rwstat_sample *rwstat);
220 u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
221 		       int off);
222 int blkg_print_stat_bytes(struct seq_file *sf, void *v);
223 int blkg_print_stat_ios(struct seq_file *sf, void *v);
224 int blkg_print_stat_bytes_recursive(struct seq_file *sf, void *v);
225 int blkg_print_stat_ios_recursive(struct seq_file *sf, void *v);
226 
227 void blkg_rwstat_recursive_sum(struct blkcg_gq *blkg, struct blkcg_policy *pol,
228 		int off, struct blkg_rwstat_sample *sum);
229 
230 struct blkg_conf_ctx {
231 	struct gendisk			*disk;
232 	struct blkcg_gq			*blkg;
233 	char				*body;
234 };
235 
236 int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
237 		   char *input, struct blkg_conf_ctx *ctx);
238 void blkg_conf_finish(struct blkg_conf_ctx *ctx);
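
/*
 * Illustrative sketch of the usual cgroup-file write handler pattern
 * (my_policy and the body parsing are hypothetical): blkg_conf_prep()
 * parses the "MAJ:MIN ..." prefix of the input, looks up the blkg for
 * that device and returns with the locks it took still held;
 * blkg_conf_finish() undoes all of that.
 *
 *      struct blkg_conf_ctx ctx;
 *      int ret;
 *
 *      ret = blkg_conf_prep(blkcg, &my_policy, buf, &ctx);
 *      if (ret)
 *              return ret;
 *
 *      ... parse ctx.body and update blkg_to_pd(ctx.blkg, &my_policy) ...
 *
 *      blkg_conf_finish(&ctx);
 */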
239 
240 /**
241  * blkcg_css - find the current css
242  *
243  * Find the css associated with either the kthread or the current task.
244  * This may return a dying css, so it is up to the caller to use tryget logic
245  * to confirm it is alive and well.
246  */
247 static inline struct cgroup_subsys_state *blkcg_css(void)
248 {
249 	struct cgroup_subsys_state *css;
250 
251 	css = kthread_blkcg();
252 	if (css)
253 		return css;
254 	return task_css(current, io_cgrp_id);
255 }
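
/*
 * Example (sketch): since blkcg_css() may hand back a dying css, callers
 * that need to keep it around should pin it under RCU first:
 *
 *      struct cgroup_subsys_state *css;
 *
 *      rcu_read_lock();
 *      css = blkcg_css();
 *      if (!css_tryget(css))
 *              css = NULL;
 *      rcu_read_unlock();
 *      ...
 *      if (css)
 *              css_put(css);
 */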
256 
257 static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css)
258 {
259 	return css ? container_of(css, struct blkcg, css) : NULL;
260 }
261 
262 /**
263  * __bio_blkcg - internal, inconsistent version to get blkcg
264  *
265  * DO NOT USE.
266  * This function is inconsistent and consequently is dangerous to use.  The
267  * first part of the function returns a blkcg where a reference is owned by the
268  * bio.  This means it does not need to be rcu protected as it cannot go away
269  * with the bio owning a reference to it.  However, the latter potentially gets
270  * it from task_css().  This can race against task migration and the cgroup
271  * dying.  It is also semantically different as it must be called rcu protected
272  * and is susceptible to failure when trying to get a reference to it.
273  * Therefore, it is not ok to assume that *_get() will always succeed on the
274  * blkcg returned here.
275  */
276 static inline struct blkcg *__bio_blkcg(struct bio *bio)
277 {
278 	if (bio && bio->bi_blkg)
279 		return bio->bi_blkg->blkcg;
280 	return css_to_blkcg(blkcg_css());
281 }
282 
283 /**
284  * bio_blkcg - grab the blkcg associated with a bio
285  * @bio: target bio
286  *
287  * This returns the blkcg associated with a bio, %NULL if not associated.
288  * Callers are expected to either handle %NULL or know association has been
289  * done prior to calling this.
290  */
291 static inline struct blkcg *bio_blkcg(struct bio *bio)
292 {
293 	if (bio && bio->bi_blkg)
294 		return bio->bi_blkg->blkcg;
295 	return NULL;
296 }
297 
298 static inline bool blk_cgroup_congested(void)
299 {
300 	struct cgroup_subsys_state *css;
301 	bool ret = false;
302 
303 	rcu_read_lock();
304 	css = kthread_blkcg();
305 	if (!css)
306 		css = task_css(current, io_cgrp_id);
307 	while (css) {
308 		if (atomic_read(&css->cgroup->congestion_count)) {
309 			ret = true;
310 			break;
311 		}
312 		css = css->parent;
313 	}
314 	rcu_read_unlock();
315 	return ret;
316 }
317 
318 /**
319  * bio_issue_as_root_blkg - see if this bio needs to be issued as root blkg
320  * @return: true if this bio needs to be submitted with the root blkg context.
321  *
322  * In order to avoid priority inversions we sometimes need to issue a bio as if
323  * it were attached to the root blkg, and then backcharge to the actual owning
324  * blkg.  The idea is we do bio_blkcg() to look up the actual context for the
325  * bio and attach the appropriate blkg to the bio.  Then we call this helper and,
326  * if it returns true, run with the root blkg for that queue and do any
327  * backcharging to the originating cgroup once the io is complete.
328  */
329 static inline bool bio_issue_as_root_blkg(struct bio *bio)
330 {
331 	return (bio->bi_opf & (REQ_META | REQ_SWAP)) != 0;
332 }
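
/*
 * Example (sketch): a submission path can pick the blkg to run under like
 * this; the later backcharge to the owning cgroup is policy specific and
 * not shown:
 *
 *      struct blkcg_gq *blkg = bio_issue_as_root_blkg(bio) ?
 *                      blk_queue_root_blkg(q) : bio->bi_blkg;
 */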
333 
334 /**
335  * blkcg_parent - get the parent of a blkcg
336  * @blkcg: blkcg of interest
337  *
338  * Return the parent blkcg of @blkcg.  Can be called anytime.
339  */
340 static inline struct blkcg *blkcg_parent(struct blkcg *blkcg)
341 {
342 	return css_to_blkcg(blkcg->css.parent);
343 }
344 
345 /**
346  * __blkg_lookup - internal version of blkg_lookup()
347  * @blkcg: blkcg of interest
348  * @q: request_queue of interest
349  * @update_hint: whether to update lookup hint with the result or not
350  *
351  * This is the internal version and shouldn't be used by policy
352  * implementations.  Looks up blkgs for the @blkcg - @q pair regardless of
353  * @q's bypass state.  If @update_hint is %true, the caller should be
354  * holding @q->queue_lock and the lookup hint is updated on success.
355  */
356 static inline struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg,
357 					     struct request_queue *q,
358 					     bool update_hint)
359 {
360 	struct blkcg_gq *blkg;
361 
362 	if (blkcg == &blkcg_root)
363 		return q->root_blkg;
364 
365 	blkg = rcu_dereference(blkcg->blkg_hint);
366 	if (blkg && blkg->q == q)
367 		return blkg;
368 
369 	return blkg_lookup_slowpath(blkcg, q, update_hint);
370 }
371 
372 /**
373  * blkg_lookup - lookup blkg for the specified blkcg - q pair
374  * @blkcg: blkcg of interest
375  * @q: request_queue of interest
376  *
377  * Lookup blkg for the @blkcg - @q pair.  This function should be called
378  * under the RCU read lock.
379  */
380 static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg,
381 					   struct request_queue *q)
382 {
383 	WARN_ON_ONCE(!rcu_read_lock_held());
384 	return __blkg_lookup(blkcg, q, false);
385 }
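
/*
 * Example (sketch): the returned blkg is only guaranteed to stay around
 * while the RCU read lock is held unless a reference is taken:
 *
 *      struct blkcg_gq *blkg;
 *
 *      rcu_read_lock();
 *      blkg = blkg_lookup(blkcg, q);
 *      if (blkg && !blkg_tryget(blkg))
 *              blkg = NULL;
 *      rcu_read_unlock();
 *
 *      if (blkg) {
 *              ... use blkg outside the RCU section ...
 *              blkg_put(blkg);
 *      }
 */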
386 
387 /**
388  * blk_queue_root_blkg - return blkg for the (blkcg_root, @q) pair
389  * @q: request_queue of interest
390  *
391  * Lookup blkg for @q at the root level. See also blkg_lookup().
392  */
393 static inline struct blkcg_gq *blk_queue_root_blkg(struct request_queue *q)
394 {
395 	return q->root_blkg;
396 }
397 
398 /**
399  * blkg_to_pd - get policy private data
400  * @blkg: blkg of interest
401  * @pol: policy of interest
402  *
403  * Return pointer to private data associated with the @blkg-@pol pair.
404  */
405 static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
406 						  struct blkcg_policy *pol)
407 {
408 	return blkg ? blkg->pd[pol->plid] : NULL;
409 }
410 
411 static inline struct blkcg_policy_data *blkcg_to_cpd(struct blkcg *blkcg,
412 						     struct blkcg_policy *pol)
413 {
414 	return blkcg ? blkcg->cpd[pol->plid] : NULL;
415 }
416 
417 /**
418  * pd_to_blkg - get blkg associated with policy private data
419  * @pd: policy private data of interest
420  *
421  * @pd is policy private data.  Determine the blkg it's associated with.
422  */
423 static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd)
424 {
425 	return pd ? pd->blkg : NULL;
426 }
427 
428 static inline struct blkcg *cpd_to_blkcg(struct blkcg_policy_data *cpd)
429 {
430 	return cpd ? cpd->blkcg : NULL;
431 }
432 
433 extern void blkcg_destroy_blkgs(struct blkcg *blkcg);
434 
435 #ifdef CONFIG_CGROUP_WRITEBACK
436 
437 /**
438  * blkcg_cgwb_get - get a reference for blkcg->cgwb_list
439  * @blkcg: blkcg of interest
440  *
441  * This is used to track the number of active wbs related to a blkcg.
442  */
443 static inline void blkcg_cgwb_get(struct blkcg *blkcg)
444 {
445 	refcount_inc(&blkcg->cgwb_refcnt);
446 }
447 
448 /**
449  * blkcg_cgwb_put - put a reference for @blkcg->cgwb_list
450  * @blkcg: blkcg of interest
451  *
452  * This is used to track the number of active wbs related to a blkcg.
453  * When this count goes to zero, all active wbs have finished, so the
454  * blkcg can continue destruction by calling blkcg_destroy_blkgs().
455  * This work may occur in cgwb_release_workfn() on the cgwb_release
456  * workqueue.
457  */
458 static inline void blkcg_cgwb_put(struct blkcg *blkcg)
459 {
460 	if (refcount_dec_and_test(&blkcg->cgwb_refcnt))
461 		blkcg_destroy_blkgs(blkcg);
462 }
463 
464 #else
465 
466 static inline void blkcg_cgwb_get(struct blkcg *blkcg) { }
467 
468 static inline void blkcg_cgwb_put(struct blkcg *blkcg)
469 {
470 	/* wb isn't being accounted, so trigger destruction right away */
471 	blkcg_destroy_blkgs(blkcg);
472 }
473 
474 #endif
475 
476 /**
477  * blkg_path - format cgroup path of blkg
478  * @blkg: blkg of interest
479  * @buf: target buffer
480  * @buflen: target buffer length
481  *
482  * Format the path of the cgroup of @blkg into @buf.
483  */
484 static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
485 {
486 	return cgroup_path(blkg->blkcg->css.cgroup, buf, buflen);
487 }
488 
489 /**
490  * blkg_get - get a blkg reference
491  * @blkg: blkg to get
492  *
493  * The caller should be holding an existing reference.
494  */
495 static inline void blkg_get(struct blkcg_gq *blkg)
496 {
497 	percpu_ref_get(&blkg->refcnt);
498 }
499 
500 /**
501  * blkg_tryget - try and get a blkg reference
502  * @blkg: blkg to get
503  *
504  * This is for use when doing an RCU lookup of the blkg.  We may be in the midst
505  * of freeing this blkg, so we can only use it if the refcnt is not zero.
506  */
507 static inline bool blkg_tryget(struct blkcg_gq *blkg)
508 {
509 	return blkg && percpu_ref_tryget(&blkg->refcnt);
510 }
511 
512 /**
513  * blkg_tryget_closest - try and get a blkg ref on the closest blkg
514  * @blkg: blkg to get
515  *
516  * This needs to be called rcu protected.  As the failure mode here is to walk
517  * up the blkg tree, this ensures that the blkg->parent pointers are always
518  * valid.  This returns the blkg that it ended up taking a reference on or %NULL
519  * if no reference was taken.
520  */
521 static inline struct blkcg_gq *blkg_tryget_closest(struct blkcg_gq *blkg)
522 {
523 	struct blkcg_gq *ret_blkg = NULL;
524 
525 	WARN_ON_ONCE(!rcu_read_lock_held());
526 
527 	while (blkg) {
528 		if (blkg_tryget(blkg)) {
529 			ret_blkg = blkg;
530 			break;
531 		}
532 		blkg = blkg->parent;
533 	}
534 
535 	return ret_blkg;
536 }
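
/*
 * Example (sketch): take a reference on @blkg or the nearest live
 * ancestor, use it, then drop it:
 *
 *      rcu_read_lock();
 *      blkg = blkg_tryget_closest(blkg);
 *      rcu_read_unlock();
 *
 *      if (blkg) {
 *              ... charge or associate against blkg ...
 *              blkg_put(blkg);
 *      }
 */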
537 
538 /**
539  * blkg_put - put a blkg reference
540  * @blkg: blkg to put
541  */
542 static inline void blkg_put(struct blkcg_gq *blkg)
543 {
544 	percpu_ref_put(&blkg->refcnt);
545 }
546 
547 /**
548  * blkg_for_each_descendant_pre - pre-order walk of a blkg's descendants
549  * @d_blkg: loop cursor pointing to the current descendant
550  * @pos_css: used for iteration
551  * @p_blkg: target blkg to walk descendants of
552  *
553  * Walk @d_blkg through the descendants of @p_blkg.  Must be used with RCU
554  * read locked.  If called under either blkcg or queue lock, the iteration
555  * is guaranteed to include all and only online blkgs.  The caller may
556  * update @pos_css by calling css_rightmost_descendant() to skip subtree.
557  * @p_blkg is included in the iteration and the first node to be visited.
558  */
559 #define blkg_for_each_descendant_pre(d_blkg, pos_css, p_blkg)		\
560 	css_for_each_descendant_pre((pos_css), &(p_blkg)->blkcg->css)	\
561 		if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),	\
562 					      (p_blkg)->q, false)))
563 
564 /**
565  * blkg_for_each_descendant_post - post-order walk of a blkg's descendants
566  * @d_blkg: loop cursor pointing to the current descendant
567  * @pos_css: used for iteration
568  * @p_blkg: target blkg to walk descendants of
569  *
570  * Similar to blkg_for_each_descendant_pre() but performs post-order
571  * traversal instead.  Synchronization rules are the same.  @p_blkg is
572  * included in the iteration and the last node to be visited.
573  */
574 #define blkg_for_each_descendant_post(d_blkg, pos_css, p_blkg)		\
575 	css_for_each_descendant_post((pos_css), &(p_blkg)->blkcg->css)	\
576 		if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),	\
577 					      (p_blkg)->q, false)))
578 
579 static inline int blkg_rwstat_init(struct blkg_rwstat *rwstat, gfp_t gfp)
580 {
581 	int i, ret;
582 
583 	for (i = 0; i < BLKG_RWSTAT_NR; i++) {
584 		ret = percpu_counter_init(&rwstat->cpu_cnt[i], 0, gfp);
585 		if (ret) {
586 			while (--i >= 0)
587 				percpu_counter_destroy(&rwstat->cpu_cnt[i]);
588 			return ret;
589 		}
590 		atomic64_set(&rwstat->aux_cnt[i], 0);
591 	}
592 	return 0;
593 }
594 
595 static inline void blkg_rwstat_exit(struct blkg_rwstat *rwstat)
596 {
597 	int i;
598 
599 	for (i = 0; i < BLKG_RWSTAT_NR; i++)
600 		percpu_counter_destroy(&rwstat->cpu_cnt[i]);
601 }
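
/*
 * Example (sketch): a policy keeping private rwstats pairs init and exit
 * in its pd_alloc_fn()/pd_free_fn().  The serviced field of the
 * hypothetical my_blkg_data is made up for this sketch.  In my_pd_alloc():
 *
 *      if (blkg_rwstat_init(&d->serviced, gfp)) {
 *              kfree(d);
 *              return NULL;
 *      }
 *
 * and in my_pd_free():
 *
 *      blkg_rwstat_exit(&d->serviced);
 */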
602 
603 /**
604  * blkg_rwstat_add - add a value to a blkg_rwstat
605  * @rwstat: target blkg_rwstat
606  * @op: REQ_OP and flags
607  * @val: value to add
608  *
609  * Add @val to @rwstat.  The counters are chosen according to @op.  The
610  * caller is responsible for synchronizing calls to this function.
611  */
612 static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat,
613 				   unsigned int op, uint64_t val)
614 {
615 	struct percpu_counter *cnt;
616 
617 	if (op_is_discard(op))
618 		cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_DISCARD];
619 	else if (op_is_write(op))
620 		cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_WRITE];
621 	else
622 		cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_READ];
623 
624 	percpu_counter_add_batch(cnt, val, BLKG_STAT_CPU_BATCH);
625 
626 	if (op_is_sync(op))
627 		cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_SYNC];
628 	else
629 		cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_ASYNC];
630 
631 	percpu_counter_add_batch(cnt, val, BLKG_STAT_CPU_BATCH);
632 }
633 
634 /**
635  * blkg_rwstat_read - read the current values of a blkg_rwstat
636  * @rwstat: blkg_rwstat to read
637  *
638  * Read the current snapshot of @rwstat and store it in @result.
639  */
640 static inline void blkg_rwstat_read(struct blkg_rwstat *rwstat,
641 		struct blkg_rwstat_sample *result)
642 {
643 	int i;
644 
645 	for (i = 0; i < BLKG_RWSTAT_NR; i++)
646 		result->cnt[i] =
647 			percpu_counter_sum_positive(&rwstat->cpu_cnt[i]);
648 }
649 
650 /**
651  * blkg_rwstat_total - read the total count of a blkg_rwstat
652  * @rwstat: blkg_rwstat to read
653  *
654  * Return the combined READ and WRITE count of @rwstat.  This
655  * function can be called without synchronization and takes care of u64
656  * atomicity.
657  */
658 static inline uint64_t blkg_rwstat_total(struct blkg_rwstat *rwstat)
659 {
660 	struct blkg_rwstat_sample tmp = { };
661 
662 	blkg_rwstat_read(rwstat, &tmp);
663 	return tmp.cnt[BLKG_RWSTAT_READ] + tmp.cnt[BLKG_RWSTAT_WRITE];
664 }
665 
666 /**
667  * blkg_rwstat_reset - reset a blkg_rwstat
668  * @rwstat: blkg_rwstat to reset
669  */
670 static inline void blkg_rwstat_reset(struct blkg_rwstat *rwstat)
671 {
672 	int i;
673 
674 	for (i = 0; i < BLKG_RWSTAT_NR; i++) {
675 		percpu_counter_set(&rwstat->cpu_cnt[i], 0);
676 		atomic64_set(&rwstat->aux_cnt[i], 0);
677 	}
678 }
679 
680 /**
681  * blkg_rwstat_add_aux - add a blkg_rwstat into another's aux count
682  * @to: the destination blkg_rwstat
683  * @from: the source
684  *
685  * Add @from's count including the aux one to @to's aux count.
686  */
687 static inline void blkg_rwstat_add_aux(struct blkg_rwstat *to,
688 				       struct blkg_rwstat *from)
689 {
690 	u64 sum[BLKG_RWSTAT_NR];
691 	int i;
692 
693 	for (i = 0; i < BLKG_RWSTAT_NR; i++)
694 		sum[i] = percpu_counter_sum_positive(&from->cpu_cnt[i]);
695 
696 	for (i = 0; i < BLKG_RWSTAT_NR; i++)
697 		atomic64_add(sum[i] + atomic64_read(&from->aux_cnt[i]),
698 			     &to->aux_cnt[i]);
699 }
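
/*
 * Example (sketch): this is what lets recursive stats outlive a child
 * blkg.  It is roughly what the core does for stat_bytes/stat_ios when a
 * blkg is torn down; a policy with private rwstats can follow the same
 * pattern from its pd_offline_fn():
 *
 *      struct blkcg_gq *parent = blkg->parent;
 *
 *      if (parent) {
 *              blkg_rwstat_add_aux(&parent->stat_bytes, &blkg->stat_bytes);
 *              blkg_rwstat_add_aux(&parent->stat_ios, &blkg->stat_ios);
 *      }
 */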
700 
701 #ifdef CONFIG_BLK_DEV_THROTTLING
702 extern bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
703 			   struct bio *bio);
704 #else
705 static inline bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
706 				  struct bio *bio) { return false; }
707 #endif
708 
709 bool __blkcg_punt_bio_submit(struct bio *bio);
710 
711 static inline bool blkcg_punt_bio_submit(struct bio *bio)
712 {
713 	if (bio->bi_opf & REQ_CGROUP_PUNT)
714 		return __blkcg_punt_bio_submit(bio);
715 	else
716 		return false;
717 }
718 
719 static inline void blkcg_bio_issue_init(struct bio *bio)
720 {
721 	bio_issue_init(&bio->bi_issue, bio_sectors(bio));
722 }
723 
724 static inline bool blkcg_bio_issue_check(struct request_queue *q,
725 					 struct bio *bio)
726 {
727 	struct blkcg_gq *blkg;
728 	bool throtl = false;
729 
730 	rcu_read_lock();
731 
732 	if (!bio->bi_blkg) {
733 		char b[BDEVNAME_SIZE];
734 
735 		WARN_ONCE(1,
736 			  "no blkg associated for bio on block-device: %s\n",
737 			  bio_devname(bio, b));
738 		bio_associate_blkg(bio);
739 	}
740 
741 	blkg = bio->bi_blkg;
742 
743 	throtl = blk_throtl_bio(q, blkg, bio);
744 
745 	if (!throtl) {
746 		/*
747 		 * If the bio is flagged with BIO_QUEUE_ENTERED it means this
748 		 * is a split bio and we would have already accounted for the
749 		 * size of the bio.
750 		 */
751 		if (!bio_flagged(bio, BIO_QUEUE_ENTERED))
752 			blkg_rwstat_add(&blkg->stat_bytes, bio->bi_opf,
753 					bio->bi_iter.bi_size);
754 		blkg_rwstat_add(&blkg->stat_ios, bio->bi_opf, 1);
755 	}
756 
757 	blkcg_bio_issue_init(bio);
758 
759 	rcu_read_unlock();
760 	return !throtl;
761 }
762 
763 static inline void blkcg_use_delay(struct blkcg_gq *blkg)
764 {
765 	if (atomic_add_return(1, &blkg->use_delay) == 1)
766 		atomic_inc(&blkg->blkcg->css.cgroup->congestion_count);
767 }
768 
769 static inline int blkcg_unuse_delay(struct blkcg_gq *blkg)
770 {
771 	int old = atomic_read(&blkg->use_delay);
772 
773 	if (old == 0)
774 		return 0;
775 
776 	/*
777 	 * We do this song and dance because we can race with somebody else
778 	 * adding or removing delay.  If we just did an atomic_dec we'd end up
779 	 * negative and we'd already be in trouble.  We need to subtract 1 and
780 	 * then check to see if we were the last delay so we can drop the
781 	 * congestion count on the cgroup.
782 	 */
783 	while (old) {
784 		int cur = atomic_cmpxchg(&blkg->use_delay, old, old - 1);
785 		if (cur == old)
786 			break;
787 		old = cur;
788 	}
789 
790 	if (old == 0)
791 		return 0;
792 	if (old == 1)
793 		atomic_dec(&blkg->blkcg->css.cgroup->congestion_count);
794 	return 1;
795 }
796 
797 static inline void blkcg_clear_delay(struct blkcg_gq *blkg)
798 {
799 	int old = atomic_read(&blkg->use_delay);
800 	if (!old)
801 		return;
802 	/* We only want 1 person clearing the congestion count for this blkg. */
803 	while (old) {
804 		int cur = atomic_cmpxchg(&blkg->use_delay, old, 0);
805 		if (cur == old) {
806 			atomic_dec(&blkg->blkcg->css.cgroup->congestion_count);
807 			break;
808 		}
809 		old = cur;
810 	}
811 }
812 
813 void blkcg_add_delay(struct blkcg_gq *blkg, u64 now, u64 delta);
814 void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay);
815 void blkcg_maybe_throttle_current(void);
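
/*
 * Example (sketch, roughly the pattern blk-iolatency follows): when a
 * controller decides a group is over its limit it marks the blkg
 * delayed, accounts how much delay to apply and asks the current task to
 * throttle on its way back to user space:
 *
 *      blkcg_use_delay(blkg);
 *      blkcg_add_delay(blkg, ktime_get_ns(), delay_ns);
 *      blkcg_schedule_throttle(q, false);
 *
 * blkcg_maybe_throttle_current() then applies the accumulated delay from
 * the exit-to-usermode path; delay_ns and the memdelay flag are the
 * caller's choice.
 */
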
816 #else	/* CONFIG_BLK_CGROUP */
817 
818 struct blkcg {
819 };
820 
821 struct blkg_policy_data {
822 };
823 
824 struct blkcg_policy_data {
825 };
826 
827 struct blkcg_gq {
828 };
829 
830 struct blkcg_policy {
831 };
832 
833 #define blkcg_root_css	((struct cgroup_subsys_state *)ERR_PTR(-EINVAL))
834 
835 static inline void blkcg_maybe_throttle_current(void) { }
836 static inline bool blk_cgroup_congested(void) { return false; }
837 
838 #ifdef CONFIG_BLOCK
839 
840 static inline void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay) { }
841 
842 static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, void *key) { return NULL; }
843 static inline struct blkcg_gq *blk_queue_root_blkg(struct request_queue *q)
844 { return NULL; }
845 static inline int blkcg_init_queue(struct request_queue *q) { return 0; }
846 static inline void blkcg_drain_queue(struct request_queue *q) { }
847 static inline void blkcg_exit_queue(struct request_queue *q) { }
848 static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; }
849 static inline void blkcg_policy_unregister(struct blkcg_policy *pol) { }
850 static inline int blkcg_activate_policy(struct request_queue *q,
851 					const struct blkcg_policy *pol) { return 0; }
852 static inline void blkcg_deactivate_policy(struct request_queue *q,
853 					   const struct blkcg_policy *pol) { }
854 
855 static inline struct blkcg *__bio_blkcg(struct bio *bio) { return NULL; }
856 static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; }
857 
858 static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
859 						  struct blkcg_policy *pol) { return NULL; }
860 static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd) { return NULL; }
861 static inline char *blkg_path(struct blkcg_gq *blkg) { return NULL; }
862 static inline void blkg_get(struct blkcg_gq *blkg) { }
863 static inline void blkg_put(struct blkcg_gq *blkg) { }
864 
865 static inline bool blkcg_punt_bio_submit(struct bio *bio) { return false; }
866 static inline void blkcg_bio_issue_init(struct bio *bio) { }
867 static inline bool blkcg_bio_issue_check(struct request_queue *q,
868 					 struct bio *bio) { return true; }
869 
870 #define blk_queue_for_each_rl(rl, q)	\
871 	for ((rl) = &(q)->root_rl; (rl); (rl) = NULL)
872 
873 #endif	/* CONFIG_BLOCK */
874 #endif	/* CONFIG_BLK_CGROUP */
875 #endif	/* _BLK_CGROUP_H */
876