1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _BLK_CGROUP_H
3 #define _BLK_CGROUP_H
4 /*
5  * Common Block IO controller cgroup interface
6  *
7  * Based on ideas and code from CFQ, CFS and BFQ:
8  * Copyright (C) 2003 Jens Axboe <[email protected]>
9  *
10  * Copyright (C) 2008 Fabio Checconi <[email protected]>
11  *		      Paolo Valente <[email protected]>
12  *
13  * Copyright (C) 2009 Vivek Goyal <[email protected]>
14  * 	              Nauman Rafique <[email protected]>
15  */
16 
17 #include <linux/cgroup.h>
18 #include <linux/percpu_counter.h>
19 #include <linux/seq_file.h>
20 #include <linux/radix-tree.h>
21 #include <linux/blkdev.h>
22 #include <linux/atomic.h>
23 #include <linux/kthread.h>
24 
25 /* percpu_counter batch for blkg_[rw]stats, per-cpu drift doesn't matter */
26 #define BLKG_STAT_CPU_BATCH	(INT_MAX / 2)
27 
28 /* Max limits for throttle policy */
29 #define THROTL_IOPS_MAX		UINT_MAX
30 
31 #ifdef CONFIG_BLK_CGROUP
32 
33 enum blkg_rwstat_type {
34 	BLKG_RWSTAT_READ,
35 	BLKG_RWSTAT_WRITE,
36 	BLKG_RWSTAT_SYNC,
37 	BLKG_RWSTAT_ASYNC,
38 
39 	BLKG_RWSTAT_NR,
40 	BLKG_RWSTAT_TOTAL = BLKG_RWSTAT_NR,
41 };
42 
43 struct blkcg_gq;
44 
45 struct blkcg {
46 	struct cgroup_subsys_state	css;
47 	spinlock_t			lock;
48 
49 	struct radix_tree_root		blkg_tree;
50 	struct blkcg_gq	__rcu		*blkg_hint;
51 	struct hlist_head		blkg_list;
52 
53 	struct blkcg_policy_data	*cpd[BLKCG_MAX_POLS];
54 
55 	struct list_head		all_blkcgs_node;
56 #ifdef CONFIG_CGROUP_WRITEBACK
57 	struct list_head		cgwb_list;
58 #endif
59 };
60 
61 /*
62  * blkg_[rw]stat->aux_cnt is excluded for local stats but included for
63  * recursive.  Used to carry stats of dead children, and, for blkg_rwstat,
64  * to carry result values from read and sum operations.
65  */
66 struct blkg_stat {
67 	struct percpu_counter		cpu_cnt;
68 	atomic64_t			aux_cnt;
69 };
70 
71 struct blkg_rwstat {
72 	struct percpu_counter		cpu_cnt[BLKG_RWSTAT_NR];
73 	atomic64_t			aux_cnt[BLKG_RWSTAT_NR];
74 };
75 
76 /*
77  * A blkcg_gq (blkg) is an association between a block cgroup (blkcg) and a
78  * request_queue (q).  This is used by blkcg policies which need to track
79  * information per blkcg - q pair.
80  *
81  * There can be multiple active blkcg policies and each blkg:policy pair is
82  * represented by a blkg_policy_data which is allocated and freed by each
83  * policy's pd_alloc/free_fn() methods.  A policy can allocate a private data
84  * area by allocating a larger data structure which embeds blkg_policy_data
85  * at the beginning (an illustrative sketch follows the struct below).
86  */
87 struct blkg_policy_data {
88 	/* the blkg and policy id this per-policy data belongs to */
89 	struct blkcg_gq			*blkg;
90 	int				plid;
91 };
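
/*
 * Illustrative sketch (not part of this header): a hypothetical policy
 * typically wraps blkg_policy_data at the start of its own per-blkg
 * structure and converts back with container_of().  All names below
 * ("example_*", "dequeue", "serviced") are made up for illustration.
 */
struct example_blkg_data {
	struct blkg_policy_data		pd;		/* must be the first member */
	struct blkg_stat		dequeue;	/* hypothetical per-blkg stat */
	struct blkg_rwstat		serviced;	/* hypothetical per-blkg rwstat */
};

static inline struct example_blkg_data *pd_to_example(struct blkg_policy_data *pd)
{
	return pd ? container_of(pd, struct example_blkg_data, pd) : NULL;
}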
92 
93 /*
94  * Policies that need to keep per-blkcg data which is independent of any
95  * request_queue associated with it should implement cpd_alloc/free_fn()
96  * methods.  A policy can allocate a private data area by allocating a larger
97  * data structure which embeds blkcg_policy_data at the beginning (sketched
98  * below).  cpd_init_fn() is invoked to let each policy handle per-blkcg data.
99  */
100 struct blkcg_policy_data {
101 	/* the blkcg and policy id this per-policy data belongs to */
102 	struct blkcg			*blkcg;
103 	int				plid;
104 };
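
/*
 * Illustrative counterpart at the blkcg level (hypothetical, not part of
 * this header): per-cgroup defaults that do not depend on any particular
 * request_queue would live in a structure embedding blkcg_policy_data,
 * allocated by the policy's cpd_alloc_fn().
 */
struct example_blkcg_data {
	struct blkcg_policy_data	cpd;		/* must be the first member */
	unsigned int			default_weight;	/* hypothetical per-cgroup knob */
};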
105 
106 /* association between a blk cgroup and a request queue */
107 struct blkcg_gq {
108 	/* Pointer to the associated request_queue */
109 	struct request_queue		*q;
110 	struct list_head		q_node;
111 	struct hlist_node		blkcg_node;
112 	struct blkcg			*blkcg;
113 
114 	/*
115 	 * Each blkg gets congested separately and the congestion state is
116 	 * propagated to the matching bdi_writeback_congested.
117 	 */
118 	struct bdi_writeback_congested	*wb_congested;
119 
120 	/* all non-root blkcg_gq's are guaranteed to have access to parent */
121 	struct blkcg_gq			*parent;
122 
123 	/* request allocation list for this blkcg-q pair */
124 	struct request_list		rl;
125 
126 	/* reference count */
127 	atomic_t			refcnt;
128 
129 	/* is this blkg online? protected by both blkcg and q locks */
130 	bool				online;
131 
132 	struct blkg_rwstat		stat_bytes;
133 	struct blkg_rwstat		stat_ios;
134 
135 	struct blkg_policy_data		*pd[BLKCG_MAX_POLS];
136 
137 	struct rcu_head			rcu_head;
138 };
139 
140 typedef struct blkcg_policy_data *(blkcg_pol_alloc_cpd_fn)(gfp_t gfp);
141 typedef void (blkcg_pol_init_cpd_fn)(struct blkcg_policy_data *cpd);
142 typedef void (blkcg_pol_free_cpd_fn)(struct blkcg_policy_data *cpd);
143 typedef void (blkcg_pol_bind_cpd_fn)(struct blkcg_policy_data *cpd);
144 typedef struct blkg_policy_data *(blkcg_pol_alloc_pd_fn)(gfp_t gfp, int node);
145 typedef void (blkcg_pol_init_pd_fn)(struct blkg_policy_data *pd);
146 typedef void (blkcg_pol_online_pd_fn)(struct blkg_policy_data *pd);
147 typedef void (blkcg_pol_offline_pd_fn)(struct blkg_policy_data *pd);
148 typedef void (blkcg_pol_free_pd_fn)(struct blkg_policy_data *pd);
149 typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkg_policy_data *pd);
150 
151 struct blkcg_policy {
152 	int				plid;
153 	/* cgroup files for the policy */
154 	struct cftype			*dfl_cftypes;
155 	struct cftype			*legacy_cftypes;
156 
157 	/* operations */
158 	blkcg_pol_alloc_cpd_fn		*cpd_alloc_fn;
159 	blkcg_pol_init_cpd_fn		*cpd_init_fn;
160 	blkcg_pol_free_cpd_fn		*cpd_free_fn;
161 	blkcg_pol_bind_cpd_fn		*cpd_bind_fn;
162 
163 	blkcg_pol_alloc_pd_fn		*pd_alloc_fn;
164 	blkcg_pol_init_pd_fn		*pd_init_fn;
165 	blkcg_pol_online_pd_fn		*pd_online_fn;
166 	blkcg_pol_offline_pd_fn		*pd_offline_fn;
167 	blkcg_pol_free_pd_fn		*pd_free_fn;
168 	blkcg_pol_reset_pd_stats_fn	*pd_reset_stats_fn;
169 };
170 
171 extern struct blkcg blkcg_root;
172 extern struct cgroup_subsys_state * const blkcg_root_css;
173 
174 struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg,
175 				      struct request_queue *q, bool update_hint);
176 struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
177 				    struct request_queue *q);
178 int blkcg_init_queue(struct request_queue *q);
179 void blkcg_drain_queue(struct request_queue *q);
180 void blkcg_exit_queue(struct request_queue *q);
181 
182 /* Blkio controller policy registration */
183 int blkcg_policy_register(struct blkcg_policy *pol);
184 void blkcg_policy_unregister(struct blkcg_policy *pol);
185 int blkcg_activate_policy(struct request_queue *q,
186 			  const struct blkcg_policy *pol);
187 void blkcg_deactivate_policy(struct request_queue *q,
188 			     const struct blkcg_policy *pol);
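
/*
 * Illustrative registration flow (hypothetical policy, not part of this
 * header): a policy fills in a struct blkcg_policy with its callbacks,
 * registers it once (typically from module init) and activates it on
 * every request_queue it manages.  kzalloc_node()/kfree() assume
 * <linux/slab.h>; all "example_*" names are made up.
 *
 *	static struct blkg_policy_data *example_pd_alloc(gfp_t gfp, int node)
 *	{
 *		struct example_blkg_data *ed = kzalloc_node(sizeof(*ed), gfp, node);
 *
 *		return ed ? &ed->pd : NULL;
 *	}
 *
 *	static void example_pd_free(struct blkg_policy_data *pd)
 *	{
 *		kfree(pd_to_example(pd));
 *	}
 *
 *	static struct blkcg_policy example_policy = {
 *		.pd_alloc_fn	= example_pd_alloc,
 *		.pd_free_fn	= example_pd_free,
 *	};
 *
 *	ret = blkcg_policy_register(&example_policy);		// module init
 *	ret = blkcg_activate_policy(q, &example_policy);	// per request_queue
 *	...
 *	blkcg_deactivate_policy(q, &example_policy);		// queue teardown
 *	blkcg_policy_unregister(&example_policy);		// module exit
 */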
189 
190 const char *blkg_dev_name(struct blkcg_gq *blkg);
191 void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
192 		       u64 (*prfill)(struct seq_file *,
193 				     struct blkg_policy_data *, int),
194 		       const struct blkcg_policy *pol, int data,
195 		       bool show_total);
196 u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v);
197 u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
198 			 const struct blkg_rwstat *rwstat);
199 u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off);
200 u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
201 		       int off);
202 int blkg_print_stat_bytes(struct seq_file *sf, void *v);
203 int blkg_print_stat_ios(struct seq_file *sf, void *v);
204 int blkg_print_stat_bytes_recursive(struct seq_file *sf, void *v);
205 int blkg_print_stat_ios_recursive(struct seq_file *sf, void *v);
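
/*
 * Illustrative seq_file show method (hypothetical, not part of this
 * header): a policy usually prints one value per blkg by going through
 * blkcg_print_blkgs() with one of the prfill helpers.  The offset is
 * relative to the policy's private data (here the hypothetical
 * struct example_blkg_data sketched above, whose blkg_policy_data sits
 * at offset 0).
 *
 *	static int example_print_dequeue(struct seq_file *sf, void *v)
 *	{
 *		blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
 *				  blkg_prfill_stat, &example_policy,
 *				  offsetof(struct example_blkg_data, dequeue),
 *				  false);
 *		return 0;
 *	}
 */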
206 
207 u64 blkg_stat_recursive_sum(struct blkcg_gq *blkg,
208 			    struct blkcg_policy *pol, int off);
209 struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkcg_gq *blkg,
210 					     struct blkcg_policy *pol, int off);
211 
212 struct blkg_conf_ctx {
213 	struct gendisk			*disk;
214 	struct blkcg_gq			*blkg;
215 	char				*body;
216 };
217 
218 int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
219 		   char *input, struct blkg_conf_ctx *ctx);
220 void blkg_conf_finish(struct blkg_conf_ctx *ctx);
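
/*
 * Illustrative configuration writer (hypothetical, not part of this
 * header): a policy's cftype ->write() handler typically hands its
 * "MAJ:MIN <value>" input to blkg_conf_prep(), which looks up (or
 * creates) the blkg of the named device, and drops the locks taken by
 * it with blkg_conf_finish().
 *
 *	struct blkg_conf_ctx ctx;
 *	int ret;
 *
 *	ret = blkg_conf_prep(blkcg, &example_policy, input, &ctx);
 *	if (ret)
 *		return ret;
 *
 *	// ctx.blkg is the blkg of the named device and ctx.body the rest
 *	// of the line; apply it to blkg_to_pd(ctx.blkg, &example_policy).
 *
 *	blkg_conf_finish(&ctx);
 */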
221 
222 
223 static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css)
224 {
225 	return css ? container_of(css, struct blkcg, css) : NULL;
226 }
227 
228 static inline struct blkcg *bio_blkcg(struct bio *bio)
229 {
230 	struct cgroup_subsys_state *css;
231 
232 	if (bio && bio->bi_css)
233 		return css_to_blkcg(bio->bi_css);
234 	css = kthread_blkcg();
235 	if (css)
236 		return css_to_blkcg(css);
237 	return css_to_blkcg(task_css(current, io_cgrp_id));
238 }
239 
240 /**
241  * blkcg_parent - get the parent of a blkcg
242  * @blkcg: blkcg of interest
243  *
244  * Return the parent blkcg of @blkcg.  Can be called anytime.
245  */
246 static inline struct blkcg *blkcg_parent(struct blkcg *blkcg)
247 {
248 	return css_to_blkcg(blkcg->css.parent);
249 }
250 
251 /**
252  * __blkg_lookup - internal version of blkg_lookup()
253  * @blkcg: blkcg of interest
254  * @q: request_queue of interest
255  * @update_hint: whether to update lookup hint with the result or not
256  *
257  * This is the internal version and shouldn't be used by policy
258  * implementations.  Looks up blkgs for the @blkcg - @q pair regardless of
259  * @q's bypass state.  If @update_hint is %true, the caller should be
260  * holding @q->queue_lock and the lookup hint is updated on success.
261  */
262 static inline struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg,
263 					     struct request_queue *q,
264 					     bool update_hint)
265 {
266 	struct blkcg_gq *blkg;
267 
268 	if (blkcg == &blkcg_root)
269 		return q->root_blkg;
270 
271 	blkg = rcu_dereference(blkcg->blkg_hint);
272 	if (blkg && blkg->q == q)
273 		return blkg;
274 
275 	return blkg_lookup_slowpath(blkcg, q, update_hint);
276 }
277 
278 /**
279  * blkg_lookup - lookup blkg for the specified blkcg - q pair
280  * @blkcg: blkcg of interest
281  * @q: request_queue of interest
282  *
283  * Lookup blkg for the @blkcg - @q pair.  This function should be called
284  * under RCU read lock and is guaranteed to return %NULL if @q is bypassing
285  * - see blk_queue_bypass_start() for details.
286  */
287 static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg,
288 					   struct request_queue *q)
289 {
290 	WARN_ON_ONCE(!rcu_read_lock_held());
291 
292 	if (unlikely(blk_queue_bypass(q)))
293 		return NULL;
294 	return __blkg_lookup(blkcg, q, false);
295 }
296 
297 /**
298  * blkg_to_pd - get policy private data
299  * @blkg: blkg of interest
300  * @pol: policy of interest
301  *
302  * Return pointer to private data associated with the @blkg-@pol pair.
303  */
304 static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
305 						  struct blkcg_policy *pol)
306 {
307 	return blkg ? blkg->pd[pol->plid] : NULL;
308 }
309 
310 static inline struct blkcg_policy_data *blkcg_to_cpd(struct blkcg *blkcg,
311 						     struct blkcg_policy *pol)
312 {
313 	return blkcg ? blkcg->cpd[pol->plid] : NULL;
314 }
315 
316 /**
317  * pd_to_blkg - get blkg associated with policy private data
318  * @pd: policy private data of interest
319  *
320  * @pd is policy private data.  Determine the blkg it's associated with.
321  */
322 static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd)
323 {
324 	return pd ? pd->blkg : NULL;
325 }
326 
327 static inline struct blkcg *cpd_to_blkcg(struct blkcg_policy_data *cpd)
328 {
329 	return cpd ? cpd->blkcg : NULL;
330 }
331 
332 /**
333  * blkg_path - format cgroup path of blkg
334  * @blkg: blkg of interest
335  * @buf: target buffer
336  * @buflen: target buffer length
337  *
338  * Format the path of the cgroup of @blkg into @buf.
339  */
340 static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
341 {
342 	return cgroup_path(blkg->blkcg->css.cgroup, buf, buflen);
343 }
344 
345 /**
346  * blkg_get - get a blkg reference
347  * @blkg: blkg to get
348  *
349  * The caller should be holding an existing reference.
350  */
351 static inline void blkg_get(struct blkcg_gq *blkg)
352 {
353 	WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
354 	atomic_inc(&blkg->refcnt);
355 }
356 
357 void __blkg_release_rcu(struct rcu_head *rcu);
358 
359 /**
360  * blkg_put - put a blkg reference
361  * @blkg: blkg to put
362  */
363 static inline void blkg_put(struct blkcg_gq *blkg)
364 {
365 	WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
366 	if (atomic_dec_and_test(&blkg->refcnt))
367 		call_rcu(&blkg->rcu_head, __blkg_release_rcu);
368 }
369 
370 /**
371  * blkg_for_each_descendant_pre - pre-order walk of a blkg's descendants
372  * @d_blkg: loop cursor pointing to the current descendant
373  * @pos_css: used for iteration
374  * @p_blkg: target blkg to walk descendants of
375  *
376  * Walk @d_blkg through the descendants of @p_blkg.  Must be used with RCU
377  * read locked.  If called under either blkcg or queue lock, the iteration
378  * is guaranteed to include all and only online blkgs.  The caller may
379  * update @pos_css by calling css_rightmost_descendant() to skip subtree.
380  * @p_blkg is included in the iteration and the first node to be visited.
381  */
382 #define blkg_for_each_descendant_pre(d_blkg, pos_css, p_blkg)		\
383 	css_for_each_descendant_pre((pos_css), &(p_blkg)->blkcg->css)	\
384 		if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),	\
385 					      (p_blkg)->q, false)))
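
/*
 * Illustrative walk (hypothetical, not part of this header): visit the
 * policy data of every blkg in @p_blkg's subtree, e.g. when propagating
 * a limit change downwards; example_update_limit() is made up.
 *
 *	struct cgroup_subsys_state *pos_css;
 *	struct blkcg_gq *blkg;
 *
 *	rcu_read_lock();
 *	blkg_for_each_descendant_pre(blkg, pos_css, p_blkg)
 *		example_update_limit(blkg_to_pd(blkg, &example_policy));
 *	rcu_read_unlock();
 */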
386 
387 /**
388  * blkg_for_each_descendant_post - post-order walk of a blkg's descendants
389  * @d_blkg: loop cursor pointing to the current descendant
390  * @pos_css: used for iteration
391  * @p_blkg: target blkg to walk descendants of
392  *
393  * Similar to blkg_for_each_descendant_pre() but performs post-order
394  * traversal instead.  Synchronization rules are the same.  @p_blkg is
395  * included in the iteration and the last node to be visited.
396  */
397 #define blkg_for_each_descendant_post(d_blkg, pos_css, p_blkg)		\
398 	css_for_each_descendant_post((pos_css), &(p_blkg)->blkcg->css)	\
399 		if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),	\
400 					      (p_blkg)->q, false)))
401 
402 /**
403  * blk_get_rl - get request_list to use
404  * @q: request_queue of interest
405  * @bio: bio which will be attached to the allocated request (may be %NULL)
406  *
407  * The caller wants to allocate a request from @q to use for @bio.  Find
408  * the request_list to use and obtain a reference on it.  Should be called
409  * under queue_lock.  This function is guaranteed to return a non-%NULL
410  * request_list.
411  */
412 static inline struct request_list *blk_get_rl(struct request_queue *q,
413 					      struct bio *bio)
414 {
415 	struct blkcg *blkcg;
416 	struct blkcg_gq *blkg;
417 
418 	rcu_read_lock();
419 
420 	blkcg = bio_blkcg(bio);
421 
422 	/* bypass blkg lookup and use @q->root_rl directly for root */
423 	if (blkcg == &blkcg_root)
424 		goto root_rl;
425 
426 	/*
427 	 * Try to use blkg->rl.  blkg lookup may fail under memory pressure
428 	 * or if either the blkcg or queue is going away.  Fall back to
429 	 * root_rl in such cases.
430 	 */
431 	blkg = blkg_lookup(blkcg, q);
432 	if (unlikely(!blkg))
433 		goto root_rl;
434 
435 	blkg_get(blkg);
436 	rcu_read_unlock();
437 	return &blkg->rl;
438 root_rl:
439 	rcu_read_unlock();
440 	return &q->root_rl;
441 }
442 
443 /**
444  * blk_put_rl - put request_list
445  * @rl: request_list to put
446  *
447  * Put the reference acquired by blk_get_rl().  Should be called under
448  * queue_lock.
449  */
450 static inline void blk_put_rl(struct request_list *rl)
451 {
452 	if (rl->blkg->blkcg != &blkcg_root)
453 		blkg_put(rl->blkg);
454 }
455 
456 /**
457  * blk_rq_set_rl - associate a request with a request_list
458  * @rq: request of interest
459  * @rl: target request_list
460  *
461  * Associate @rq with @rl so that accounting and freeing can know the
462  * request_list @rq came from.
463  */
464 static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl)
465 {
466 	rq->rl = rl;
467 }
468 
469 /**
470  * blk_rq_rl - return the request_list a request came from
471  * @rq: request of interest
472  *
473  * Return the request_list @rq is allocated from.
474  */
475 static inline struct request_list *blk_rq_rl(struct request *rq)
476 {
477 	return rq->rl;
478 }
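
/*
 * Illustrative pairing (a sketch of how the request allocator is expected
 * to use these helpers, not part of this header): the request_list picked
 * at allocation time travels with the request and is put back when the
 * request is freed; example_alloc_request() is made up.
 *
 *	rl = blk_get_rl(q, bio);		// under q->queue_lock
 *	rq = example_alloc_request(rl);		// hypothetical allocator
 *	blk_rq_set_rl(rq, rl);
 *	...
 *	blk_put_rl(blk_rq_rl(rq));		// when freeing rq
 */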
479 
480 struct request_list *__blk_queue_next_rl(struct request_list *rl,
481 					 struct request_queue *q);
482 /**
483  * blk_queue_for_each_rl - iterate through all request_lists of a request_queue
484  *
485  * Should be used under queue_lock.
486  */
487 #define blk_queue_for_each_rl(rl, q)	\
488 	for ((rl) = &(q)->root_rl; (rl); (rl) = __blk_queue_next_rl((rl), (q)))
489 
490 static inline int blkg_stat_init(struct blkg_stat *stat, gfp_t gfp)
491 {
492 	int ret;
493 
494 	ret = percpu_counter_init(&stat->cpu_cnt, 0, gfp);
495 	if (ret)
496 		return ret;
497 
498 	atomic64_set(&stat->aux_cnt, 0);
499 	return 0;
500 }
501 
502 static inline void blkg_stat_exit(struct blkg_stat *stat)
503 {
504 	percpu_counter_destroy(&stat->cpu_cnt);
505 }
506 
507 /**
508  * blkg_stat_add - add a value to a blkg_stat
509  * @stat: target blkg_stat
510  * @val: value to add
511  *
512  * Add @val to @stat.  The caller must ensure that IRQs on the same CPU
513  * don't re-enter this function for the same counter.
514  */
515 static inline void blkg_stat_add(struct blkg_stat *stat, uint64_t val)
516 {
517 	percpu_counter_add_batch(&stat->cpu_cnt, val, BLKG_STAT_CPU_BATCH);
518 }
519 
520 /**
521  * blkg_stat_read - read the current value of a blkg_stat
522  * @stat: blkg_stat to read
523  */
524 static inline uint64_t blkg_stat_read(struct blkg_stat *stat)
525 {
526 	return percpu_counter_sum_positive(&stat->cpu_cnt);
527 }
528 
529 /**
530  * blkg_stat_reset - reset a blkg_stat
531  * @stat: blkg_stat to reset
532  */
533 static inline void blkg_stat_reset(struct blkg_stat *stat)
534 {
535 	percpu_counter_set(&stat->cpu_cnt, 0);
536 	atomic64_set(&stat->aux_cnt, 0);
537 }
538 
539 /**
540  * blkg_stat_add_aux - add a blkg_stat into another's aux count
541  * @to: the destination blkg_stat
542  * @from: the source
543  *
544  * Add @from's count including the aux one to @to's aux count.
545  */
546 static inline void blkg_stat_add_aux(struct blkg_stat *to,
547 				     struct blkg_stat *from)
548 {
549 	atomic64_add(blkg_stat_read(from) + atomic64_read(&from->aux_cnt),
550 		     &to->aux_cnt);
551 }
552 
553 static inline int blkg_rwstat_init(struct blkg_rwstat *rwstat, gfp_t gfp)
554 {
555 	int i, ret;
556 
557 	for (i = 0; i < BLKG_RWSTAT_NR; i++) {
558 		ret = percpu_counter_init(&rwstat->cpu_cnt[i], 0, gfp);
559 		if (ret) {
560 			while (--i >= 0)
561 				percpu_counter_destroy(&rwstat->cpu_cnt[i]);
562 			return ret;
563 		}
564 		atomic64_set(&rwstat->aux_cnt[i], 0);
565 	}
566 	return 0;
567 }
568 
569 static inline void blkg_rwstat_exit(struct blkg_rwstat *rwstat)
570 {
571 	int i;
572 
573 	for (i = 0; i < BLKG_RWSTAT_NR; i++)
574 		percpu_counter_destroy(&rwstat->cpu_cnt[i]);
575 }
576 
577 /**
578  * blkg_rwstat_add - add a value to a blkg_rwstat
579  * @rwstat: target blkg_rwstat
580  * @op: REQ_OP and flags
581  * @val: value to add
582  *
583  * Add @val to @rwstat.  The counters are chosen according to @op.  The
584  * caller is responsible for synchronizing calls to this function.
585  */
586 static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat,
587 				   unsigned int op, uint64_t val)
588 {
589 	struct percpu_counter *cnt;
590 
591 	if (op_is_write(op))
592 		cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_WRITE];
593 	else
594 		cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_READ];
595 
596 	percpu_counter_add_batch(cnt, val, BLKG_STAT_CPU_BATCH);
597 
598 	if (op_is_sync(op))
599 		cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_SYNC];
600 	else
601 		cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_ASYNC];
602 
603 	percpu_counter_add_batch(cnt, val, BLKG_STAT_CPU_BATCH);
604 }
605 
606 /**
607  * blkg_rwstat_read - read the current values of a blkg_rwstat
608  * @rwstat: blkg_rwstat to read
609  *
610  * Read the current snapshot of @rwstat and return it in the aux counts.
611  */
612 static inline struct blkg_rwstat blkg_rwstat_read(struct blkg_rwstat *rwstat)
613 {
614 	struct blkg_rwstat result;
615 	int i;
616 
617 	for (i = 0; i < BLKG_RWSTAT_NR; i++)
618 		atomic64_set(&result.aux_cnt[i],
619 			     percpu_counter_sum_positive(&rwstat->cpu_cnt[i]));
620 	return result;
621 }
622 
623 /**
624  * blkg_rwstat_total - read the total count of a blkg_rwstat
625  * @rwstat: blkg_rwstat to read
626  *
627  * Return the total count of @rwstat regardless of the IO direction.  This
628  * function can be called without synchronization and takes care of u64
629  * atomicity.
630  */
631 static inline uint64_t blkg_rwstat_total(struct blkg_rwstat *rwstat)
632 {
633 	struct blkg_rwstat tmp = blkg_rwstat_read(rwstat);
634 
635 	return atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_READ]) +
636 		atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_WRITE]);
637 }
638 
639 /**
640  * blkg_rwstat_reset - reset a blkg_rwstat
641  * @rwstat: blkg_rwstat to reset
642  */
643 static inline void blkg_rwstat_reset(struct blkg_rwstat *rwstat)
644 {
645 	int i;
646 
647 	for (i = 0; i < BLKG_RWSTAT_NR; i++) {
648 		percpu_counter_set(&rwstat->cpu_cnt[i], 0);
649 		atomic64_set(&rwstat->aux_cnt[i], 0);
650 	}
651 }
652 
653 /**
654  * blkg_rwstat_add_aux - add a blkg_rwstat into another's aux count
655  * @to: the destination blkg_rwstat
656  * @from: the source
657  *
658  * Add @from's count including the aux one to @to's aux count.
659  */
660 static inline void blkg_rwstat_add_aux(struct blkg_rwstat *to,
661 				       struct blkg_rwstat *from)
662 {
663 	struct blkg_rwstat v = blkg_rwstat_read(from);
664 	int i;
665 
666 	for (i = 0; i < BLKG_RWSTAT_NR; i++)
667 		atomic64_add(atomic64_read(&v.aux_cnt[i]) +
668 			     atomic64_read(&from->aux_cnt[i]),
669 			     &to->aux_cnt[i]);
670 }
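
/*
 * Illustrative transfer (hypothetical, not part of this header): a
 * policy's pd_offline_fn() can fold a dying child's counters into its
 * parent with the add_aux helpers so that recursive sums keep seeing the
 * dead child's contribution.  The "example_*" names are made up.
 *
 *	struct example_blkg_data *ed = pd_to_example(pd);
 *	struct blkcg_gq *parent = ed->pd.blkg->parent;
 *
 *	if (parent) {
 *		struct example_blkg_data *ped =
 *			pd_to_example(blkg_to_pd(parent, &example_policy));
 *
 *		blkg_stat_add_aux(&ped->dequeue, &ed->dequeue);
 *		blkg_rwstat_add_aux(&ped->serviced, &ed->serviced);
 *	}
 */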
671 
672 #ifdef CONFIG_BLK_DEV_THROTTLING
673 extern bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
674 			   struct bio *bio);
675 #else
676 static inline bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
677 				  struct bio *bio) { return false; }
678 #endif
679 
680 static inline bool blkcg_bio_issue_check(struct request_queue *q,
681 					 struct bio *bio)
682 {
683 	struct blkcg *blkcg;
684 	struct blkcg_gq *blkg;
685 	bool throtl = false;
686 
687 	rcu_read_lock();
688 	blkcg = bio_blkcg(bio);
689 
690 	/* associate blkcg if bio hasn't attached one */
691 	bio_associate_blkcg(bio, &blkcg->css);
692 
693 	blkg = blkg_lookup(blkcg, q);
694 	if (unlikely(!blkg)) {
695 		spin_lock_irq(q->queue_lock);
696 		blkg = blkg_lookup_create(blkcg, q);
697 		if (IS_ERR(blkg))
698 			blkg = NULL;
699 		spin_unlock_irq(q->queue_lock);
700 	}
701 
702 	throtl = blk_throtl_bio(q, blkg, bio);
703 
704 	if (!throtl) {
705 		blkg = blkg ?: q->root_blkg;
706 		blkg_rwstat_add(&blkg->stat_bytes, bio->bi_opf,
707 				bio->bi_iter.bi_size);
708 		blkg_rwstat_add(&blkg->stat_ios, bio->bi_opf, 1);
709 	}
710 
711 	rcu_read_unlock();
712 	return !throtl;
713 }
714 
715 #else	/* CONFIG_BLK_CGROUP */
716 
717 struct blkcg {
718 };
719 
720 struct blkg_policy_data {
721 };
722 
723 struct blkcg_policy_data {
724 };
725 
726 struct blkcg_gq {
727 };
728 
729 struct blkcg_policy {
730 };
731 
732 #define blkcg_root_css	((struct cgroup_subsys_state *)ERR_PTR(-EINVAL))
733 
734 #ifdef CONFIG_BLOCK
735 
736 static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, void *key) { return NULL; }
737 static inline int blkcg_init_queue(struct request_queue *q) { return 0; }
738 static inline void blkcg_drain_queue(struct request_queue *q) { }
739 static inline void blkcg_exit_queue(struct request_queue *q) { }
740 static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; }
741 static inline void blkcg_policy_unregister(struct blkcg_policy *pol) { }
742 static inline int blkcg_activate_policy(struct request_queue *q,
743 					const struct blkcg_policy *pol) { return 0; }
744 static inline void blkcg_deactivate_policy(struct request_queue *q,
745 					   const struct blkcg_policy *pol) { }
746 
747 static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; }
748 
749 static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
750 						  struct blkcg_policy *pol) { return NULL; }
751 static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd) { return NULL; }
752 static inline char *blkg_path(struct blkcg_gq *blkg) { return NULL; }
753 static inline void blkg_get(struct blkcg_gq *blkg) { }
754 static inline void blkg_put(struct blkcg_gq *blkg) { }
755 
756 static inline struct request_list *blk_get_rl(struct request_queue *q,
757 					      struct bio *bio) { return &q->root_rl; }
758 static inline void blk_put_rl(struct request_list *rl) { }
759 static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl) { }
760 static inline struct request_list *blk_rq_rl(struct request *rq) { return &rq->q->root_rl; }
761 
762 static inline bool blkcg_bio_issue_check(struct request_queue *q,
763 					 struct bio *bio) { return true; }
764 
765 #define blk_queue_for_each_rl(rl, q)	\
766 	for ((rl) = &(q)->root_rl; (rl); (rl) = NULL)
767 
768 #endif	/* CONFIG_BLOCK */
769 #endif	/* CONFIG_BLK_CGROUP */
770 #endif	/* _BLK_CGROUP_H */
771