#ifndef _BLK_CGROUP_H
#define _BLK_CGROUP_H
/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <[email protected]>
 *
 * Copyright (C) 2008 Fabio Checconi <[email protected]>
 *		      Paolo Valente <[email protected]>
 *
 * Copyright (C) 2009 Vivek Goyal <[email protected]>
 *		      Nauman Rafique <[email protected]>
 */

#include <linux/cgroup.h>
#include <linux/u64_stats_sync.h>
#include <linux/seq_file.h>
#include <linux/radix-tree.h>
#include <linux/blkdev.h>
#include <linux/atomic.h>

/* Max limits for throttle policy */
#define THROTL_IOPS_MAX		UINT_MAX

#ifdef CONFIG_BLK_CGROUP

enum blkg_rwstat_type {
	BLKG_RWSTAT_READ,
	BLKG_RWSTAT_WRITE,
	BLKG_RWSTAT_SYNC,
	BLKG_RWSTAT_ASYNC,

	BLKG_RWSTAT_NR,
	BLKG_RWSTAT_TOTAL = BLKG_RWSTAT_NR,
};

struct blkcg_gq;

struct blkcg {
	struct cgroup_subsys_state	css;
	spinlock_t			lock;

	struct radix_tree_root		blkg_tree;
	struct blkcg_gq			*blkg_hint;
	struct hlist_head		blkg_list;

	struct blkcg_policy_data	*pd[BLKCG_MAX_POLS];

	struct list_head		all_blkcgs_node;
#ifdef CONFIG_CGROUP_WRITEBACK
	struct list_head		cgwb_list;
#endif
};

struct blkg_stat {
	struct u64_stats_sync		syncp;
	uint64_t			cnt;
};

struct blkg_rwstat {
	struct u64_stats_sync		syncp;
	uint64_t			cnt[BLKG_RWSTAT_NR];
};

/*
 * A blkcg_gq (blkg) is the association between a block cgroup (blkcg) and a
 * request_queue (q).  This is used by blkcg policies which need to track
 * information per blkcg - q pair.
 *
 * There can be multiple active blkcg policies and each has its private
 * data on each blkg, the size of which is determined by
 * blkcg_policy->pd_size.  blkcg core allocates and frees such areas
 * together with blkg and invokes pd_init/exit_fn() methods.
 *
 * Such private data must embed struct blkg_policy_data (pd) at the
 * beginning and pd_size can't be smaller than the size of pd.
 */
struct blkg_policy_data {
	/* the blkg and policy id this per-policy data belongs to */
	struct blkcg_gq			*blkg;
	int				plid;

	/* used during policy activation */
	struct list_head		alloc_node;
};

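/*
 * A minimal illustration of the embedding rule above.  Not part of this
 * header's API; all "example_*" names are hypothetical.
 */
struct example_blkg_data {
	struct blkg_policy_data	pd;	/* must be the first member */
	uint64_t		ios_dispatched;
};

/* convert back from the pd area the blkcg core allocates */
static inline struct example_blkg_data *pd_to_example(struct blkg_policy_data *pd)
{
	return pd ? container_of(pd, struct example_blkg_data, pd) : NULL;
}
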
/*
 * Policies that need to keep per-blkcg data which is independent of any
 * request_queue associated with it must specify its size with the
 * cpd_size field of the blkcg_policy structure and embed a
 * blkcg_policy_data in it.  cpd_init() is invoked to let each policy
 * handle per-blkcg data.
 */
struct blkcg_policy_data {
	/* the policy id this per-policy data belongs to */
	int				plid;
};

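/*
 * Again purely illustrative and hypothetical: queue-independent per-blkcg
 * policy data embeds struct blkcg_policy_data at its start, mirroring the
 * per-blkg case above, and reports its size via cpd_size.
 */
struct example_blkcg_data {
	struct blkcg_policy_data cpd;	/* must be the first member */
	unsigned int		 example_weight;
};
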
/* association between a blk cgroup and a request queue */
struct blkcg_gq {
	/* Pointer to the associated request_queue */
	struct request_queue		*q;
	struct list_head		q_node;
	struct hlist_node		blkcg_node;
	struct blkcg			*blkcg;

	/*
	 * Each blkg gets congested separately and the congestion state is
	 * propagated to the matching bdi_writeback_congested.
	 */
	struct bdi_writeback_congested	*wb_congested;

	/* all non-root blkcg_gq's are guaranteed to have access to parent */
	struct blkcg_gq			*parent;

	/* request allocation list for this blkcg-q pair */
	struct request_list		rl;

	/* reference count */
	atomic_t			refcnt;

	/* is this blkg online? protected by both blkcg and q locks */
	bool				online;

	struct blkg_policy_data		*pd[BLKCG_MAX_POLS];

	struct rcu_head			rcu_head;
};

typedef void (blkcg_pol_init_cpd_fn)(const struct blkcg *blkcg);
typedef void (blkcg_pol_init_pd_fn)(struct blkcg_gq *blkg);
typedef void (blkcg_pol_online_pd_fn)(struct blkcg_gq *blkg);
typedef void (blkcg_pol_offline_pd_fn)(struct blkcg_gq *blkg);
typedef void (blkcg_pol_exit_pd_fn)(struct blkcg_gq *blkg);
typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkcg_gq *blkg);

struct blkcg_policy {
	int				plid;
	/* policy specific private data size */
	size_t				pd_size;
	/* policy specific per-blkcg data size */
	size_t				cpd_size;
	/* cgroup files for the policy */
	struct cftype			*cftypes;

	/* operations */
	blkcg_pol_init_cpd_fn		*cpd_init_fn;
	blkcg_pol_init_pd_fn		*pd_init_fn;
	blkcg_pol_online_pd_fn		*pd_online_fn;
	blkcg_pol_offline_pd_fn		*pd_offline_fn;
	blkcg_pol_exit_pd_fn		*pd_exit_fn;
	blkcg_pol_reset_pd_stats_fn	*pd_reset_stats_fn;
};

extern struct blkcg blkcg_root;
extern struct cgroup_subsys_state * const blkcg_root_css;

struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, struct request_queue *q);
struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
				    struct request_queue *q);
int blkcg_init_queue(struct request_queue *q);
void blkcg_drain_queue(struct request_queue *q);
void blkcg_exit_queue(struct request_queue *q);

/* Blkio controller policy registration */
int blkcg_policy_register(struct blkcg_policy *pol);
void blkcg_policy_unregister(struct blkcg_policy *pol);
int blkcg_activate_policy(struct request_queue *q,
			  const struct blkcg_policy *pol);
void blkcg_deactivate_policy(struct request_queue *q,
			     const struct blkcg_policy *pol);

void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
		       u64 (*prfill)(struct seq_file *,
				     struct blkg_policy_data *, int),
		       const struct blkcg_policy *pol, int data,
		       bool show_total);
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v);
u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
			 const struct blkg_rwstat *rwstat);
u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off);
u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
		       int off);

u64 blkg_stat_recursive_sum(struct blkg_policy_data *pd, int off);
struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkg_policy_data *pd,
					     int off);

struct blkg_conf_ctx {
	struct gendisk			*disk;
	struct blkcg_gq			*blkg;
	u64				v;
};

int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
		   const char *input, struct blkg_conf_ctx *ctx);
void blkg_conf_finish(struct blkg_conf_ctx *ctx);

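/*
 * A rough, hypothetical sketch (not a real policy) tying together struct
 * blkcg_policy and the registration API above: declare pd_size/cpd_size
 * from the example structs, wire up a lifecycle callback, and register.
 * plid is assigned by blkcg_policy_register().
 */
static void example_pd_init(struct blkcg_gq *blkg);

static struct blkcg_policy blkcg_policy_example = {
	.pd_size	= sizeof(struct example_blkg_data),
	.cpd_size	= sizeof(struct example_blkcg_data),
	.pd_init_fn	= example_pd_init,
};

static void example_pd_init(struct blkcg_gq *blkg)
{
	struct example_blkg_data *ed =
		pd_to_example(blkg->pd[blkcg_policy_example.plid]);

	ed->ios_dispatched = 0;
}

static inline int example_policy_setup(void)
{
	return blkcg_policy_register(&blkcg_policy_example);
}
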
static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css)
{
	return css ? container_of(css, struct blkcg, css) : NULL;
}

static inline struct blkcg *task_blkcg(struct task_struct *tsk)
{
	return css_to_blkcg(task_css(tsk, blkio_cgrp_id));
}

static inline struct blkcg *bio_blkcg(struct bio *bio)
{
	if (bio && bio->bi_css)
		return css_to_blkcg(bio->bi_css);
	return task_blkcg(current);
}

static inline struct cgroup_subsys_state *
task_get_blkcg_css(struct task_struct *task)
{
	return task_get_css(task, blkio_cgrp_id);
}

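/*
 * Hypothetical sketch: task_get_blkcg_css() returns a css with a
 * reference held, which the caller drops with css_put() when done.
 */
static inline bool example_task_in_root_blkcg(struct task_struct *task)
{
	struct cgroup_subsys_state *css = task_get_blkcg_css(task);
	bool ret = (css_to_blkcg(css) == &blkcg_root);

	css_put(css);
	return ret;
}
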
/**
 * blkcg_parent - get the parent of a blkcg
 * @blkcg: blkcg of interest
 *
 * Return the parent blkcg of @blkcg.  Can be called anytime.
 */
static inline struct blkcg *blkcg_parent(struct blkcg *blkcg)
{
	return css_to_blkcg(blkcg->css.parent);
}

/**
 * blkg_to_pd - get policy private data
 * @blkg: blkg of interest
 * @pol: policy of interest
 *
 * Return pointer to private data associated with the @blkg-@pol pair.
 */
static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol)
{
	return blkg ? blkg->pd[pol->plid] : NULL;
}

/**
 * blkcg_to_cpd - get per-blkcg policy data
 * @blkcg: blkcg of interest
 * @pol: policy of interest
 *
 * Return pointer to the per-blkcg data associated with the @blkcg-@pol pair.
 */
static inline struct blkcg_policy_data *blkcg_to_cpd(struct blkcg *blkcg,
						     struct blkcg_policy *pol)
{
	return blkcg ? blkcg->pd[pol->plid] : NULL;
}

/**
 * pd_to_blkg - get blkg associated with policy private data
 * @pd: policy private data of interest
 *
 * @pd is policy private data.  Determine the blkg it's associated with.
 */
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd)
{
	return pd ? pd->blkg : NULL;
}

/**
 * blkg_path - format cgroup path of blkg
 * @blkg: blkg of interest
 * @buf: target buffer
 * @buflen: target buffer length
 *
 * Format the path of the cgroup of @blkg into @buf.  Returns 0 on
 * success, -ENAMETOOLONG if the path doesn't fit in @buf.
 */
static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
{
	char *p;

	p = cgroup_path(blkg->blkcg->css.cgroup, buf, buflen);
	if (!p) {
		strncpy(buf, "<unavailable>", buflen);
		return -ENAMETOOLONG;
	}

	memmove(buf, p, buf + buflen - p);
	return 0;
}

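/*
 * A minimal, hypothetical usage sketch for blkg_path(); the helper below
 * is not part of this header.
 */
static inline void example_log_blkg_path(struct blkcg_gq *blkg)
{
	char path[128];

	if (!blkg_path(blkg, path, sizeof(path)))
		printk(KERN_DEBUG "blkg path: %s\n", path);
}
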
/**
 * blkg_get - get a blkg reference
 * @blkg: blkg to get
 *
 * The caller should be holding an existing reference.
 */
static inline void blkg_get(struct blkcg_gq *blkg)
{
	WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
	atomic_inc(&blkg->refcnt);
}

void __blkg_release_rcu(struct rcu_head *rcu);

/**
 * blkg_put - put a blkg reference
 * @blkg: blkg to put
 */
static inline void blkg_put(struct blkcg_gq *blkg)
{
	WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
	if (atomic_dec_and_test(&blkg->refcnt))
		call_rcu(&blkg->rcu_head, __blkg_release_rcu);
}

struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg, struct request_queue *q,
			       bool update_hint);

/**
 * blkg_for_each_descendant_pre - pre-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Walk @d_blkg through the descendants of @p_blkg.  Must be used with RCU
 * read locked.  If called under either blkcg or queue lock, the iteration
 * is guaranteed to include all and only online blkgs.  The caller may
 * update @pos_css by calling css_rightmost_descendant() to skip subtree.
 * @p_blkg is included in the iteration and the first node to be visited.
 */
#define blkg_for_each_descendant_pre(d_blkg, pos_css, p_blkg)		\
	css_for_each_descendant_pre((pos_css), &(p_blkg)->blkcg->css)	\
		if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),	\
					      (p_blkg)->q, false)))

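/*
 * A sketch, not part of this header, of walking a blkg's subtree under
 * RCU and counting the online members:
 */
static inline unsigned int example_nr_online_descendants(struct blkcg_gq *p_blkg)
{
	struct cgroup_subsys_state *pos_css;
	struct blkcg_gq *d_blkg;
	unsigned int nr = 0;

	rcu_read_lock();
	blkg_for_each_descendant_pre(d_blkg, pos_css, p_blkg)
		if (d_blkg->online)
			nr++;
	rcu_read_unlock();
	return nr;
}
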
/**
 * blkg_for_each_descendant_post - post-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Similar to blkg_for_each_descendant_pre() but performs post-order
 * traversal instead.  Synchronization rules are the same.  @p_blkg is
 * included in the iteration and the last node to be visited.
 */
#define blkg_for_each_descendant_post(d_blkg, pos_css, p_blkg)		\
	css_for_each_descendant_post((pos_css), &(p_blkg)->blkcg->css)	\
		if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),	\
					      (p_blkg)->q, false)))

/**
 * blk_get_rl - get request_list to use
 * @q: request_queue of interest
 * @bio: bio which will be attached to the allocated request (may be %NULL)
 *
 * The caller wants to allocate a request from @q to use for @bio.  Find
 * the request_list to use and obtain a reference on it.  Should be called
 * under queue_lock.  This function is guaranteed to return non-%NULL
 * request_list.
 */
static inline struct request_list *blk_get_rl(struct request_queue *q,
					      struct bio *bio)
{
	struct blkcg *blkcg;
	struct blkcg_gq *blkg;

	rcu_read_lock();

	blkcg = bio_blkcg(bio);

	/* bypass blkg lookup and use @q->root_rl directly for root */
	if (blkcg == &blkcg_root)
		goto root_rl;

	/*
	 * Try to use blkg->rl.  blkg lookup may fail under memory pressure
	 * or if either the blkcg or queue is going away.  Fall back to
	 * root_rl in such cases.
	 */
	blkg = blkg_lookup_create(blkcg, q);
	if (unlikely(IS_ERR(blkg)))
		goto root_rl;

	blkg_get(blkg);
	rcu_read_unlock();
	return &blkg->rl;
root_rl:
	rcu_read_unlock();
	return &q->root_rl;
}

/**
 * blk_put_rl - put request_list
 * @rl: request_list to put
 *
 * Put the reference acquired by blk_get_rl().  Should be called under
 * queue_lock.
 */
static inline void blk_put_rl(struct request_list *rl)
{
	/* root_rl may not have blkg set */
	if (rl->blkg && rl->blkg->blkcg != &blkcg_root)
		blkg_put(rl->blkg);
}

/**
 * blk_rq_set_rl - associate a request with a request_list
 * @rq: request of interest
 * @rl: target request_list
 *
 * Associate @rq with @rl so that accounting and freeing can know the
 * request_list @rq came from.
 */
static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl)
{
	rq->rl = rl;
}

/**
 * blk_rq_rl - return the request_list a request came from
 * @rq: request of interest
 *
 * Return the request_list @rq is allocated from.
 */
static inline struct request_list *blk_rq_rl(struct request *rq)
{
	return rq->rl;
}

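/*
 * A hypothetical sketch (not part of this header) of how the request_list
 * helpers pair up across a request's lifetime; the caller is assumed to
 * hold q->queue_lock around both ends:
 */
static inline void example_rl_roundtrip(struct request_queue *q,
					struct request *rq, struct bio *bio)
{
	struct request_list *rl;

	rl = blk_get_rl(q, bio);	/* at allocation time */
	blk_rq_set_rl(rq, rl);
	/* ... the request is dispatched and completes ... */
	blk_put_rl(blk_rq_rl(rq));	/* when the request is freed */
}
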
struct request_list *__blk_queue_next_rl(struct request_list *rl,
					 struct request_queue *q);
/**
 * blk_queue_for_each_rl - iterate through all request_lists of a request_queue
 * @rl: request_list cursor
 * @q: request_queue to iterate over
 *
 * Should be used under queue_lock.
 */
#define blk_queue_for_each_rl(rl, q)	\
	for ((rl) = &(q)->root_rl; (rl); (rl) = __blk_queue_next_rl((rl), (q)))

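/*
 * Sketch only: counting a queue's request_lists with the iterator above,
 * with q->queue_lock assumed held by the caller.
 */
static inline unsigned int example_count_rls(struct request_queue *q)
{
	struct request_list *rl;
	unsigned int nr = 0;

	blk_queue_for_each_rl(rl, q)
		nr++;
	return nr;
}
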
static inline void blkg_stat_init(struct blkg_stat *stat)
{
	u64_stats_init(&stat->syncp);
}

/**
 * blkg_stat_add - add a value to a blkg_stat
 * @stat: target blkg_stat
 * @val: value to add
 *
 * Add @val to @stat.  The caller is responsible for synchronizing calls to
 * this function.
 */
static inline void blkg_stat_add(struct blkg_stat *stat, uint64_t val)
{
	u64_stats_update_begin(&stat->syncp);
	stat->cnt += val;
	u64_stats_update_end(&stat->syncp);
}

/**
 * blkg_stat_read - read the current value of a blkg_stat
 * @stat: blkg_stat to read
 *
 * Read the current value of @stat.  This function can be called without
 * synchronization and takes care of u64 atomicity.
 */
static inline uint64_t blkg_stat_read(struct blkg_stat *stat)
{
	unsigned int start;
	uint64_t v;

	do {
		start = u64_stats_fetch_begin_irq(&stat->syncp);
		v = stat->cnt;
	} while (u64_stats_fetch_retry_irq(&stat->syncp, start));

	return v;
}

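/*
 * A hypothetical sketch of the blkg_stat contract: writers serialize
 * among themselves (e.g. under a queue lock), while readers may sample
 * at any time.
 */
static inline uint64_t example_stat_bump_and_sample(struct blkg_stat *stat)
{
	blkg_stat_add(stat, 1);		/* writer side, caller-serialized */
	return blkg_stat_read(stat);	/* reader side, lock-free */
}
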
/**
 * blkg_stat_reset - reset a blkg_stat
 * @stat: blkg_stat to reset
 */
static inline void blkg_stat_reset(struct blkg_stat *stat)
{
	stat->cnt = 0;
}

/**
 * blkg_stat_merge - merge a blkg_stat into another
 * @to: the destination blkg_stat
 * @from: the source
 *
 * Add @from's count to @to.
 */
static inline void blkg_stat_merge(struct blkg_stat *to, struct blkg_stat *from)
{
	blkg_stat_add(to, blkg_stat_read(from));
}

static inline void blkg_rwstat_init(struct blkg_rwstat *rwstat)
{
	u64_stats_init(&rwstat->syncp);
}

/**
 * blkg_rwstat_add - add a value to a blkg_rwstat
 * @rwstat: target blkg_rwstat
 * @rw: mask of REQ_{WRITE|SYNC}
 * @val: value to add
 *
 * Add @val to @rwstat.  The counters are chosen according to @rw.  The
 * caller is responsible for synchronizing calls to this function.
 */
static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat,
				   int rw, uint64_t val)
{
	u64_stats_update_begin(&rwstat->syncp);

	if (rw & REQ_WRITE)
		rwstat->cnt[BLKG_RWSTAT_WRITE] += val;
	else
		rwstat->cnt[BLKG_RWSTAT_READ] += val;
	if (rw & REQ_SYNC)
		rwstat->cnt[BLKG_RWSTAT_SYNC] += val;
	else
		rwstat->cnt[BLKG_RWSTAT_ASYNC] += val;

	u64_stats_update_end(&rwstat->syncp);
}

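/*
 * Sketch only: accounting a completed bio's bytes into a blkg_rwstat,
 * taking the direction/sync bits straight from the bio.
 */
static inline void example_rwstat_account_bio(struct blkg_rwstat *rwstat,
					      struct bio *bio)
{
	blkg_rwstat_add(rwstat, bio->bi_rw, bio->bi_iter.bi_size);
}
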
/**
 * blkg_rwstat_read - read the current values of a blkg_rwstat
 * @rwstat: blkg_rwstat to read
 *
 * Read the current snapshot of @rwstat and return it as the return value.
 * This function can be called without synchronization and takes care of
 * u64 atomicity.
 */
static inline struct blkg_rwstat blkg_rwstat_read(struct blkg_rwstat *rwstat)
{
	unsigned int start;
	struct blkg_rwstat tmp;

	do {
		start = u64_stats_fetch_begin_irq(&rwstat->syncp);
		tmp = *rwstat;
	} while (u64_stats_fetch_retry_irq(&rwstat->syncp, start));

	return tmp;
}

/**
 * blkg_rwstat_total - read the total count of a blkg_rwstat
 * @rwstat: blkg_rwstat to read
 *
 * Return the total count of @rwstat regardless of the IO direction.  This
 * function can be called without synchronization and takes care of u64
 * atomicity.
 */
static inline uint64_t blkg_rwstat_total(struct blkg_rwstat *rwstat)
{
	struct blkg_rwstat tmp = blkg_rwstat_read(rwstat);

	return tmp.cnt[BLKG_RWSTAT_READ] + tmp.cnt[BLKG_RWSTAT_WRITE];
}

/**
 * blkg_rwstat_reset - reset a blkg_rwstat
 * @rwstat: blkg_rwstat to reset
 */
static inline void blkg_rwstat_reset(struct blkg_rwstat *rwstat)
{
	memset(rwstat->cnt, 0, sizeof(rwstat->cnt));
}

/**
 * blkg_rwstat_merge - merge a blkg_rwstat into another
 * @to: the destination blkg_rwstat
 * @from: the source
 *
 * Add @from's counts to @to.
 */
static inline void blkg_rwstat_merge(struct blkg_rwstat *to,
				     struct blkg_rwstat *from)
{
	struct blkg_rwstat v = blkg_rwstat_read(from);
	int i;

	u64_stats_update_begin(&to->syncp);
	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		to->cnt[i] += v.cnt[i];
	u64_stats_update_end(&to->syncp);
}

#else	/* CONFIG_BLK_CGROUP */

struct blkcg {
};

struct blkg_policy_data {
};

struct blkcg_policy_data {
};

struct blkcg_gq {
};

struct blkcg_policy {
};

#define blkcg_root_css	((struct cgroup_subsys_state *)ERR_PTR(-EINVAL))

static inline struct cgroup_subsys_state *
task_get_blkcg_css(struct task_struct *task)
{
	return NULL;
}

#ifdef CONFIG_BLOCK

static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, void *key) { return NULL; }
static inline int blkcg_init_queue(struct request_queue *q) { return 0; }
static inline void blkcg_drain_queue(struct request_queue *q) { }
static inline void blkcg_exit_queue(struct request_queue *q) { }
static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; }
static inline void blkcg_policy_unregister(struct blkcg_policy *pol) { }
static inline int blkcg_activate_policy(struct request_queue *q,
					const struct blkcg_policy *pol) { return 0; }
static inline void blkcg_deactivate_policy(struct request_queue *q,
					   const struct blkcg_policy *pol) { }

static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; }

static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol) { return NULL; }
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd) { return NULL; }
static inline char *blkg_path(struct blkcg_gq *blkg) { return NULL; }
static inline void blkg_get(struct blkcg_gq *blkg) { }
static inline void blkg_put(struct blkcg_gq *blkg) { }

static inline struct request_list *blk_get_rl(struct request_queue *q,
					      struct bio *bio) { return &q->root_rl; }
static inline void blk_put_rl(struct request_list *rl) { }
static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl) { }
static inline struct request_list *blk_rq_rl(struct request *rq) { return &rq->q->root_rl; }

#define blk_queue_for_each_rl(rl, q)	\
	for ((rl) = &(q)->root_rl; (rl); (rl) = NULL)

#endif	/* CONFIG_BLOCK */
#endif	/* CONFIG_BLK_CGROUP */
#endif	/* _BLK_CGROUP_H */