#ifndef _BLK_CGROUP_H
#define _BLK_CGROUP_H
/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <[email protected]>
 *
 * Copyright (C) 2008 Fabio Checconi <[email protected]>
 *                    Paolo Valente <[email protected]>
 *
 * Copyright (C) 2009 Vivek Goyal <[email protected]>
 *                    Nauman Rafique <[email protected]>
 */

#include <linux/cgroup.h>
#include <linux/percpu_counter.h>
#include <linux/seq_file.h>
#include <linux/radix-tree.h>
#include <linux/blkdev.h>
#include <linux/atomic.h>

/* percpu_counter batch for blkg_[rw]stats, per-cpu drift doesn't matter */
#define BLKG_STAT_CPU_BATCH     (INT_MAX / 2)

/* Max limits for throttle policy */
#define THROTL_IOPS_MAX         UINT_MAX

#ifdef CONFIG_BLK_CGROUP

enum blkg_rwstat_type {
        BLKG_RWSTAT_READ,
        BLKG_RWSTAT_WRITE,
        BLKG_RWSTAT_SYNC,
        BLKG_RWSTAT_ASYNC,

        BLKG_RWSTAT_NR,
        BLKG_RWSTAT_TOTAL = BLKG_RWSTAT_NR,
};

struct blkcg_gq;

struct blkcg {
        struct cgroup_subsys_state css;
        spinlock_t lock;

        struct radix_tree_root blkg_tree;
        struct blkcg_gq __rcu *blkg_hint;
        struct hlist_head blkg_list;

        struct blkcg_policy_data *cpd[BLKCG_MAX_POLS];

        struct list_head all_blkcgs_node;
#ifdef CONFIG_CGROUP_WRITEBACK
        struct list_head cgwb_list;
#endif
};

/*
 * blkg_[rw]stat->aux_cnt is excluded for local stats but included for
 * recursive.  Used to carry stats of dead children, and, for blkg_rwstat,
 * to carry result values from read and sum operations.
 */
struct blkg_stat {
        struct percpu_counter cpu_cnt;
        atomic64_t aux_cnt;
};

struct blkg_rwstat {
        struct percpu_counter cpu_cnt[BLKG_RWSTAT_NR];
        atomic64_t aux_cnt[BLKG_RWSTAT_NR];
};

/*
 * A blkcg_gq (blkg) is an association between a block cgroup (blkcg) and a
 * request_queue (q).  This is used by blkcg policies which need to track
 * information per blkcg - q pair.
 *
 * There can be multiple active blkcg policies and each blkg:policy pair is
 * represented by a blkg_policy_data which is allocated and freed by each
 * policy's pd_alloc/free_fn() methods.  A policy can allocate private data
 * area by allocating larger data structure which embeds blkg_policy_data
 * at the beginning.
 */
struct blkg_policy_data {
        /* the blkg and policy id this per-policy data belongs to */
        struct blkcg_gq *blkg;
        int plid;
};

/*
 * Policies that need to keep per-blkcg data which is independent from any
 * request_queue associated to it should implement cpd_alloc/free_fn()
 * methods.  A policy can allocate private data area by allocating larger
 * data structure which embeds blkcg_policy_data at the beginning.
 * cpd_init() is invoked to let each policy handle per-blkcg data.
 */
struct blkcg_policy_data {
        /* the blkcg and policy id this per-policy data belongs to */
        struct blkcg *blkcg;
        int plid;
};

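/*
 * Illustrative only (the "foo" names below are hypothetical, not part of
 * this interface): a policy usually embeds blkg_policy_data at the head of
 * its own per-blkg structure and converts back with container_of(), e.g.
 *
 *      struct foo_group {
 *              struct blkg_policy_data pd;     // must come first
 *              u64 bytes_dispatched;
 *      };
 *
 *      static struct foo_group *pd_to_fg(struct blkg_policy_data *pd)
 *      {
 *              return pd ? container_of(pd, struct foo_group, pd) : NULL;
 *      }
 */
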
/* association between a blk cgroup and a request queue */
struct blkcg_gq {
        /* Pointer to the associated request_queue */
        struct request_queue *q;
        struct list_head q_node;
        struct hlist_node blkcg_node;
        struct blkcg *blkcg;

        /*
         * Each blkg gets congested separately and the congestion state is
         * propagated to the matching bdi_writeback_congested.
         */
        struct bdi_writeback_congested *wb_congested;

        /* all non-root blkcg_gq's are guaranteed to have access to parent */
        struct blkcg_gq *parent;

        /* request allocation list for this blkcg-q pair */
        struct request_list rl;

        /* reference count */
        atomic_t refcnt;

        /* is this blkg online? protected by both blkcg and q locks */
        bool online;

        struct blkg_rwstat stat_bytes;
        struct blkg_rwstat stat_ios;

        struct blkg_policy_data *pd[BLKCG_MAX_POLS];

        struct rcu_head rcu_head;
};

typedef struct blkcg_policy_data *(blkcg_pol_alloc_cpd_fn)(gfp_t gfp);
typedef void (blkcg_pol_init_cpd_fn)(struct blkcg_policy_data *cpd);
typedef void (blkcg_pol_free_cpd_fn)(struct blkcg_policy_data *cpd);
typedef void (blkcg_pol_bind_cpd_fn)(struct blkcg_policy_data *cpd);
typedef struct blkg_policy_data *(blkcg_pol_alloc_pd_fn)(gfp_t gfp, int node);
typedef void (blkcg_pol_init_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_online_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_offline_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_free_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkg_policy_data *pd);

struct blkcg_policy {
        int plid;
        /* cgroup files for the policy */
        struct cftype *dfl_cftypes;
        struct cftype *legacy_cftypes;

        /* operations */
        blkcg_pol_alloc_cpd_fn *cpd_alloc_fn;
        blkcg_pol_init_cpd_fn *cpd_init_fn;
        blkcg_pol_free_cpd_fn *cpd_free_fn;
        blkcg_pol_bind_cpd_fn *cpd_bind_fn;

        blkcg_pol_alloc_pd_fn *pd_alloc_fn;
        blkcg_pol_init_pd_fn *pd_init_fn;
        blkcg_pol_online_pd_fn *pd_online_fn;
        blkcg_pol_offline_pd_fn *pd_offline_fn;
        blkcg_pol_free_pd_fn *pd_free_fn;
        blkcg_pol_reset_pd_stats_fn *pd_reset_stats_fn;
};

extern struct blkcg blkcg_root;
extern struct cgroup_subsys_state * const blkcg_root_css;

struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg,
                                      struct request_queue *q, bool update_hint);
struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
                                    struct request_queue *q);
int blkcg_init_queue(struct request_queue *q);
void blkcg_drain_queue(struct request_queue *q);
void blkcg_exit_queue(struct request_queue *q);

/* Blkio controller policy registration */
int blkcg_policy_register(struct blkcg_policy *pol);
void blkcg_policy_unregister(struct blkcg_policy *pol);
int blkcg_activate_policy(struct request_queue *q,
                          const struct blkcg_policy *pol);
void blkcg_deactivate_policy(struct request_queue *q,
                             const struct blkcg_policy *pol);

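/*
 * Illustrative registration sketch (hypothetical policy "foo"; the fields
 * set and the call sites shown are an example, not a prescription):
 *
 *      static struct blkcg_policy blkcg_policy_foo = {
 *              .dfl_cftypes    = foo_dfl_files,
 *              .legacy_cftypes = foo_legacy_files,
 *              .pd_alloc_fn    = foo_pd_alloc,
 *              .pd_init_fn     = foo_pd_init,
 *              .pd_free_fn     = foo_pd_free,
 *      };
 *
 *      // module init:  blkcg_policy_register(&blkcg_policy_foo);
 *      // per queue:    blkcg_activate_policy(q, &blkcg_policy_foo);
 *      // teardown:     blkcg_deactivate_policy(q, &blkcg_policy_foo);
 *      //               blkcg_policy_unregister(&blkcg_policy_foo);
 */
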
const char *blkg_dev_name(struct blkcg_gq *blkg);
void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
                       u64 (*prfill)(struct seq_file *,
                                     struct blkg_policy_data *, int),
                       const struct blkcg_policy *pol, int data,
                       bool show_total);
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v);
u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
                         const struct blkg_rwstat *rwstat);
u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off);
u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
                       int off);
int blkg_print_stat_bytes(struct seq_file *sf, void *v);
int blkg_print_stat_ios(struct seq_file *sf, void *v);
int blkg_print_stat_bytes_recursive(struct seq_file *sf, void *v);
int blkg_print_stat_ios_recursive(struct seq_file *sf, void *v);

u64 blkg_stat_recursive_sum(struct blkcg_gq *blkg,
                            struct blkcg_policy *pol, int off);
struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkcg_gq *blkg,
                                             struct blkcg_policy *pol, int off);

struct blkg_conf_ctx {
        struct gendisk *disk;
        struct blkcg_gq *blkg;
        char *body;
};

int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
                   char *input, struct blkg_conf_ctx *ctx);
void blkg_conf_finish(struct blkg_conf_ctx *ctx);


static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css)
{
        return css ? container_of(css, struct blkcg, css) : NULL;
}

static inline struct blkcg *task_blkcg(struct task_struct *tsk)
{
        return css_to_blkcg(task_css(tsk, io_cgrp_id));
}

static inline struct blkcg *bio_blkcg(struct bio *bio)
{
        if (bio && bio->bi_css)
                return css_to_blkcg(bio->bi_css);
        return task_blkcg(current);
}

static inline struct cgroup_subsys_state *
task_get_blkcg_css(struct task_struct *task)
{
        return task_get_css(task, io_cgrp_id);
}

/**
 * blkcg_parent - get the parent of a blkcg
 * @blkcg: blkcg of interest
 *
 * Return the parent blkcg of @blkcg.  Can be called anytime.
 */
static inline struct blkcg *blkcg_parent(struct blkcg *blkcg)
{
        return css_to_blkcg(blkcg->css.parent);
}

/**
 * __blkg_lookup - internal version of blkg_lookup()
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 * @update_hint: whether to update lookup hint with the result or not
 *
 * This is the internal version and shouldn't be used by policy
 * implementations.  Looks up blkgs for the @blkcg - @q pair regardless of
 * @q's bypass state.  If @update_hint is %true, the caller should be
 * holding @q->queue_lock and lookup hint is updated on success.
 */
static inline struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg,
                                             struct request_queue *q,
                                             bool update_hint)
{
        struct blkcg_gq *blkg;

        if (blkcg == &blkcg_root)
                return q->root_blkg;

        blkg = rcu_dereference(blkcg->blkg_hint);
        if (blkg && blkg->q == q)
                return blkg;

        return blkg_lookup_slowpath(blkcg, q, update_hint);
}

/**
 * blkg_lookup - lookup blkg for the specified blkcg - q pair
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 *
 * Lookup blkg for the @blkcg - @q pair.  This function should be called
 * under RCU read lock and is guaranteed to return %NULL if @q is bypassing
 * - see blk_queue_bypass_start() for details.
 */
static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg,
                                           struct request_queue *q)
{
        WARN_ON_ONCE(!rcu_read_lock_held());

        if (unlikely(blk_queue_bypass(q)))
                return NULL;
        return __blkg_lookup(blkcg, q, false);
}

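/*
 * Illustrative lookup under the required RCU protection (a sketch of the
 * same pattern blk_get_rl() below uses; error handling omitted):
 *
 *      struct blkcg_gq *blkg;
 *
 *      rcu_read_lock();
 *      blkg = blkg_lookup(bio_blkcg(bio), q);
 *      if (blkg)
 *              blkg_get(blkg);         // pin it beyond the RCU section
 *      rcu_read_unlock();
 */
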
/**
 * blkg_to_pd - get policy private data
 * @blkg: blkg of interest
 * @pol: policy of interest
 *
 * Return pointer to private data associated with the @blkg-@pol pair.
 */
static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
                                                  struct blkcg_policy *pol)
{
        return blkg ? blkg->pd[pol->plid] : NULL;
}

static inline struct blkcg_policy_data *blkcg_to_cpd(struct blkcg *blkcg,
                                                     struct blkcg_policy *pol)
{
        return blkcg ? blkcg->cpd[pol->plid] : NULL;
}

/**
 * pd_to_blkg - get blkg associated with policy private data
 * @pd: policy private data of interest
 *
 * @pd is policy private data.  Determine the blkg it's associated with.
 */
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd)
{
        return pd ? pd->blkg : NULL;
}

static inline struct blkcg *cpd_to_blkcg(struct blkcg_policy_data *cpd)
{
        return cpd ? cpd->blkcg : NULL;
}

/**
 * blkg_path - format cgroup path of blkg
 * @blkg: blkg of interest
 * @buf: target buffer
 * @buflen: target buffer length
 *
 * Format the path of the cgroup of @blkg into @buf.
 */
static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
{
        return cgroup_path(blkg->blkcg->css.cgroup, buf, buflen);
}

/**
 * blkg_get - get a blkg reference
 * @blkg: blkg to get
 *
 * The caller should be holding an existing reference.
 */
static inline void blkg_get(struct blkcg_gq *blkg)
{
        WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
        atomic_inc(&blkg->refcnt);
}

void __blkg_release_rcu(struct rcu_head *rcu);

/**
 * blkg_put - put a blkg reference
 * @blkg: blkg to put
 */
static inline void blkg_put(struct blkcg_gq *blkg)
{
        WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
        if (atomic_dec_and_test(&blkg->refcnt))
                call_rcu(&blkg->rcu_head, __blkg_release_rcu);
}

/**
 * blkg_for_each_descendant_pre - pre-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Walk @d_blkg through the descendants of @p_blkg.  Must be used with RCU
 * read locked.  If called under either blkcg or queue lock, the iteration
 * is guaranteed to include all and only online blkgs.  The caller may
 * update @pos_css by calling css_rightmost_descendant() to skip subtree.
 * @p_blkg is included in the iteration and the first node to be visited.
 */
#define blkg_for_each_descendant_pre(d_blkg, pos_css, p_blkg)          \
        css_for_each_descendant_pre((pos_css), &(p_blkg)->blkcg->css)  \
                if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),   \
                                              (p_blkg)->q, false)))

/**
 * blkg_for_each_descendant_post - post-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Similar to blkg_for_each_descendant_pre() but performs post-order
 * traversal instead.  Synchronization rules are the same.  @p_blkg is
 * included in the iteration and the last node to be visited.
 */
#define blkg_for_each_descendant_post(d_blkg, pos_css, p_blkg)         \
        css_for_each_descendant_post((pos_css), &(p_blkg)->blkcg->css) \
                if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),   \
                                              (p_blkg)->q, false)))

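/*
 * Illustrative subtree walk (sketch only; must run under rcu_read_lock(),
 * and under the blkcg or queue lock if only online blkgs may be visited):
 *
 *      struct cgroup_subsys_state *pos_css;
 *      struct blkcg_gq *pos;
 *      u64 bytes = 0;
 *
 *      rcu_read_lock();
 *      blkg_for_each_descendant_pre(pos, pos_css, blkg)
 *              bytes += blkg_rwstat_total(&pos->stat_bytes);
 *      rcu_read_unlock();
 */
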
/**
 * blk_get_rl - get request_list to use
 * @q: request_queue of interest
 * @bio: bio which will be attached to the allocated request (may be %NULL)
 *
 * The caller wants to allocate a request from @q to use for @bio.  Find
 * the request_list to use and obtain a reference on it.  Should be called
 * under queue_lock.  This function is guaranteed to return non-%NULL
 * request_list.
 */
static inline struct request_list *blk_get_rl(struct request_queue *q,
                                              struct bio *bio)
{
        struct blkcg *blkcg;
        struct blkcg_gq *blkg;

        rcu_read_lock();

        blkcg = bio_blkcg(bio);

        /* bypass blkg lookup and use @q->root_rl directly for root */
        if (blkcg == &blkcg_root)
                goto root_rl;

        /*
         * Try to use blkg->rl.  blkg lookup may fail under memory pressure
         * or if either the blkcg or queue is going away.  Fall back to
         * root_rl in such cases.
         */
        blkg = blkg_lookup(blkcg, q);
        if (unlikely(!blkg))
                goto root_rl;

        blkg_get(blkg);
        rcu_read_unlock();
        return &blkg->rl;
root_rl:
        rcu_read_unlock();
        return &q->root_rl;
}

/**
 * blk_put_rl - put request_list
 * @rl: request_list to put
 *
 * Put the reference acquired by blk_get_rl().  Should be called under
 * queue_lock.
 */
static inline void blk_put_rl(struct request_list *rl)
{
        if (rl->blkg->blkcg != &blkcg_root)
                blkg_put(rl->blkg);
}

/**
 * blk_rq_set_rl - associate a request with a request_list
 * @rq: request of interest
 * @rl: target request_list
 *
 * Associate @rq with @rl so that accounting and freeing can know the
 * request_list @rq came from.
 */
static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl)
{
        rq->rl = rl;
}

/**
 * blk_rq_rl - return the request_list a request came from
 * @rq: request of interest
 *
 * Return the request_list @rq is allocated from.
 */
static inline struct request_list *blk_rq_rl(struct request *rq)
{
        return rq->rl;
}

struct request_list *__blk_queue_next_rl(struct request_list *rl,
                                         struct request_queue *q);
/**
 * blk_queue_for_each_rl - iterate through all request_lists of a request_queue
 *
 * Should be used under queue_lock.
 */
#define blk_queue_for_each_rl(rl, q)    \
        for ((rl) = &(q)->root_rl; (rl); (rl) = __blk_queue_next_rl((rl), (q)))

static inline int blkg_stat_init(struct blkg_stat *stat, gfp_t gfp)
{
        int ret;

        ret = percpu_counter_init(&stat->cpu_cnt, 0, gfp);
        if (ret)
                return ret;

        atomic64_set(&stat->aux_cnt, 0);
        return 0;
}

static inline void blkg_stat_exit(struct blkg_stat *stat)
{
        percpu_counter_destroy(&stat->cpu_cnt);
}

/**
 * blkg_stat_add - add a value to a blkg_stat
 * @stat: target blkg_stat
 * @val: value to add
 *
 * Add @val to @stat.  The caller must ensure that IRQs on the same CPU
 * don't re-enter this function for the same counter.
 */
static inline void blkg_stat_add(struct blkg_stat *stat, uint64_t val)
{
        percpu_counter_add_batch(&stat->cpu_cnt, val, BLKG_STAT_CPU_BATCH);
}

/**
 * blkg_stat_read - read the current value of a blkg_stat
 * @stat: blkg_stat to read
 */
static inline uint64_t blkg_stat_read(struct blkg_stat *stat)
{
        return percpu_counter_sum_positive(&stat->cpu_cnt);
}

/**
 * blkg_stat_reset - reset a blkg_stat
 * @stat: blkg_stat to reset
 */
static inline void blkg_stat_reset(struct blkg_stat *stat)
{
        percpu_counter_set(&stat->cpu_cnt, 0);
        atomic64_set(&stat->aux_cnt, 0);
}

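/*
 * Illustrative blkg_stat lifecycle (sketch; "fg" is a hypothetical policy
 * structure embedding a blkg_stat member named "time"):
 *
 *      if (blkg_stat_init(&fg->time, gfp))            // pd_alloc_fn()
 *              return NULL;
 *      blkg_stat_add(&fg->time, used);                 // IO path
 *      seq_printf(sf, "%llu\n",
 *                 (unsigned long long)blkg_stat_read(&fg->time));
 *      blkg_stat_reset(&fg->time);                     // pd_reset_stats_fn()
 *      blkg_stat_exit(&fg->time);                      // pd_free_fn()
 */
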
/**
 * blkg_stat_add_aux - add a blkg_stat into another's aux count
 * @to: the destination blkg_stat
 * @from: the source
 *
 * Add @from's count including the aux one to @to's aux count.
 */
static inline void blkg_stat_add_aux(struct blkg_stat *to,
                                     struct blkg_stat *from)
{
        atomic64_add(blkg_stat_read(from) + atomic64_read(&from->aux_cnt),
                     &to->aux_cnt);
}

static inline int blkg_rwstat_init(struct blkg_rwstat *rwstat, gfp_t gfp)
{
        int i, ret;

        for (i = 0; i < BLKG_RWSTAT_NR; i++) {
                ret = percpu_counter_init(&rwstat->cpu_cnt[i], 0, gfp);
                if (ret) {
                        while (--i >= 0)
                                percpu_counter_destroy(&rwstat->cpu_cnt[i]);
                        return ret;
                }
                atomic64_set(&rwstat->aux_cnt[i], 0);
        }
        return 0;
}

static inline void blkg_rwstat_exit(struct blkg_rwstat *rwstat)
{
        int i;

        for (i = 0; i < BLKG_RWSTAT_NR; i++)
                percpu_counter_destroy(&rwstat->cpu_cnt[i]);
}

/**
 * blkg_rwstat_add - add a value to a blkg_rwstat
 * @rwstat: target blkg_rwstat
 * @op: REQ_OP and flags
 * @val: value to add
 *
 * Add @val to @rwstat.  The counters are chosen according to @op.  The
 * caller is responsible for synchronizing calls to this function.
 */
static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat,
                                   unsigned int op, uint64_t val)
{
        struct percpu_counter *cnt;

        if (op_is_write(op))
                cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_WRITE];
        else
                cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_READ];

        percpu_counter_add_batch(cnt, val, BLKG_STAT_CPU_BATCH);

        if (op_is_sync(op))
                cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_SYNC];
        else
                cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_ASYNC];

        percpu_counter_add_batch(cnt, val, BLKG_STAT_CPU_BATCH);
}

/**
 * blkg_rwstat_read - read the current values of a blkg_rwstat
 * @rwstat: blkg_rwstat to read
 *
 * Read the current snapshot of @rwstat and return it in the aux counts.
 */
static inline struct blkg_rwstat blkg_rwstat_read(struct blkg_rwstat *rwstat)
{
        struct blkg_rwstat result;
        int i;

        for (i = 0; i < BLKG_RWSTAT_NR; i++)
                atomic64_set(&result.aux_cnt[i],
                             percpu_counter_sum_positive(&rwstat->cpu_cnt[i]));
        return result;
}

/**
 * blkg_rwstat_total - read the total count of a blkg_rwstat
 * @rwstat: blkg_rwstat to read
 *
 * Return the total count of @rwstat regardless of the IO direction.  This
 * function can be called without synchronization and takes care of u64
 * atomicity.
 */
static inline uint64_t blkg_rwstat_total(struct blkg_rwstat *rwstat)
{
        struct blkg_rwstat tmp = blkg_rwstat_read(rwstat);

        return atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_READ]) +
                atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_WRITE]);
}

/**
 * blkg_rwstat_reset - reset a blkg_rwstat
 * @rwstat: blkg_rwstat to reset
 */
static inline void blkg_rwstat_reset(struct blkg_rwstat *rwstat)
{
        int i;

        for (i = 0; i < BLKG_RWSTAT_NR; i++) {
                percpu_counter_set(&rwstat->cpu_cnt[i], 0);
                atomic64_set(&rwstat->aux_cnt[i], 0);
        }
}

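/*
 * Illustrative read side (sketch, e.g. from a prfill callback): the
 * snapshot returned by blkg_rwstat_read() carries its values in aux_cnt[].
 *
 *      struct blkg_rwstat rws = blkg_rwstat_read(&blkg->stat_bytes);
 *      u64 rbytes = atomic64_read(&rws.aux_cnt[BLKG_RWSTAT_READ]);
 *      u64 wbytes = atomic64_read(&rws.aux_cnt[BLKG_RWSTAT_WRITE]);
 */
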
/**
 * blkg_rwstat_add_aux - add a blkg_rwstat into another's aux count
 * @to: the destination blkg_rwstat
 * @from: the source
 *
 * Add @from's count including the aux one to @to's aux count.
 */
static inline void blkg_rwstat_add_aux(struct blkg_rwstat *to,
                                       struct blkg_rwstat *from)
{
        struct blkg_rwstat v = blkg_rwstat_read(from);
        int i;

        for (i = 0; i < BLKG_RWSTAT_NR; i++)
                atomic64_add(atomic64_read(&v.aux_cnt[i]) +
                             atomic64_read(&from->aux_cnt[i]),
                             &to->aux_cnt[i]);
}

#ifdef CONFIG_BLK_DEV_THROTTLING
extern bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
                           struct bio *bio);
#else
static inline bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
                                  struct bio *bio) { return false; }
#endif

static inline bool blkcg_bio_issue_check(struct request_queue *q,
                                         struct bio *bio)
{
        struct blkcg *blkcg;
        struct blkcg_gq *blkg;
        bool throtl = false;

        rcu_read_lock();
        blkcg = bio_blkcg(bio);

        /* associate blkcg if bio hasn't attached one */
        bio_associate_blkcg(bio, &blkcg->css);

        blkg = blkg_lookup(blkcg, q);
        if (unlikely(!blkg)) {
                spin_lock_irq(q->queue_lock);
                blkg = blkg_lookup_create(blkcg, q);
                if (IS_ERR(blkg))
                        blkg = NULL;
                spin_unlock_irq(q->queue_lock);
        }

        throtl = blk_throtl_bio(q, blkg, bio);

        if (!throtl) {
                blkg = blkg ?: q->root_blkg;
                blkg_rwstat_add(&blkg->stat_bytes, bio->bi_opf,
                                bio->bi_iter.bi_size);
                blkg_rwstat_add(&blkg->stat_ios, bio->bi_opf, 1);
        }

        rcu_read_unlock();
        return !throtl;
}

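/*
 * Illustrative call site (sketch; the real hook sits in the block layer's
 * bio submission path):
 *
 *      if (!blkcg_bio_issue_check(q, bio))
 *              return;         // bio throttled and held by blk-throttle
 */
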
#else   /* CONFIG_BLK_CGROUP */

struct blkcg {
};

struct blkg_policy_data {
};

struct blkcg_policy_data {
};

struct blkcg_gq {
};

struct blkcg_policy {
};

#define blkcg_root_css  ((struct cgroup_subsys_state *)ERR_PTR(-EINVAL))

static inline struct cgroup_subsys_state *
task_get_blkcg_css(struct task_struct *task)
{
        return NULL;
}

#ifdef CONFIG_BLOCK

static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, void *key) { return NULL; }
static inline int blkcg_init_queue(struct request_queue *q) { return 0; }
static inline void blkcg_drain_queue(struct request_queue *q) { }
static inline void blkcg_exit_queue(struct request_queue *q) { }
static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; }
static inline void blkcg_policy_unregister(struct blkcg_policy *pol) { }
static inline int blkcg_activate_policy(struct request_queue *q,
                                        const struct blkcg_policy *pol) { return 0; }
static inline void blkcg_deactivate_policy(struct request_queue *q,
                                           const struct blkcg_policy *pol) { }

static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; }

static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
                                                  struct blkcg_policy *pol) { return NULL; }
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd) { return NULL; }
static inline char *blkg_path(struct blkcg_gq *blkg) { return NULL; }
static inline void blkg_get(struct blkcg_gq *blkg) { }
static inline void blkg_put(struct blkcg_gq *blkg) { }

static inline struct request_list *blk_get_rl(struct request_queue *q,
                                              struct bio *bio) { return &q->root_rl; }
static inline void blk_put_rl(struct request_list *rl) { }
static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl) { }
static inline struct request_list *blk_rq_rl(struct request *rq) { return &rq->q->root_rl; }

static inline bool blkcg_bio_issue_check(struct request_queue *q,
                                         struct bio *bio) { return true; }

#define blk_queue_for_each_rl(rl, q)    \
        for ((rl) = &(q)->root_rl; (rl); (rl) = NULL)

#endif  /* CONFIG_BLOCK */
#endif  /* CONFIG_BLK_CGROUP */
#endif  /* _BLK_CGROUP_H */