/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BLK_CGROUP_H
#define _BLK_CGROUP_H
/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <[email protected]>
 *
 * Copyright (C) 2008 Fabio Checconi <[email protected]>
 *		      Paolo Valente <[email protected]>
 *
 * Copyright (C) 2009 Vivek Goyal <[email protected]>
 *	              Nauman Rafique <[email protected]>
 */

#include <linux/cgroup.h>
#include <linux/percpu.h>
#include <linux/percpu_counter.h>
#include <linux/u64_stats_sync.h>
#include <linux/seq_file.h>
#include <linux/radix-tree.h>
#include <linux/blkdev.h>
#include <linux/atomic.h>
#include <linux/kthread.h>
#include <linux/fs.h>

/* percpu_counter batch for blkg_[rw]stats, per-cpu drift doesn't matter */
#define BLKG_STAT_CPU_BATCH	(INT_MAX / 2)

/* Max limits for throttle policy */
#define THROTL_IOPS_MAX		UINT_MAX

#ifdef CONFIG_BLK_CGROUP

enum blkg_iostat_type {
	BLKG_IOSTAT_READ,
	BLKG_IOSTAT_WRITE,
	BLKG_IOSTAT_DISCARD,

	BLKG_IOSTAT_NR,
};

struct blkcg_gq;

struct blkcg {
	struct cgroup_subsys_state	css;
	spinlock_t			lock;

	struct radix_tree_root		blkg_tree;
	struct blkcg_gq	__rcu		*blkg_hint;
	struct hlist_head		blkg_list;

	struct blkcg_policy_data	*cpd[BLKCG_MAX_POLS];

	struct list_head		all_blkcgs_node;
#ifdef CONFIG_CGROUP_WRITEBACK
	struct list_head		cgwb_list;
	refcount_t			cgwb_refcnt;
#endif
};

struct blkg_iostat {
	u64				bytes[BLKG_IOSTAT_NR];
	u64				ios[BLKG_IOSTAT_NR];
};

struct blkg_iostat_set {
	struct u64_stats_sync		sync;
	struct blkg_iostat		cur;
	struct blkg_iostat		last;
};

/*
 * A blkcg_gq (blkg) is an association between a block cgroup (blkcg) and a
 * request_queue (q). This is used by blkcg policies which need to track
 * information per blkcg - q pair.
 *
 * There can be multiple active blkcg policies and each blkg:policy pair is
 * represented by a blkg_policy_data which is allocated and freed by each
 * policy's pd_alloc/free_fn() methods. A policy can allocate a private data
 * area by allocating a larger data structure which embeds blkg_policy_data
 * at the beginning.
 */
struct blkg_policy_data {
	/* the blkg and policy id this per-policy data belongs to */
	struct blkcg_gq			*blkg;
	int				plid;
};

/*
 * Policies that need to keep per-blkcg data which is independent from any
 * request_queue associated with it should implement cpd_alloc/free_fn()
 * methods. A policy can allocate a private data area by allocating a larger
 * data structure which embeds blkcg_policy_data at the beginning.
 * cpd_init() is invoked to let each policy handle per-blkcg data.
 */
struct blkcg_policy_data {
	/* the blkcg and policy id this per-policy data belongs to */
	struct blkcg			*blkcg;
	int				plid;
};
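
/*
 * Example (illustrative sketch, not part of this interface): a policy
 * embeds blkg_policy_data at the start of its own per-blkg structure and
 * recovers its private type with container_of(), mirroring css_to_blkcg()
 * below. The "example_*" names are hypothetical.
 *
 *	struct example_blkg_pd {
 *		struct blkg_policy_data pd;	(must be placed first)
 *		u64 dispatched;			(policy-private state)
 *	};
 *
 *	static inline struct example_blkg_pd *
 *	pd_to_example(struct blkg_policy_data *pd)
 *	{
 *		return pd ? container_of(pd, struct example_blkg_pd, pd) : NULL;
 *	}
 */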

/* association between a blk cgroup and a request queue */
struct blkcg_gq {
	/* Pointer to the associated request_queue */
	struct request_queue		*q;
	struct list_head		q_node;
	struct hlist_node		blkcg_node;
	struct blkcg			*blkcg;

	/*
	 * Each blkg gets congested separately and the congestion state is
	 * propagated to the matching bdi_writeback_congested.
	 */
	struct bdi_writeback_congested	*wb_congested;

	/* all non-root blkcg_gq's are guaranteed to have access to parent */
	struct blkcg_gq			*parent;

	/* reference count */
	struct percpu_ref		refcnt;

	/* is this blkg online? protected by both blkcg and q locks */
	bool				online;

	struct blkg_iostat_set __percpu	*iostat_cpu;
	struct blkg_iostat_set		iostat;

	struct blkg_policy_data		*pd[BLKCG_MAX_POLS];

	spinlock_t			async_bio_lock;
	struct bio_list			async_bios;
	struct work_struct		async_bio_work;

	atomic_t			use_delay;
	atomic64_t			delay_nsec;
	atomic64_t			delay_start;
	u64				last_delay;
	int				last_use;

	struct rcu_head			rcu_head;
};

typedef struct blkcg_policy_data *(blkcg_pol_alloc_cpd_fn)(gfp_t gfp);
typedef void (blkcg_pol_init_cpd_fn)(struct blkcg_policy_data *cpd);
typedef void (blkcg_pol_free_cpd_fn)(struct blkcg_policy_data *cpd);
typedef void (blkcg_pol_bind_cpd_fn)(struct blkcg_policy_data *cpd);
typedef struct blkg_policy_data *(blkcg_pol_alloc_pd_fn)(gfp_t gfp,
				struct request_queue *q, struct blkcg *blkcg);
typedef void (blkcg_pol_init_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_online_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_offline_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_free_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkg_policy_data *pd);
typedef size_t (blkcg_pol_stat_pd_fn)(struct blkg_policy_data *pd, char *buf,
				      size_t size);

struct blkcg_policy {
	int				plid;
	/* cgroup files for the policy */
	struct cftype			*dfl_cftypes;
	struct cftype			*legacy_cftypes;

	/* operations */
	blkcg_pol_alloc_cpd_fn		*cpd_alloc_fn;
	blkcg_pol_init_cpd_fn		*cpd_init_fn;
	blkcg_pol_free_cpd_fn		*cpd_free_fn;
	blkcg_pol_bind_cpd_fn		*cpd_bind_fn;

	blkcg_pol_alloc_pd_fn		*pd_alloc_fn;
	blkcg_pol_init_pd_fn		*pd_init_fn;
	blkcg_pol_online_pd_fn		*pd_online_fn;
	blkcg_pol_offline_pd_fn		*pd_offline_fn;
	blkcg_pol_free_pd_fn		*pd_free_fn;
	blkcg_pol_reset_pd_stats_fn	*pd_reset_stats_fn;
	blkcg_pol_stat_pd_fn		*pd_stat_fn;
};

extern struct blkcg blkcg_root;
extern struct cgroup_subsys_state * const blkcg_root_css;
extern bool blkcg_debug_stats;

struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg,
				      struct request_queue *q, bool update_hint);
struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg,
				      struct request_queue *q);
struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
				    struct request_queue *q);
int blkcg_init_queue(struct request_queue *q);
void blkcg_drain_queue(struct request_queue *q);
void blkcg_exit_queue(struct request_queue *q);

/* Blkio controller policy registration */
int blkcg_policy_register(struct blkcg_policy *pol);
void blkcg_policy_unregister(struct blkcg_policy *pol);
int blkcg_activate_policy(struct request_queue *q,
			  const struct blkcg_policy *pol);
void blkcg_deactivate_policy(struct request_queue *q,
			     const struct blkcg_policy *pol);

const char *blkg_dev_name(struct blkcg_gq *blkg);
void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
		       u64 (*prfill)(struct seq_file *,
				     struct blkg_policy_data *, int),
		       const struct blkcg_policy *pol, int data,
		       bool show_total);
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v);
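
/*
 * Example (illustrative sketch): a policy is typically registered once at
 * init time and then activated per request_queue; teardown runs in the
 * reverse order. "example_policy", "example_pd_alloc" and
 * "example_pd_free" are hypothetical names.
 *
 *	static struct blkcg_policy example_policy = {
 *		.pd_alloc_fn	= example_pd_alloc,
 *		.pd_free_fn	= example_pd_free,
 *	};
 *
 *	ret = blkcg_policy_register(&example_policy);	(assigns ->plid)
 *	...
 *	ret = blkcg_activate_policy(q, &example_policy);
 *	...
 *	blkcg_deactivate_policy(q, &example_policy);
 *	blkcg_policy_unregister(&example_policy);
 */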

struct blkg_conf_ctx {
	struct gendisk			*disk;
	struct blkcg_gq			*blkg;
	char				*body;
};

struct gendisk *blkcg_conf_get_disk(char **inputp);
int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
		   char *input, struct blkg_conf_ctx *ctx);
void blkg_conf_finish(struct blkg_conf_ctx *ctx);

/**
 * blkcg_css - find the current css
 *
 * Find the css associated with either the kthread or the current task.
 * This may return a dying css, so it is up to the caller to use tryget
 * logic to confirm it is alive and well.
 */
static inline struct cgroup_subsys_state *blkcg_css(void)
{
	struct cgroup_subsys_state *css;

	css = kthread_blkcg();
	if (css)
		return css;
	return task_css(current, io_cgrp_id);
}

static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css)
{
	return css ? container_of(css, struct blkcg, css) : NULL;
}

/**
 * __bio_blkcg - internal, inconsistent version to get blkcg
 *
 * DO NOT USE.
 * This function is inconsistent and consequently is dangerous to use. The
 * first part of the function returns a blkcg where a reference is owned by
 * the bio. This means it does not need to be rcu protected as it cannot go
 * away with the bio owning a reference to it. However, the latter
 * potentially gets it from task_css(). This can race against task
 * migration and the cgroup dying. It is also semantically different as it
 * must be called rcu protected and is susceptible to failure when trying
 * to get a reference to it. Therefore, it is not ok to assume that *_get()
 * will always succeed on the blkcg returned here.
 */
static inline struct blkcg *__bio_blkcg(struct bio *bio)
{
	if (bio && bio->bi_blkg)
		return bio->bi_blkg->blkcg;
	return css_to_blkcg(blkcg_css());
}

/**
 * bio_blkcg - grab the blkcg associated with a bio
 * @bio: target bio
 *
 * This returns the blkcg associated with a bio, %NULL if not associated.
 * Callers are expected to either handle %NULL or know association has been
 * done prior to calling this.
 */
static inline struct blkcg *bio_blkcg(struct bio *bio)
{
	if (bio && bio->bi_blkg)
		return bio->bi_blkg->blkcg;
	return NULL;
}
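
/*
 * Example (illustrative sketch): because blkcg_css() may return a dying
 * css, a caller that needs to hold on to the blkcg should confirm it is
 * alive with tryget logic before using it:
 *
 *	struct cgroup_subsys_state *css;
 *
 *	rcu_read_lock();
 *	css = blkcg_css();
 *	if (css_tryget_online(css)) {
 *		...	(use css_to_blkcg(css))
 *		css_put(css);
 *	}
 *	rcu_read_unlock();
 */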

static inline bool blk_cgroup_congested(void)
{
	struct cgroup_subsys_state *css;
	bool ret = false;

	rcu_read_lock();
	css = kthread_blkcg();
	if (!css)
		css = task_css(current, io_cgrp_id);
	while (css) {
		if (atomic_read(&css->cgroup->congestion_count)) {
			ret = true;
			break;
		}
		css = css->parent;
	}
	rcu_read_unlock();
	return ret;
}

/**
 * bio_issue_as_root_blkg - see if this bio needs to be issued as root blkg
 * @bio: the target bio
 *
 * Return: true if this bio needs to be submitted with the root blkg
 * context.
 *
 * In order to avoid priority inversions we sometimes need to issue a bio
 * as if it were attached to the root blkg, and then backcharge to the
 * actual owning blkg. The idea is we do bio_blkcg() to look up the actual
 * context for the bio and attach the appropriate blkg to the bio. Then we
 * call this helper and if it is true run with the root blkg for that queue
 * and then do any backcharging to the originating cgroup once the io is
 * complete.
 */
static inline bool bio_issue_as_root_blkg(struct bio *bio)
{
	return (bio->bi_opf & (REQ_META | REQ_SWAP)) != 0;
}

/**
 * blkcg_parent - get the parent of a blkcg
 * @blkcg: blkcg of interest
 *
 * Return the parent blkcg of @blkcg. Can be called anytime.
 */
static inline struct blkcg *blkcg_parent(struct blkcg *blkcg)
{
	return css_to_blkcg(blkcg->css.parent);
}

/**
 * __blkg_lookup - internal version of blkg_lookup()
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 * @update_hint: whether to update lookup hint with the result or not
 *
 * This is the internal version and shouldn't be used by policy
 * implementations. Looks up the blkg for the @blkcg - @q pair regardless
 * of @q's bypass state. If @update_hint is %true, the caller should be
 * holding @q->queue_lock and the lookup hint is updated on success.
 */
static inline struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg,
					     struct request_queue *q,
					     bool update_hint)
{
	struct blkcg_gq *blkg;

	if (blkcg == &blkcg_root)
		return q->root_blkg;

	blkg = rcu_dereference(blkcg->blkg_hint);
	if (blkg && blkg->q == q)
		return blkg;

	return blkg_lookup_slowpath(blkcg, q, update_hint);
}

/**
 * blkg_lookup - lookup blkg for the specified blkcg - q pair
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 *
 * Lookup blkg for the @blkcg - @q pair. This function should be called
 * under RCU read lock.
 */
static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg,
					   struct request_queue *q)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return __blkg_lookup(blkcg, q, false);
}
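
/*
 * Example (illustrative sketch): looking up the blkg for the current
 * task's blkcg on a request_queue "q" (an assumed variable). The lookup
 * must stay under the RCU read lock for as long as the unreferenced blkg
 * is used:
 *
 *	struct blkcg_gq *blkg;
 *
 *	rcu_read_lock();
 *	blkg = blkg_lookup(css_to_blkcg(blkcg_css()), q);
 *	if (blkg) {
 *		...	(use blkg, or pin it with blkg_tryget())
 *	}
 *	rcu_read_unlock();
 */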

/**
 * blk_queue_root_blkg - return blkg for the (blkcg_root, @q) pair
 * @q: request_queue of interest
 *
 * Lookup blkg for @q at the root level. See also blkg_lookup().
 */
static inline struct blkcg_gq *blk_queue_root_blkg(struct request_queue *q)
{
	return q->root_blkg;
}

/**
 * blkg_to_pd - get policy private data
 * @blkg: blkg of interest
 * @pol: policy of interest
 *
 * Return pointer to private data associated with the @blkg-@pol pair.
 */
static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol)
{
	return blkg ? blkg->pd[pol->plid] : NULL;
}

static inline struct blkcg_policy_data *blkcg_to_cpd(struct blkcg *blkcg,
						     struct blkcg_policy *pol)
{
	return blkcg ? blkcg->cpd[pol->plid] : NULL;
}

/**
 * pd_to_blkg - get blkg associated with policy private data
 * @pd: policy private data of interest
 *
 * @pd is policy private data. Determine the blkg it's associated with.
 */
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd)
{
	return pd ? pd->blkg : NULL;
}

static inline struct blkcg *cpd_to_blkcg(struct blkcg_policy_data *cpd)
{
	return cpd ? cpd->blkcg : NULL;
}

extern void blkcg_destroy_blkgs(struct blkcg *blkcg);

#ifdef CONFIG_CGROUP_WRITEBACK

/**
 * blkcg_cgwb_get - get a reference for blkcg->cgwb_list
 * @blkcg: blkcg of interest
 *
 * This is used to track the number of active wb's related to a blkcg.
 */
static inline void blkcg_cgwb_get(struct blkcg *blkcg)
{
	refcount_inc(&blkcg->cgwb_refcnt);
}

/**
 * blkcg_cgwb_put - put a reference for @blkcg->cgwb_list
 * @blkcg: blkcg of interest
 *
 * This is used to track the number of active wb's related to a blkcg.
 * When this count goes to zero, all active wbs have finished so the
 * blkcg can continue destruction by calling blkcg_destroy_blkgs().
 * This work may occur in cgwb_release_workfn() on the cgwb_release
 * workqueue.
 */
static inline void blkcg_cgwb_put(struct blkcg *blkcg)
{
	if (refcount_dec_and_test(&blkcg->cgwb_refcnt))
		blkcg_destroy_blkgs(blkcg);
}

#else

static inline void blkcg_cgwb_get(struct blkcg *blkcg) { }

static inline void blkcg_cgwb_put(struct blkcg *blkcg)
{
	/* wb isn't being accounted, so trigger destruction right away */
	blkcg_destroy_blkgs(blkcg);
}

#endif

/**
 * blkg_path - format cgroup path of blkg
 * @blkg: blkg of interest
 * @buf: target buffer
 * @buflen: target buffer length
 *
 * Format the path of the cgroup of @blkg into @buf.
 */
static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
{
	return cgroup_path(blkg->blkcg->css.cgroup, buf, buflen);
}

/**
 * blkg_get - get a blkg reference
 * @blkg: blkg to get
 *
 * The caller should be holding an existing reference.
 */
static inline void blkg_get(struct blkcg_gq *blkg)
{
	percpu_ref_get(&blkg->refcnt);
}

/**
 * blkg_tryget - try and get a blkg reference
 * @blkg: blkg to get
 *
 * This is for use when doing an RCU lookup of the blkg. We may be in the
 * midst of freeing this blkg, so we can only use it if the refcnt is not
 * zero.
 */
static inline bool blkg_tryget(struct blkcg_gq *blkg)
{
	return blkg && percpu_ref_tryget(&blkg->refcnt);
}

/**
 * blkg_tryget_closest - try and get a blkg ref on the closest blkg
 * @blkg: blkg to get
 *
 * This needs to be called RCU protected. As the failure mode here is to
 * walk up the blkg tree, this ensures that the blkg->parent pointers are
 * always valid. This returns the blkg that it ended up taking a reference
 * on or %NULL if no reference was taken.
 */
static inline struct blkcg_gq *blkg_tryget_closest(struct blkcg_gq *blkg)
{
	struct blkcg_gq *ret_blkg = NULL;

	WARN_ON_ONCE(!rcu_read_lock_held());

	while (blkg) {
		if (blkg_tryget(blkg)) {
			ret_blkg = blkg;
			break;
		}
		blkg = blkg->parent;
	}

	return ret_blkg;
}

/**
 * blkg_put - put a blkg reference
 * @blkg: blkg to put
 */
static inline void blkg_put(struct blkcg_gq *blkg)
{
	percpu_ref_put(&blkg->refcnt);
}
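
/*
 * Example (illustrative sketch): pinning the closest live blkg under the
 * RCU read lock, then dropping the reference once done with it:
 *
 *	struct blkcg_gq *pinned;
 *
 *	rcu_read_lock();
 *	pinned = blkg_tryget_closest(blkg);
 *	rcu_read_unlock();
 *	if (pinned) {
 *		...	(issue IO against "pinned")
 *		blkg_put(pinned);
 *	}
 */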

/**
 * blkg_for_each_descendant_pre - pre-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Walk @d_blkg through the descendants of @p_blkg. Must be used with RCU
 * read locked. If called under either blkcg or queue lock, the iteration
 * is guaranteed to include all and only online blkgs. The caller may
 * update @pos_css by calling css_rightmost_descendant() to skip a subtree.
 * @p_blkg is included in the iteration and the first node to be visited.
 */
#define blkg_for_each_descendant_pre(d_blkg, pos_css, p_blkg)		\
	css_for_each_descendant_pre((pos_css), &(p_blkg)->blkcg->css)	\
		if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),	\
					      (p_blkg)->q, false)))

/**
 * blkg_for_each_descendant_post - post-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Similar to blkg_for_each_descendant_pre() but performs post-order
 * traversal instead. Synchronization rules are the same. @p_blkg is
 * included in the iteration and the last node to be visited.
 */
#define blkg_for_each_descendant_post(d_blkg, pos_css, p_blkg)		\
	css_for_each_descendant_post((pos_css), &(p_blkg)->blkcg->css)	\
		if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),	\
					      (p_blkg)->q, false)))
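
/*
 * Example (illustrative sketch): visiting every online descendant blkg of
 * "blkg" in pre-order under the RCU read lock:
 *
 *	struct cgroup_subsys_state *pos_css;
 *	struct blkcg_gq *d_blkg;
 *
 *	rcu_read_lock();
 *	blkg_for_each_descendant_pre(d_blkg, pos_css, blkg)
 *		...	(operate on d_blkg)
 *	rcu_read_unlock();
 */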

#ifdef CONFIG_BLK_DEV_THROTTLING
extern bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
			   struct bio *bio);
#else
static inline bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
				  struct bio *bio) { return false; }
#endif

bool __blkcg_punt_bio_submit(struct bio *bio);

static inline bool blkcg_punt_bio_submit(struct bio *bio)
{
	if (bio->bi_opf & REQ_CGROUP_PUNT)
		return __blkcg_punt_bio_submit(bio);
	else
		return false;
}

static inline void blkcg_bio_issue_init(struct bio *bio)
{
	bio_issue_init(&bio->bi_issue, bio_sectors(bio));
}

static inline bool blkcg_bio_issue_check(struct request_queue *q,
					 struct bio *bio)
{
	struct blkcg_gq *blkg;
	bool throtl = false;

	rcu_read_lock();

	if (!bio->bi_blkg) {
		char b[BDEVNAME_SIZE];

		WARN_ONCE(1,
			  "no blkg associated for bio on block-device: %s\n",
			  bio_devname(bio, b));
		bio_associate_blkg(bio);
	}

	blkg = bio->bi_blkg;

	throtl = blk_throtl_bio(q, blkg, bio);

	if (!throtl) {
		struct blkg_iostat_set *bis;
		int rwd, cpu;

		if (op_is_discard(bio->bi_opf))
			rwd = BLKG_IOSTAT_DISCARD;
		else if (op_is_write(bio->bi_opf))
			rwd = BLKG_IOSTAT_WRITE;
		else
			rwd = BLKG_IOSTAT_READ;

		cpu = get_cpu();
		bis = per_cpu_ptr(blkg->iostat_cpu, cpu);
		u64_stats_update_begin(&bis->sync);

		/*
		 * If the bio is flagged with BIO_QUEUE_ENTERED it means this
		 * is a split bio and we would have already accounted for the
		 * size of the bio.
		 */
		if (!bio_flagged(bio, BIO_QUEUE_ENTERED))
			bis->cur.bytes[rwd] += bio->bi_iter.bi_size;
		bis->cur.ios[rwd]++;

		u64_stats_update_end(&bis->sync);
		if (cgroup_subsys_on_dfl(io_cgrp_subsys))
			cgroup_rstat_updated(blkg->blkcg->css.cgroup, cpu);
		put_cpu();
	}

	blkcg_bio_issue_init(bio);

	rcu_read_unlock();
	return !throtl;
}

static inline void blkcg_use_delay(struct blkcg_gq *blkg)
{
	if (atomic_add_return(1, &blkg->use_delay) == 1)
		atomic_inc(&blkg->blkcg->css.cgroup->congestion_count);
}

static inline int blkcg_unuse_delay(struct blkcg_gq *blkg)
{
	int old = atomic_read(&blkg->use_delay);

	if (old == 0)
		return 0;

	/*
	 * We do this song and dance because we can race with somebody else
	 * adding or removing delay. If we just did an atomic_dec we'd end up
	 * negative and we'd already be in trouble. We need to subtract 1 and
	 * then check to see if we were the last delay so we can drop the
	 * congestion count on the cgroup.
	 */
	while (old) {
		int cur = atomic_cmpxchg(&blkg->use_delay, old, old - 1);
		if (cur == old)
			break;
		old = cur;
	}

	if (old == 0)
		return 0;
	if (old == 1)
		atomic_dec(&blkg->blkcg->css.cgroup->congestion_count);
	return 1;
}

static inline void blkcg_clear_delay(struct blkcg_gq *blkg)
{
	int old = atomic_read(&blkg->use_delay);

	if (!old)
		return;
	/* We only want 1 person clearing the congestion count for this blkg. */
	while (old) {
		int cur = atomic_cmpxchg(&blkg->use_delay, old, 0);
		if (cur == old) {
			atomic_dec(&blkg->blkcg->css.cgroup->congestion_count);
			break;
		}
		old = cur;
	}
}

void blkcg_add_delay(struct blkcg_gq *blkg, u64 now, u64 delta);
void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay);
void blkcg_maybe_throttle_current(void);

#else	/* CONFIG_BLK_CGROUP */

struct blkcg {
};

struct blkg_policy_data {
};

struct blkcg_policy_data {
};

struct blkcg_gq {
};

struct blkcg_policy {
};

#define blkcg_root_css	((struct cgroup_subsys_state *)ERR_PTR(-EINVAL))

static inline void blkcg_maybe_throttle_current(void) { }
static inline bool blk_cgroup_congested(void) { return false; }

#ifdef CONFIG_BLOCK

static inline void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay) { }

static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, void *key) { return NULL; }
static inline struct blkcg_gq *blk_queue_root_blkg(struct request_queue *q)
{ return NULL; }
static inline int blkcg_init_queue(struct request_queue *q) { return 0; }
static inline void blkcg_drain_queue(struct request_queue *q) { }
static inline void blkcg_exit_queue(struct request_queue *q) { }
static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; }
static inline void blkcg_policy_unregister(struct blkcg_policy *pol) { }
static inline int blkcg_activate_policy(struct request_queue *q,
					const struct blkcg_policy *pol) { return 0; }
static inline void blkcg_deactivate_policy(struct request_queue *q,
					   const struct blkcg_policy *pol) { }

static inline struct blkcg *__bio_blkcg(struct bio *bio) { return NULL; }
static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; }

static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol) { return NULL; }
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd) { return NULL; }
static inline char *blkg_path(struct blkcg_gq *blkg) { return NULL; }
static inline void blkg_get(struct blkcg_gq *blkg) { }
static inline void blkg_put(struct blkcg_gq *blkg) { }

static inline bool blkcg_punt_bio_submit(struct bio *bio) { return false; }
static inline void blkcg_bio_issue_init(struct bio *bio) { }
static inline bool blkcg_bio_issue_check(struct request_queue *q,
					 struct bio *bio) { return true; }

#define blk_queue_for_each_rl(rl, q)	\
	for ((rl) = &(q)->root_rl; (rl); (rl) = NULL)

#endif	/* CONFIG_BLOCK */
#endif	/* CONFIG_BLK_CGROUP */
#endif	/* _BLK_CGROUP_H */