/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_CGROUP_H
#define _LINUX_CGROUP_H
/*
 * cgroup interface
 *
 * Copyright (C) 2003 BULL SA
 * Copyright (C) 2004-2006 Silicon Graphics, Inc.
 *
 */

#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/nodemask.h>
#include <linux/rculist.h>
#include <linux/cgroupstats.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/kernfs.h>
#include <linux/jump_label.h>
#include <linux/types.h>
#include <linux/ns_common.h>
#include <linux/nsproxy.h>
#include <linux/user_namespace.h>
#include <linux/refcount.h>
#include <linux/kernel_stat.h>

#include <linux/cgroup-defs.h>

#ifdef CONFIG_CGROUPS

/*
 * All weight knobs on the default hierarchy should use the following min,
 * default and max values. The default value is the logarithmic center of
 * MIN and MAX and allows 100x to be expressed in both directions.
 */
#define CGROUP_WEIGHT_MIN		1
#define CGROUP_WEIGHT_DFL		100
#define CGROUP_WEIGHT_MAX		10000

/* walk only threadgroup leaders */
#define CSS_TASK_ITER_PROCS		(1U << 0)
/* walk all threaded css_sets in the domain */
#define CSS_TASK_ITER_THREADED		(1U << 1)

/* internal flags */
#define CSS_TASK_ITER_SKIPPED		(1U << 16)

/* a css_task_iter should be treated as an opaque object */
struct css_task_iter {
	struct cgroup_subsys		*ss;
	unsigned int			flags;

	struct list_head		*cset_pos;
	struct list_head		*cset_head;

	struct list_head		*tcset_pos;
	struct list_head		*tcset_head;

	struct list_head		*task_pos;
	struct list_head		*tasks_head;
	struct list_head		*mg_tasks_head;
	struct list_head		*dying_tasks_head;

	struct css_set			*cur_cset;
	struct css_set			*cur_dcset;
	struct task_struct		*cur_task;
	struct list_head		iters_node;	/* css_set->task_iters */
};

extern struct cgroup_root cgrp_dfl_root;
extern struct css_set init_css_set;

#define SUBSYS(_x) extern struct cgroup_subsys _x ## _cgrp_subsys;
#include <linux/cgroup_subsys.h>
#undef SUBSYS

#define SUBSYS(_x)							\
	extern struct static_key_true _x ## _cgrp_subsys_enabled_key;	\
	extern struct static_key_true _x ## _cgrp_subsys_on_dfl_key;
#include <linux/cgroup_subsys.h>
#undef SUBSYS

/**
 * cgroup_subsys_enabled - fast test on whether a subsys is enabled
 * @ss: subsystem in question
 */
#define cgroup_subsys_enabled(ss)					\
	static_branch_likely(&ss ## _enabled_key)

/**
 * cgroup_subsys_on_dfl - fast test on whether a subsys is on default hierarchy
 * @ss: subsystem in question
 */
#define cgroup_subsys_on_dfl(ss)					\
	static_branch_likely(&ss ## _on_dfl_key)

bool css_has_online_children(struct cgroup_subsys_state *css);
struct cgroup_subsys_state *css_from_id(int id, struct cgroup_subsys *ss);
struct cgroup_subsys_state *cgroup_e_css(struct cgroup *cgroup,
					 struct cgroup_subsys *ss);
struct cgroup_subsys_state *cgroup_get_e_css(struct cgroup *cgroup,
					     struct cgroup_subsys *ss);
struct cgroup_subsys_state *css_tryget_online_from_dir(struct dentry *dentry,
						       struct cgroup_subsys *ss);

struct cgroup *cgroup_get_from_path(const char *path);
struct cgroup *cgroup_get_from_fd(int fd);
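
/*
 * Example (illustrative sketch, not a kernel API): looking up a cgroup by
 * its path on the default hierarchy and dropping the reference when done.
 * cgroup_get_from_path() returns an ERR_PTR() on failure; cgroup_put() is
 * defined further down in this header.
 *
 *	struct cgroup *cgrp;
 *
 *	cgrp = cgroup_get_from_path("/my_group");
 *	if (IS_ERR(cgrp))
 *		return PTR_ERR(cgrp);
 *	... use @cgrp ...
 *	cgroup_put(cgrp);
 */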

int cgroup_attach_task_all(struct task_struct *from, struct task_struct *);
int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from);

int cgroup_add_dfl_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
int cgroup_add_legacy_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
int cgroup_rm_cftypes(struct cftype *cfts);
void cgroup_file_notify(struct cgroup_file *cfile);

int task_cgroup_path(struct task_struct *task, char *buf, size_t buflen);
int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry);
int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns,
		     struct pid *pid, struct task_struct *tsk);

void cgroup_fork(struct task_struct *p);
extern int cgroup_can_fork(struct task_struct *p);
extern void cgroup_cancel_fork(struct task_struct *p);
extern void cgroup_post_fork(struct task_struct *p);
void cgroup_exit(struct task_struct *p);
void cgroup_release(struct task_struct *p);
void cgroup_free(struct task_struct *p);

int cgroup_init_early(void);
int cgroup_init(void);

int cgroup_parse_float(const char *input, unsigned dec_shift, s64 *v);

/*
 * Iteration helpers and macros.
 */

struct cgroup_subsys_state *css_next_child(struct cgroup_subsys_state *pos,
					   struct cgroup_subsys_state *parent);
struct cgroup_subsys_state *css_next_descendant_pre(struct cgroup_subsys_state *pos,
						    struct cgroup_subsys_state *css);
struct cgroup_subsys_state *css_rightmost_descendant(struct cgroup_subsys_state *pos);
struct cgroup_subsys_state *css_next_descendant_post(struct cgroup_subsys_state *pos,
						     struct cgroup_subsys_state *css);

struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset,
					 struct cgroup_subsys_state **dst_cssp);
struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset,
					struct cgroup_subsys_state **dst_cssp);

void css_task_iter_start(struct cgroup_subsys_state *css, unsigned int flags,
			 struct css_task_iter *it);
struct task_struct *css_task_iter_next(struct css_task_iter *it);
void css_task_iter_end(struct css_task_iter *it);

/**
 * css_for_each_child - iterate through children of a css
 * @pos: the css * to use as the loop cursor
 * @parent: css whose children to walk
 *
 * Walk @parent's children. Must be called under rcu_read_lock().
 *
 * If a subsystem synchronizes ->css_online() and the start of iteration, a
 * css which finished ->css_online() is guaranteed to be visible in the
 * future iterations and will stay visible until the last reference is put.
 * A css which hasn't finished ->css_online() or already finished
 * ->css_offline() may show up during traversal. It's each subsystem's
 * responsibility to synchronize against on/offlining.
 *
 * It is allowed to temporarily drop RCU read lock during iteration. The
 * caller is responsible for ensuring that @pos remains accessible until
 * the start of the next iteration by, for example, bumping the css refcnt.
 */
#define css_for_each_child(pos, parent)					\
	for ((pos) = css_next_child(NULL, (parent)); (pos);		\
	     (pos) = css_next_child((pos), (parent)))
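
/*
 * Example (sketch): counting the online children of a hypothetical
 * @parent_css, illustrating the locking rule above. CSS_ONLINE is defined
 * in cgroup-defs.h.
 *
 *	struct cgroup_subsys_state *pos;
 *	int nr_online = 0;
 *
 *	rcu_read_lock();
 *	css_for_each_child(pos, parent_css)
 *		if (pos->flags & CSS_ONLINE)
 *			nr_online++;
 *	rcu_read_unlock();
 */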

/**
 * css_for_each_descendant_pre - pre-order walk of a css's descendants
 * @pos: the css * to use as the loop cursor
 * @css: css whose descendants to walk
 *
 * Walk @css's descendants. @css is included in the iteration and the
 * first node to be visited. Must be called under rcu_read_lock().
 *
 * If a subsystem synchronizes ->css_online() and the start of iteration, a
 * css which finished ->css_online() is guaranteed to be visible in the
 * future iterations and will stay visible until the last reference is put.
 * A css which hasn't finished ->css_online() or already finished
 * ->css_offline() may show up during traversal. It's each subsystem's
 * responsibility to synchronize against on/offlining.
 *
 * For example, the following guarantees that a descendant can't escape
 * state updates of its ancestors.
 *
 * my_online(@css)
 * {
 *	Lock @css's parent and @css;
 *	Inherit state from the parent;
 *	Unlock both.
 * }
 *
 * my_update_state(@css)
 * {
 *	css_for_each_descendant_pre(@pos, @css) {
 *		Lock @pos;
 *		if (@pos == @css)
 *			Update @css's state;
 *		else
 *			Verify @pos is alive and inherit state from its parent;
 *		Unlock @pos;
 *	}
 * }
 *
 * As long as the inheriting step, including checking the parent state, is
 * enclosed inside @pos locking, double-locking the parent isn't necessary
 * while inheriting. The state update to the parent is guaranteed to be
 * visible by walking order and, as long as inheriting operations to the
 * same @pos are atomic to each other, multiple updates racing each other
 * still result in the correct state. It's guaranteed that at least one
 * inheritance happens for any css after the latest update to its parent.
 *
 * If checking parent's state requires locking the parent, each inheriting
 * iteration should lock and unlock both @pos->parent and @pos.
 *
 * Alternatively, a subsystem may choose to use a single global lock to
 * synchronize ->css_online() and ->css_offline() against tree-walking
 * operations.
 *
 * It is allowed to temporarily drop RCU read lock during iteration. The
 * caller is responsible for ensuring that @pos remains accessible until
 * the start of the next iteration by, for example, bumping the css refcnt.
 */
#define css_for_each_descendant_pre(pos, css)				\
	for ((pos) = css_next_descendant_pre(NULL, (css)); (pos);	\
	     (pos) = css_next_descendant_pre((pos), (css)))
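
/*
 * Example (sketch): dropping the RCU read lock mid-walk as permitted
 * above. @pos is pinned with css_get() so it stays accessible until the
 * next iteration starts; the reference is dropped only after the RCU read
 * lock has been re-acquired.
 *
 *	rcu_read_lock();
 *	css_for_each_descendant_pre(pos, root_css) {
 *		css_get(pos);
 *		rcu_read_unlock();
 *
 *		... do sleepable work against @pos ...
 *
 *		rcu_read_lock();
 *		css_put(pos);
 *	}
 *	rcu_read_unlock();
 */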

/**
 * css_for_each_descendant_post - post-order walk of a css's descendants
 * @pos: the css * to use as the loop cursor
 * @css: css whose descendants to walk
 *
 * Similar to css_for_each_descendant_pre() but performs post-order
 * traversal instead. @css is included in the iteration and the last
 * node to be visited.
 *
 * If a subsystem synchronizes ->css_online() and the start of iteration, a
 * css which finished ->css_online() is guaranteed to be visible in the
 * future iterations and will stay visible until the last reference is put.
 * A css which hasn't finished ->css_online() or already finished
 * ->css_offline() may show up during traversal. It's each subsystem's
 * responsibility to synchronize against on/offlining.
 *
 * Note that the walk visibility guarantee example described for the
 * pre-order walk doesn't apply to post-order walks.
 */
#define css_for_each_descendant_post(pos, css)				\
	for ((pos) = css_next_descendant_post(NULL, (css)); (pos);	\
	     (pos) = css_next_descendant_post((pos), (css)))

/**
 * cgroup_taskset_for_each - iterate cgroup_taskset
 * @task: the loop cursor
 * @dst_css: the destination css
 * @tset: taskset to iterate
 *
 * @tset may contain multiple tasks and they may belong to multiple
 * processes.
 *
 * On the v2 hierarchy, there may be tasks from multiple processes and they
 * may not share the source or destination csses.
 *
 * On traditional hierarchies, when there are multiple tasks in @tset, if a
 * task of a process is in @tset, all tasks of the process are in @tset.
 * Also, all are guaranteed to share the same source and destination csses.
 *
 * Iteration is not in any specific order.
 */
#define cgroup_taskset_for_each(task, dst_css, tset)			\
	for ((task) = cgroup_taskset_first((tset), &(dst_css));	\
	     (task);							\
	     (task) = cgroup_taskset_next((tset), &(dst_css)))

/**
 * cgroup_taskset_for_each_leader - iterate group leaders in a cgroup_taskset
 * @leader: the loop cursor
 * @dst_css: the destination css
 * @tset: taskset to iterate
 *
 * Iterate threadgroup leaders of @tset. For single-task migrations, @tset
 * may not contain any.
 */
#define cgroup_taskset_for_each_leader(leader, dst_css, tset)		\
	for ((leader) = cgroup_taskset_first((tset), &(dst_css));	\
	     (leader);							\
	     (leader) = cgroup_taskset_next((tset), &(dst_css)))	\
		if ((leader) != (leader)->group_leader)			\
			;						\
		else
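
/*
 * Example (sketch): a hypothetical ->can_attach() method vetoing a
 * migration by scanning the taskset with cgroup_taskset_for_each().
 * my_task_allowed() is a made-up predicate, not a kernel helper.
 *
 *	static int my_can_attach(struct cgroup_taskset *tset)
 *	{
 *		struct cgroup_subsys_state *dst_css;
 *		struct task_struct *task;
 *
 *		cgroup_taskset_for_each(task, dst_css, tset)
 *			if (!my_task_allowed(task, dst_css))
 *				return -EPERM;
 *		return 0;
 *	}
 */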

/*
 * Inline functions.
 */

/**
 * css_get - obtain a reference on the specified css
 * @css: target css
 *
 * The caller must already have a reference.
 */
static inline void css_get(struct cgroup_subsys_state *css)
{
	if (!(css->flags & CSS_NO_REF))
		percpu_ref_get(&css->refcnt);
}

/**
 * css_get_many - obtain references on the specified css
 * @css: target css
 * @n: number of references to get
 *
 * The caller must already have a reference.
 */
static inline void css_get_many(struct cgroup_subsys_state *css, unsigned int n)
{
	if (!(css->flags & CSS_NO_REF))
		percpu_ref_get_many(&css->refcnt, n);
}

/**
 * css_tryget - try to obtain a reference on the specified css
 * @css: target css
 *
 * Obtain a reference on @css unless it already has reached zero and is
 * being released. This function doesn't care whether @css is on or
 * offline. The caller naturally needs to ensure that @css is accessible
 * but doesn't have to be holding a reference on it - IOW, RCU protected
 * access is good enough for this function. Returns %true if a reference
 * count was successfully obtained; %false otherwise.
 */
static inline bool css_tryget(struct cgroup_subsys_state *css)
{
	if (!(css->flags & CSS_NO_REF))
		return percpu_ref_tryget(&css->refcnt);
	return true;
}

/**
 * css_tryget_online - try to obtain a reference on the specified css if online
 * @css: target css
 *
 * Obtain a reference on @css if it's online. The caller naturally needs
 * to ensure that @css is accessible but doesn't have to be holding a
 * reference on it - IOW, RCU protected access is good enough for this
 * function. Returns %true if a reference count was successfully obtained;
 * %false otherwise.
 */
static inline bool css_tryget_online(struct cgroup_subsys_state *css)
{
	if (!(css->flags & CSS_NO_REF))
		return percpu_ref_tryget_live(&css->refcnt);
	return true;
}

/**
 * css_is_dying - test whether the specified css is dying
 * @css: target css
 *
 * Test whether @css is in the process of offlining or already offline. In
 * most cases, ->css_online() and ->css_offline() callbacks should be
 * enough; however, the actual offline operations are RCU delayed and this
 * test returns %true also when @css is scheduled to be offlined.
 *
 * This is useful, for example, when the use case requires synchronous
 * behavior with respect to cgroup removal. cgroup removal schedules css
 * offlining but the css can seem alive while the operation is being
 * delayed. If the delay affects user visible semantics, this test can be
 * used to resolve the situation.
 */
static inline bool css_is_dying(struct cgroup_subsys_state *css)
{
	return !(css->flags & CSS_NO_REF) && percpu_ref_is_dying(&css->refcnt);
}

/**
 * css_put - put a css reference
 * @css: target css
 *
 * Put a reference obtained via css_get() and css_tryget_online().
 */
static inline void css_put(struct cgroup_subsys_state *css)
{
	if (!(css->flags & CSS_NO_REF))
		percpu_ref_put(&css->refcnt);
}

/**
 * css_put_many - put css references
 * @css: target css
 * @n: number of references to put
 *
 * Put references obtained via css_get() and css_tryget_online().
 */
static inline void css_put_many(struct cgroup_subsys_state *css, unsigned int n)
{
	if (!(css->flags & CSS_NO_REF))
		percpu_ref_put_many(&css->refcnt, n);
}

static inline void cgroup_get(struct cgroup *cgrp)
{
	css_get(&cgrp->self);
}

static inline bool cgroup_tryget(struct cgroup *cgrp)
{
	return css_tryget(&cgrp->self);
}

static inline void cgroup_put(struct cgroup *cgrp)
{
	css_put(&cgrp->self);
}

/**
 * task_css_set_check - obtain a task's css_set with extra access conditions
 * @task: the task to obtain css_set for
 * @__c: extra condition expression to be passed to rcu_dereference_check()
 *
 * A task's css_set is RCU protected, initialized and exited while holding
 * task_lock(), and can only be modified while holding both cgroup_mutex
 * and task_lock() while the task is alive. This macro verifies that the
 * caller is inside proper critical section and returns @task's css_set.
 *
 * The caller can also specify additional allowed conditions via @__c, such
 * as locks used during the cgroup_subsys::attach() methods.
 */
#ifdef CONFIG_PROVE_RCU
extern struct mutex cgroup_mutex;
extern spinlock_t css_set_lock;
#define task_css_set_check(task, __c)					\
	rcu_dereference_check((task)->cgroups,				\
		lockdep_is_held(&cgroup_mutex) ||			\
		lockdep_is_held(&css_set_lock) ||			\
		((task)->flags & PF_EXITING) || (__c))
#else
#define task_css_set_check(task, __c)					\
	rcu_dereference((task)->cgroups)
#endif
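
/*
 * Example (sketch): a subsystem which accesses the css_set from its
 * ->attach() path while holding its own lock can name that lock via @__c
 * to satisfy lockdep. my_ss_lock is hypothetical.
 *
 *	cset = task_css_set_check(task, lockdep_is_held(&my_ss_lock));
 */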

/**
 * task_css_check - obtain css for (task, subsys) w/ extra access conds
 * @task: the target task
 * @subsys_id: the target subsystem ID
 * @__c: extra condition expression to be passed to rcu_dereference_check()
 *
 * Return the cgroup_subsys_state for the (@task, @subsys_id) pair. The
 * synchronization rules are the same as task_css_set_check().
 */
#define task_css_check(task, subsys_id, __c)				\
	task_css_set_check((task), (__c))->subsys[(subsys_id)]

/**
 * task_css_set - obtain a task's css_set
 * @task: the task to obtain css_set for
 *
 * See task_css_set_check().
 */
static inline struct css_set *task_css_set(struct task_struct *task)
{
	return task_css_set_check(task, false);
}

/**
 * task_css - obtain css for (task, subsys)
 * @task: the target task
 * @subsys_id: the target subsystem ID
 *
 * See task_css_check().
 */
static inline struct cgroup_subsys_state *task_css(struct task_struct *task,
						   int subsys_id)
{
	return task_css_check(task, subsys_id, false);
}

/**
 * task_get_css - find and get the css for (task, subsys)
 * @task: the target task
 * @subsys_id: the target subsystem ID
 *
 * Find the css for the (@task, @subsys_id) combination, increment a
 * reference on and return it. This function is guaranteed to return a
 * valid css. The returned css may already have been offlined.
 */
static inline struct cgroup_subsys_state *
task_get_css(struct task_struct *task, int subsys_id)
{
	struct cgroup_subsys_state *css;

	rcu_read_lock();
	while (true) {
		css = task_css(task, subsys_id);
		/*
		 * Can't use css_tryget_online() here. A task which has
		 * PF_EXITING set may stay associated with an offline css.
		 * If such task calls this function, css_tryget_online()
		 * will keep failing.
		 */
		if (likely(css_tryget(css)))
			break;
		cpu_relax();
	}
	rcu_read_unlock();
	return css;
}

/**
 * task_css_is_root - test whether a task belongs to the root css
 * @task: the target task
 * @subsys_id: the target subsystem ID
 *
 * Test whether @task belongs to the root css on the specified subsystem.
 * May be invoked in any context.
 */
static inline bool task_css_is_root(struct task_struct *task, int subsys_id)
{
	return task_css_check(task, subsys_id, true) ==
		init_css_set.subsys[subsys_id];
}

static inline struct cgroup *task_cgroup(struct task_struct *task,
					 int subsys_id)
{
	return task_css(task, subsys_id)->cgroup;
}

static inline struct cgroup *task_dfl_cgroup(struct task_struct *task)
{
	return task_css_set(task)->dfl_cgrp;
}

static inline struct cgroup *cgroup_parent(struct cgroup *cgrp)
{
	struct cgroup_subsys_state *parent_css = cgrp->self.parent;

	if (parent_css)
		return container_of(parent_css, struct cgroup, self);
	return NULL;
}
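
/*
 * Example (sketch): pinning a task's css with task_get_css() so it can be
 * used outside the RCU read-side section, e.g. across a sleeping
 * operation. cpu_cgrp_id is only available when the cpu controller is
 * built in; any subsystem ID works the same way.
 *
 *	struct cgroup_subsys_state *css;
 *
 *	css = task_get_css(current, cpu_cgrp_id);
 *	... @css stays valid here, sleeping is fine ...
 *	css_put(css);
 */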

/**
 * cgroup_is_descendant - test ancestry
 * @cgrp: the cgroup to be tested
 * @ancestor: possible ancestor of @cgrp
 *
 * Test whether @cgrp is a descendant of @ancestor. It also returns %true
 * if @cgrp == @ancestor. This function is safe to call as long as @cgrp
 * and @ancestor are accessible.
 */
static inline bool cgroup_is_descendant(struct cgroup *cgrp,
					struct cgroup *ancestor)
{
	if (cgrp->root != ancestor->root || cgrp->level < ancestor->level)
		return false;
	return cgrp->ancestor_ids[ancestor->level] == ancestor->id;
}

/**
 * cgroup_ancestor - find ancestor of cgroup
 * @cgrp: cgroup to find ancestor of
 * @ancestor_level: level of ancestor to find starting from root
 *
 * Find ancestor of cgroup at specified level starting from root if it
 * exists and return pointer to it. Return NULL if @cgrp doesn't have
 * ancestor at @ancestor_level.
 *
 * This function is safe to call as long as @cgrp is accessible.
 */
static inline struct cgroup *cgroup_ancestor(struct cgroup *cgrp,
					     int ancestor_level)
{
	if (cgrp->level < ancestor_level)
		return NULL;
	while (cgrp && cgrp->level > ancestor_level)
		cgrp = cgroup_parent(cgrp);
	return cgrp;
}

/**
 * task_under_cgroup_hierarchy - test task's membership of cgroup ancestry
 * @task: the task to be tested
 * @ancestor: possible ancestor of @task's cgroup
 *
 * Tests whether @task's default cgroup hierarchy is a descendant of
 * @ancestor. It follows all the same rules as cgroup_is_descendant, and
 * only applies to the default hierarchy.
 */
static inline bool task_under_cgroup_hierarchy(struct task_struct *task,
					       struct cgroup *ancestor)
{
	struct css_set *cset = task_css_set(task);

	return cgroup_is_descendant(cset->dfl_cgrp, ancestor);
}

/* no synchronization, the result can only be used as a hint */
static inline bool cgroup_is_populated(struct cgroup *cgrp)
{
	return cgrp->nr_populated_csets + cgrp->nr_populated_domain_children +
		cgrp->nr_populated_threaded_children;
}

/* returns ino associated with a cgroup */
static inline ino_t cgroup_ino(struct cgroup *cgrp)
{
	return cgrp->kn->id.ino;
}

/* cft/css accessors for cftype->write() operation */
static inline struct cftype *of_cft(struct kernfs_open_file *of)
{
	return of->kn->priv;
}

struct cgroup_subsys_state *of_css(struct kernfs_open_file *of);

/* cft/css accessors for cftype->seq_*() operations */
static inline struct cftype *seq_cft(struct seq_file *seq)
{
	return of_cft(seq->private);
}

static inline struct cgroup_subsys_state *seq_css(struct seq_file *seq)
{
	return of_css(seq->private);
}

/*
 * Name / path handling functions. All are thin wrappers around the kernfs
 * counterparts and can be called under any context.
 */

static inline int cgroup_name(struct cgroup *cgrp, char *buf, size_t buflen)
{
	return kernfs_name(cgrp->kn, buf, buflen);
}

static inline int cgroup_path(struct cgroup *cgrp, char *buf, size_t buflen)
{
	return kernfs_path(cgrp->kn, buf, buflen);
}

static inline void pr_cont_cgroup_name(struct cgroup *cgrp)
{
	pr_cont_kernfs_name(cgrp->kn);
}

static inline void pr_cont_cgroup_path(struct cgroup *cgrp)
{
	pr_cont_kernfs_path(cgrp->kn);
}

static inline struct psi_group *cgroup_psi(struct cgroup *cgrp)
{
	return &cgrp->psi;
}
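
/*
 * Example (sketch): a hypothetical cftype ->seq_show() handler using the
 * accessors above to recover its css. my_read_stat() is made up.
 *
 *	static int my_stat_show(struct seq_file *seq, void *v)
 *	{
 *		struct cgroup_subsys_state *css = seq_css(seq);
 *
 *		seq_printf(seq, "%llu\n", my_read_stat(css));
 *		return 0;
 *	}
 */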

static inline void cgroup_init_kthreadd(void)
{
	/*
	 * kthreadd is inherited by all kthreads, keep it in the root so
	 * that the new kthreads are guaranteed to stay in the root until
	 * initialization is finished.
	 */
	current->no_cgroup_migration = 1;
}

static inline void cgroup_kthread_ready(void)
{
	/*
	 * This kthread finished initialization. The creator should have
	 * set PF_NO_SETAFFINITY if this kthread should stay in the root.
	 */
	current->no_cgroup_migration = 0;
}

static inline union kernfs_node_id *cgroup_get_kernfs_id(struct cgroup *cgrp)
{
	return &cgrp->kn->id;
}

void cgroup_path_from_kernfs_id(const union kernfs_node_id *id,
				char *buf, size_t buflen);
#else /* !CONFIG_CGROUPS */

struct cgroup_subsys_state;
struct cgroup;

static inline void css_get(struct cgroup_subsys_state *css) {}
static inline void css_put(struct cgroup_subsys_state *css) {}
static inline int cgroup_attach_task_all(struct task_struct *from,
					 struct task_struct *t) { return 0; }
static inline int cgroupstats_build(struct cgroupstats *stats,
				    struct dentry *dentry) { return -EINVAL; }

static inline void cgroup_fork(struct task_struct *p) {}
static inline int cgroup_can_fork(struct task_struct *p) { return 0; }
static inline void cgroup_cancel_fork(struct task_struct *p) {}
static inline void cgroup_post_fork(struct task_struct *p) {}
static inline void cgroup_exit(struct task_struct *p) {}
static inline void cgroup_release(struct task_struct *p) {}
static inline void cgroup_free(struct task_struct *p) {}

static inline int cgroup_init_early(void) { return 0; }
static inline int cgroup_init(void) { return 0; }
static inline void cgroup_init_kthreadd(void) {}
static inline void cgroup_kthread_ready(void) {}
static inline union kernfs_node_id *cgroup_get_kernfs_id(struct cgroup *cgrp)
{
	return NULL;
}

static inline struct cgroup *cgroup_parent(struct cgroup *cgrp)
{
	return NULL;
}

static inline struct psi_group *cgroup_psi(struct cgroup *cgrp)
{
	return NULL;
}

static inline bool task_under_cgroup_hierarchy(struct task_struct *task,
					       struct cgroup *ancestor)
{
	return true;
}

static inline void cgroup_path_from_kernfs_id(const union kernfs_node_id *id,
					      char *buf, size_t buflen) {}
#endif /* !CONFIG_CGROUPS */

#ifdef CONFIG_CGROUPS
/*
 * cgroup scalable recursive statistics.
 */
void cgroup_rstat_updated(struct cgroup *cgrp, int cpu);
void cgroup_rstat_flush(struct cgroup *cgrp);
void cgroup_rstat_flush_irqsafe(struct cgroup *cgrp);
void cgroup_rstat_flush_hold(struct cgroup *cgrp);
void cgroup_rstat_flush_release(void);
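
/*
 * Example (sketch): the usual rstat split between a cheap per-cpu update
 * on the hot path and an on-demand flush on the read path.
 *
 *	(hot path, e.g. from stat accounting)
 *	cgroup_rstat_updated(cgrp, smp_processor_id());
 *
 *	(read path, e.g. from a ->seq_show() handler)
 *	cgroup_rstat_flush(cgrp);
 */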

/*
 * Basic resource stats.
 */
#ifdef CONFIG_CGROUP_CPUACCT
void cpuacct_charge(struct task_struct *tsk, u64 cputime);
void cpuacct_account_field(struct task_struct *tsk, int index, u64 val);
#else
static inline void cpuacct_charge(struct task_struct *tsk, u64 cputime) {}
static inline void cpuacct_account_field(struct task_struct *tsk, int index,
					 u64 val) {}
#endif

void __cgroup_account_cputime(struct cgroup *cgrp, u64 delta_exec);
void __cgroup_account_cputime_field(struct cgroup *cgrp,
				    enum cpu_usage_stat index, u64 delta_exec);

static inline void cgroup_account_cputime(struct task_struct *task,
					  u64 delta_exec)
{
	struct cgroup *cgrp;

	cpuacct_charge(task, delta_exec);

	rcu_read_lock();
	cgrp = task_dfl_cgroup(task);
	if (cgroup_parent(cgrp))
		__cgroup_account_cputime(cgrp, delta_exec);
	rcu_read_unlock();
}

static inline void cgroup_account_cputime_field(struct task_struct *task,
						enum cpu_usage_stat index,
						u64 delta_exec)
{
	struct cgroup *cgrp;

	cpuacct_account_field(task, index, delta_exec);

	rcu_read_lock();
	cgrp = task_dfl_cgroup(task);
	if (cgroup_parent(cgrp))
		__cgroup_account_cputime_field(cgrp, index, delta_exec);
	rcu_read_unlock();
}
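
/*
 * Example (sketch): charging execution time from scheduler-side
 * accounting, assuming a current task @curr and a computed @delta_exec in
 * nanoseconds. The two helpers are used from independent call sites; they
 * are shown together only for illustration.
 *
 *	cgroup_account_cputime(curr, delta_exec);
 *	cgroup_account_cputime_field(curr, CPUTIME_SYSTEM, delta_exec);
 */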

#else /* CONFIG_CGROUPS */

static inline void cgroup_account_cputime(struct task_struct *task,
					  u64 delta_exec) {}
static inline void cgroup_account_cputime_field(struct task_struct *task,
						enum cpu_usage_stat index,
						u64 delta_exec) {}

#endif /* CONFIG_CGROUPS */

/*
 * sock->sk_cgrp_data handling. For more info, see sock_cgroup_data
 * definition in cgroup-defs.h.
 */
#ifdef CONFIG_SOCK_CGROUP_DATA

#if defined(CONFIG_CGROUP_NET_PRIO) || defined(CONFIG_CGROUP_NET_CLASSID)
extern spinlock_t cgroup_sk_update_lock;
#endif

void cgroup_sk_alloc_disable(void);
void cgroup_sk_alloc(struct sock_cgroup_data *skcd);
void cgroup_sk_free(struct sock_cgroup_data *skcd);

static inline struct cgroup *sock_cgroup_ptr(struct sock_cgroup_data *skcd)
{
#if defined(CONFIG_CGROUP_NET_PRIO) || defined(CONFIG_CGROUP_NET_CLASSID)
	unsigned long v;

	/*
	 * @skcd->val is 64bit but the following is safe on 32bit too as we
	 * just need the lower ulong to be written and read atomically.
	 */
	v = READ_ONCE(skcd->val);

	if (v & 1)
		return &cgrp_dfl_root.cgrp;

	return (struct cgroup *)(unsigned long)v ?: &cgrp_dfl_root.cgrp;
#else
	return (struct cgroup *)(unsigned long)skcd->val;
#endif
}

#else /* CONFIG_SOCK_CGROUP_DATA */

static inline void cgroup_sk_alloc(struct sock_cgroup_data *skcd) {}
static inline void cgroup_sk_free(struct sock_cgroup_data *skcd) {}

#endif /* CONFIG_SOCK_CGROUP_DATA */

struct cgroup_namespace {
	refcount_t		count;
	struct ns_common	ns;
	struct user_namespace	*user_ns;
	struct ucounts		*ucounts;
	struct css_set		*root_cset;
};

extern struct cgroup_namespace init_cgroup_ns;

#ifdef CONFIG_CGROUPS

void free_cgroup_ns(struct cgroup_namespace *ns);

struct cgroup_namespace *copy_cgroup_ns(unsigned long flags,
					struct user_namespace *user_ns,
					struct cgroup_namespace *old_ns);

int cgroup_path_ns(struct cgroup *cgrp, char *buf, size_t buflen,
		   struct cgroup_namespace *ns);

#else /* !CONFIG_CGROUPS */

static inline void free_cgroup_ns(struct cgroup_namespace *ns) { }
static inline struct cgroup_namespace *
copy_cgroup_ns(unsigned long flags, struct user_namespace *user_ns,
	       struct cgroup_namespace *old_ns)
{
	return old_ns;
}

#endif /* !CONFIG_CGROUPS */

static inline void get_cgroup_ns(struct cgroup_namespace *ns)
{
	if (ns)
		refcount_inc(&ns->count);
}

static inline void put_cgroup_ns(struct cgroup_namespace *ns)
{
	if (ns && refcount_dec_and_test(&ns->count))
		free_cgroup_ns(ns);
}

#ifdef CONFIG_CGROUPS

void cgroup_enter_frozen(void);
void cgroup_leave_frozen(bool always_leave);
void cgroup_update_frozen(struct cgroup *cgrp);
void cgroup_freeze(struct cgroup *cgrp, bool freeze);
void cgroup_freezer_migrate_task(struct task_struct *task, struct cgroup *src,
				 struct cgroup *dst);

static inline bool cgroup_task_freeze(struct task_struct *task)
{
	bool ret;

	if (task->flags & PF_KTHREAD)
		return false;

	rcu_read_lock();
	ret = test_bit(CGRP_FREEZE, &task_dfl_cgroup(task)->flags);
	rcu_read_unlock();

	return ret;
}

static inline bool cgroup_task_frozen(struct task_struct *task)
{
	return task->frozen;
}

#else /* !CONFIG_CGROUPS */

static inline void cgroup_enter_frozen(void) { }
static inline void cgroup_leave_frozen(bool always_leave) { }
static inline bool cgroup_task_freeze(struct task_struct *task)
{
	return false;
}
static inline bool cgroup_task_frozen(struct task_struct *task)
{
	return false;
}

#endif /* !CONFIG_CGROUPS */

#ifdef CONFIG_CGROUP_BPF
static inline void cgroup_bpf_get(struct cgroup *cgrp)
{
	percpu_ref_get(&cgrp->bpf.refcnt);
}

static inline void cgroup_bpf_put(struct cgroup *cgrp)
{
	percpu_ref_put(&cgrp->bpf.refcnt);
}

#else /* CONFIG_CGROUP_BPF */

static inline void cgroup_bpf_get(struct cgroup *cgrp) {}
static inline void cgroup_bpf_put(struct cgroup *cgrp) {}

#endif /* CONFIG_CGROUP_BPF */

#endif /* _LINUX_CGROUP_H */