/* SPDX-License-Identifier: GPL-2.0 */
/*
 * linux/cgroup-defs.h - basic definitions for cgroup
 *
 * This file provides basic types and interfaces. Include this file directly
 * only if necessary to avoid cyclic dependencies.
 */
#ifndef _LINUX_CGROUP_DEFS_H
#define _LINUX_CGROUP_DEFS_H

#include <linux/limits.h>
#include <linux/list.h>
#include <linux/idr.h>
#include <linux/wait.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/refcount.h>
#include <linux/percpu-refcount.h>
#include <linux/percpu-rwsem.h>
#include <linux/u64_stats_sync.h>
#include <linux/workqueue.h>
#include <linux/bpf-cgroup-defs.h>
#include <linux/psi_types.h>

#ifdef CONFIG_CGROUPS

struct cgroup;
struct cgroup_root;
struct cgroup_subsys;
struct cgroup_taskset;
struct kernfs_node;
struct kernfs_ops;
struct kernfs_open_file;
struct seq_file;
struct poll_table_struct;

#define MAX_CGROUP_TYPE_NAMELEN 32
#define MAX_CGROUP_ROOT_NAMELEN 64
#define MAX_CFTYPE_NAME 64

/* define the enumeration of all cgroup subsystems */
#define SUBSYS(_x) _x ## _cgrp_id,
enum cgroup_subsys_id {
#include <linux/cgroup_subsys.h>
        CGROUP_SUBSYS_COUNT,
};
#undef SUBSYS
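/*
 * Illustrative note (not from the original header): cgroup_subsys.h
 * invokes SUBSYS() once per built-in controller, so with, for example,
 * the cpu and memory controllers configured the enum above roughly
 * expands to:
 *
 *        enum cgroup_subsys_id {
 *                cpu_cgrp_id,
 *                memory_cgrp_id,
 *                CGROUP_SUBSYS_COUNT,
 *        };
 *
 * The same SUBSYS() X-macro trick is reused elsewhere in cgroup core to
 * stamp out per-controller declarations from a single list.
 */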
/* bits in struct cgroup_subsys_state flags field */
enum {
        CSS_NO_REF = (1 << 0), /* no reference counting for this css */
        CSS_ONLINE = (1 << 1), /* between ->css_online() and ->css_offline() */
        CSS_RELEASED = (1 << 2), /* refcnt reached zero, released */
        CSS_VISIBLE = (1 << 3), /* css is visible to userland */
        CSS_DYING = (1 << 4), /* css is dying */
};

/* bits in struct cgroup flags field */
enum {
        /* Control Group requires release notifications to userspace */
        CGRP_NOTIFY_ON_RELEASE,
        /*
         * Clone the parent's configuration when creating a new child
         * cpuset cgroup. For historical reasons, this option can be
         * specified at mount time and thus is implemented here.
         */
        CGRP_CPUSET_CLONE_CHILDREN,

        /* Control group has to be frozen. */
        CGRP_FREEZE,

        /* Cgroup is frozen. */
        CGRP_FROZEN,

        /* Control group has to be killed. */
        CGRP_KILL,
};

/* cgroup_root->flags */
enum {
        CGRP_ROOT_NOPREFIX = (1 << 1), /* mounted subsystems have no named prefix */
        CGRP_ROOT_XATTR = (1 << 2), /* supports extended attributes */

        /*
         * Consider namespaces as delegation boundaries. If this flag is
         * set, controller specific interface files in a namespace root
         * aren't writeable from inside the namespace.
         */
        CGRP_ROOT_NS_DELEGATE = (1 << 3),

        /*
         * Reduce latencies on dynamic cgroup modifications such as task
         * migrations and controller on/offs by disabling percpu operation on
         * cgroup_threadgroup_rwsem. This pushes hot path operations such as
         * forks and exits into the slow path and makes them more expensive.
         *
         * The static usage pattern of creating a cgroup, enabling controllers,
         * and then seeding it with CLONE_INTO_CGROUP doesn't require write
         * locking cgroup_threadgroup_rwsem and thus doesn't benefit from
         * favordynmod.
         */
        CGRP_ROOT_FAVOR_DYNMODS = (1 << 4),

        /*
         * Enable cpuset controller in v1 cgroup to use v2 behavior.
         */
        CGRP_ROOT_CPUSET_V2_MODE = (1 << 16),

        /*
         * Enable legacy local memory.events.
         */
        CGRP_ROOT_MEMORY_LOCAL_EVENTS = (1 << 17),

        /*
         * Enable recursive subtree protection.
         */
        CGRP_ROOT_MEMORY_RECURSIVE_PROT = (1 << 18),

        /*
         * Enable hugetlb accounting for the memory controller.
         */
        CGRP_ROOT_MEMORY_HUGETLB_ACCOUNTING = (1 << 19),
};

/* cftype->flags */
enum {
        CFTYPE_ONLY_ON_ROOT = (1 << 0), /* only create on root cgrp */
        CFTYPE_NOT_ON_ROOT = (1 << 1), /* don't create on root cgrp */
        CFTYPE_NS_DELEGATABLE = (1 << 2), /* writeable beyond delegation boundaries */

        CFTYPE_NO_PREFIX = (1 << 3), /* (DON'T USE FOR NEW FILES) no subsys prefix */
        CFTYPE_WORLD_WRITABLE = (1 << 4), /* (DON'T USE FOR NEW FILES) S_IWUGO */
        CFTYPE_DEBUG = (1 << 5), /* create when cgroup_debug */

        /* internal flags, do not use outside cgroup core proper */
        __CFTYPE_ONLY_ON_DFL = (1 << 16), /* only on default hierarchy */
        __CFTYPE_NOT_ON_DFL = (1 << 17), /* not on default hierarchy */
        __CFTYPE_ADDED = (1 << 18),
};

/*
 * cgroup_file is the handle for a file instance created in a cgroup which
 * is used, for example, to generate file changed notifications. This can
 * be obtained by setting cftype->file_offset.
 */
struct cgroup_file {
        /* do not access any fields from outside cgroup core */
        struct kernfs_node *kn;
        unsigned long notified_at;
        struct timer_list notify_timer;
};

/*
 * Per-subsystem/per-cgroup state maintained by the system. This is the
 * fundamental structural building block that controllers deal with.
 *
 * Fields marked with "PI:" are public and immutable and may be accessed
 * directly without synchronization.
 */
struct cgroup_subsys_state {
        /* PI: the cgroup that this css is attached to */
        struct cgroup *cgroup;

        /* PI: the cgroup subsystem that this css is attached to */
        struct cgroup_subsys *ss;

        /* reference count - access via css_[try]get() and css_put() */
        struct percpu_ref refcnt;

        /* siblings list anchored at the parent's ->children */
        struct list_head sibling;
        struct list_head children;

        /* flush target list anchored at cgrp->rstat_css_list */
        struct list_head rstat_css_node;

        /*
         * PI: Subsys-unique ID. 0 is unused and root is always 1. The
         * matching css can be looked up using css_from_id().
         */
        int id;

        unsigned int flags;

        /*
         * Monotonically increasing unique serial number which defines a
         * uniform order among all csses. It's guaranteed that all
         * ->children lists are in the ascending order of ->serial_nr,
         * which is used to allow interrupting and resuming iterations.
         */
        u64 serial_nr;

        /*
         * Incremented by online self and children. Used to guarantee that
         * parents are not offlined before their children.
         */
        atomic_t online_cnt;

        /* percpu_ref killing and RCU release */
        struct work_struct destroy_work;
        struct rcu_work destroy_rwork;

        /*
         * PI: the parent css. Placed here for cache proximity to following
         * fields of the containing structure.
         */
        struct cgroup_subsys_state *parent;
};
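/*
 * Illustrative sketch (not from the original header): the usual pattern
 * for pinning a css while working on it. css_tryget() fails once the
 * percpu refcount has started dying, so callers bail or retry:
 *
 *        static void do_something_with_css(struct cgroup_subsys_state *css)
 *        {
 *                if (!css_tryget(css))
 *                        return;
 *
 *                ...        (css is guaranteed to stay alive here)
 *                css_put(css);
 *        }
 *
 * css_get()/css_put() are no-ops on csses with CSS_NO_REF set, such as
 * root csses.
 */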
/*
 * A css_set is a structure holding pointers to a set of
 * cgroup_subsys_state objects. This saves space in the task struct
 * object and speeds up fork()/exit(), since a single inc/dec and a
 * list_add()/del() can bump the reference count on the entire cgroup
 * set for a task.
 */
struct css_set {
        /*
         * Set of subsystem states, one for each subsystem. This array is
         * immutable after creation apart from the init_css_set during
         * subsystem registration (at boot time).
         */
        struct cgroup_subsys_state *subsys[CGROUP_SUBSYS_COUNT];

        /* reference count */
        refcount_t refcount;

        /*
         * For a domain cgroup, the following points to self. If threaded,
         * to the matching cset of the nearest domain ancestor. The
         * dom_cset provides access to the domain cgroup and its csses to
         * which domain level resource consumptions should be charged.
         */
        struct css_set *dom_cset;

        /* the default cgroup associated with this css_set */
        struct cgroup *dfl_cgrp;

        /* internal task count, protected by css_set_lock */
        int nr_tasks;

        /*
         * Lists running through all tasks using this css_set.
         * mg_tasks lists tasks which belong to this cset but are in the
         * process of being migrated out or in. Protected by
         * css_set_lock, but, during migration, once tasks are moved to
         * mg_tasks, it can be read safely while holding cgroup_mutex.
         */
        struct list_head tasks;
        struct list_head mg_tasks;
        struct list_head dying_tasks;

        /* all css_task_iters currently walking this cset */
        struct list_head task_iters;

        /*
         * On the default hierarchy, ->subsys[ssid] may point to a css
         * attached to an ancestor instead of the cgroup this css_set is
         * associated with. The following node is anchored at
         * ->subsys[ssid]->cgroup->e_csets[ssid] and provides a way to
         * iterate through all css's attached to a given cgroup.
         */
        struct list_head e_cset_node[CGROUP_SUBSYS_COUNT];

        /* all threaded csets whose ->dom_cset points to this cset */
        struct list_head threaded_csets;
        struct list_head threaded_csets_node;

        /*
         * List running through all css_sets in the same hash slot.
         * Protected by css_set_lock.
         */
        struct hlist_node hlist;

        /*
         * List of cgrp_cset_links pointing at cgroups referenced from this
         * css_set. Protected by css_set_lock.
         */
        struct list_head cgrp_links;

        /*
         * List of csets participating in the on-going migration either as
         * source or destination. Protected by cgroup_mutex.
         */
        struct list_head mg_src_preload_node;
        struct list_head mg_dst_preload_node;
        struct list_head mg_node;

        /*
         * If this cset is acting as the source of migration the following
         * two fields are set. mg_src_cgrp and mg_dst_cgrp are
         * respectively the source and destination cgroups of the on-going
         * migration. mg_dst_cset is the destination cset the target tasks
         * on this cset should be migrated to. Protected by cgroup_mutex.
         */
        struct cgroup *mg_src_cgrp;
        struct cgroup *mg_dst_cgrp;
        struct css_set *mg_dst_cset;

        /* dead and being drained, ignore for migration */
        bool dead;

        /* For RCU-protected deletion */
        struct rcu_head rcu_head;
};
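/*
 * Illustrative sketch (not from the original header): a task reaches its
 * per-controller state through its css_set, so "which css does this task
 * use for subsystem X" is one dereference plus an array index. Readers
 * pin the association with RCU; cpu_cgrp_id here is only an example and
 * depends on the cpu controller being built in:
 *
 *        struct cgroup_subsys_state *css;
 *
 *        rcu_read_lock();
 *        css = task_css_set(current)->subsys[cpu_cgrp_id];
 *        ...
 *        rcu_read_unlock();
 *
 * This is also why fork()/exit() stay cheap: the child takes a single
 * reference on the parent's css_set instead of touching every controller.
 */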
struct cgroup_base_stat {
        struct task_cputime cputime;

#ifdef CONFIG_SCHED_CORE
        u64 forceidle_sum;
#endif
};

/*
 * rstat - cgroup scalable recursive statistics. Accounting is done
 * per-cpu in cgroup_rstat_cpu which is then lazily propagated up the
 * hierarchy on reads.
 *
 * When a stat gets updated, the cgroup_rstat_cpu and its ancestors are
 * linked into the updated tree. On the following read, propagation only
 * considers and consumes the updated tree. This makes reading O(the
 * number of descendants which have been active since last read) instead of
 * O(the total number of descendants).
 *
 * This is important because there can be a lot of (draining) cgroups which
 * aren't active and stat may be read frequently. The combination can
 * become very expensive. By propagating selectively, increasing reading
 * frequency decreases the cost of each read.
 *
 * This struct hosts both the fields which implement the above -
 * updated_children and updated_next - and the fields which track basic
 * resource statistics on top of it - bsync, bstat and last_bstat.
 */
struct cgroup_rstat_cpu {
        /*
         * ->bsync protects ->bstat. These are the only fields which get
         * updated in the hot path.
         */
        struct u64_stats_sync bsync;
        struct cgroup_base_stat bstat;

        /*
         * Snapshots at the last reading. These are used to calculate the
         * deltas to propagate to the global counters.
         */
        struct cgroup_base_stat last_bstat;

        /*
         * This field is used to record the cumulative per-cpu time of
         * the cgroup and its descendants. Currently it can be read via
         * eBPF/drgn etc, and we are still trying to determine how to
         * expose it in the cgroupfs interface.
         */
        struct cgroup_base_stat subtree_bstat;

        /*
         * Snapshots at the last reading. These are used to calculate the
         * deltas to propagate to the per-cpu subtree_bstat.
         */
        struct cgroup_base_stat last_subtree_bstat;

        /*
         * Child cgroups with stat updates on this cpu since the last read
         * are linked on the parent's ->updated_children through
         * ->updated_next.
         *
         * In addition to being more compact, a singly-linked list pointing
         * to the cgroup makes it unnecessary for each per-cpu struct to
         * point back to the associated cgroup.
         *
         * Protected by per-cpu cgroup_rstat_cpu_lock.
         */
        struct cgroup *updated_children; /* terminated by self cgroup */
        struct cgroup *updated_next; /* NULL iff not on the list */
};
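/*
 * Illustrative sketch (not from the original header): a controller using
 * rstat bumps a per-cpu counter in the hot path and only marks the cgroup
 * updated; the expensive upward propagation is deferred to readers, which
 * flush first. struct my_css, ->pcpu_counter and ->total are hypothetical:
 *
 *        void my_charge(struct my_css *mc, u64 delta)
 *        {
 *                this_cpu_add(*mc->pcpu_counter, delta);
 *                cgroup_rstat_updated(mc->css.cgroup, smp_processor_id());
 *        }
 *
 *        u64 my_read(struct my_css *mc)
 *        {
 *                cgroup_rstat_flush(mc->css.cgroup);
 *                return mc->total;
 *        }
 *
 * The per-subsystem hook invoked during such a flush is
 * ->css_rstat_flush() in struct cgroup_subsys below.
 */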
struct cgroup_freezer_state {
        /* Should the cgroup and its descendants be frozen. */
        bool freeze;

        /* Should the cgroup actually be frozen? */
        int e_freeze;

        /* Fields below are protected by css_set_lock */

        /* Number of frozen descendant cgroups */
        int nr_frozen_descendants;

        /*
         * Number of tasks, which are counted as frozen:
         * frozen, SIGSTOPped, and PTRACEd.
         */
        int nr_frozen_tasks;
};

struct cgroup {
        /* self css with NULL ->ss, points back to this cgroup */
        struct cgroup_subsys_state self;

        unsigned long flags; /* "unsigned long" so bitops work */

        /*
         * The depth this cgroup is at. The root is at depth zero and each
         * step down the hierarchy increments the level. This along with
         * ancestors[] can determine whether a given cgroup is a
         * descendant of another without traversing the hierarchy.
         */
        int level;

        /* Maximum allowed descendant tree depth */
        int max_depth;

        /*
         * Keep track of total numbers of visible and dying descendant
         * cgroups. Dying cgroups are cgroups which were deleted by a user,
         * but are still existing because someone else is holding a reference.
         * max_descendants is a maximum allowed number of descendant cgroups.
         *
         * nr_descendants and nr_dying_descendants are protected
         * by cgroup_mutex and css_set_lock. It's fine to read them holding
         * any of cgroup_mutex and css_set_lock; for writing both locks
         * should be held.
         */
        int nr_descendants;
        int nr_dying_descendants;
        int max_descendants;

        /*
         * Each non-empty css_set associated with this cgroup contributes
         * one to nr_populated_csets. The counter is zero iff this cgroup
         * doesn't have any tasks.
         *
         * All children which have non-zero nr_populated_csets and/or
         * nr_populated_children of their own contribute one to either
         * nr_populated_domain_children or nr_populated_threaded_children
         * depending on their type. Each counter is zero iff all cgroups
         * of the type in the subtree proper don't have any tasks.
         */
        int nr_populated_csets;
        int nr_populated_domain_children;
        int nr_populated_threaded_children;

        int nr_threaded_children; /* # of live threaded child cgroups */

        struct kernfs_node *kn; /* cgroup kernfs entry */
        struct cgroup_file procs_file; /* handle for "cgroup.procs" */
        struct cgroup_file events_file; /* handle for "cgroup.events" */

        /* handles for "{cpu,memory,io,irq}.pressure" */
        struct cgroup_file psi_files[NR_PSI_RESOURCES];

        /*
         * The bitmask of subsystems enabled on the child cgroups.
         * ->subtree_control is the one configured through
         * "cgroup.subtree_control" while ->subtree_ss_mask is the effective
         * one which may have more subsystems enabled. A controller's knobs
         * are made available iff it's enabled in ->subtree_control.
         */
        u16 subtree_control;
        u16 subtree_ss_mask;
        u16 old_subtree_control;
        u16 old_subtree_ss_mask;

        /* Private pointers for each registered subsystem */
        struct cgroup_subsys_state __rcu *subsys[CGROUP_SUBSYS_COUNT];

        struct cgroup_root *root;

        /*
         * List of cgrp_cset_links pointing at css_sets with tasks in this
         * cgroup. Protected by css_set_lock.
         */
        struct list_head cset_links;

        /*
         * On the default hierarchy, a css_set for a cgroup with some
         * subsystems disabled will point to css's which are associated with
         * the closest ancestor which has the subsys enabled. The
         * following lists all css_sets which point to this cgroup's css
         * for the given subsystem.
         */
        struct list_head e_csets[CGROUP_SUBSYS_COUNT];

        /*
         * If !threaded, self. If threaded, it points to the nearest
         * domain ancestor. Inside a threaded subtree, cgroups are exempt
         * from process granularity and no-internal-task constraint.
         * Domain level resource consumptions which aren't tied to a
         * specific task are charged to the dom_cgrp.
         */
        struct cgroup *dom_cgrp;
        struct cgroup *old_dom_cgrp; /* used while enabling threaded */

        /* per-cpu recursive resource statistics */
        struct cgroup_rstat_cpu __percpu *rstat_cpu;
        struct list_head rstat_css_list;

        /* cgroup basic resource statistics */
        struct cgroup_base_stat last_bstat;
        struct cgroup_base_stat bstat;
        struct prev_cputime prev_cputime; /* for printing out cputime */

        /*
         * list of pidlists, up to two for each namespace (one for procs, one
         * for tasks); created on demand.
         */
        struct list_head pidlists;
        struct mutex pidlist_mutex;

        /* used to wait for offlining of csses */
        wait_queue_head_t offline_waitq;

        /* used to schedule release agent */
        struct work_struct release_agent_work;

        /* used to track pressure stalls */
        struct psi_group *psi;

        /* used to store eBPF programs */
        struct cgroup_bpf bpf;

        /* If there is block congestion on this cgroup. */
        atomic_t congestion_count;

        /* Used to store internal freezer state */
        struct cgroup_freezer_state freezer;

#ifdef CONFIG_BPF_SYSCALL
        struct bpf_local_storage __rcu *bpf_cgrp_storage;
#endif

        /* All ancestors including self */
        struct cgroup *ancestors[];
};
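/*
 * Illustrative sketch (not from the original header): because
 * ->ancestors[] holds every ancestor including self, indexed by level, an
 * ancestry test is a bounds check plus one array lookup rather than a
 * walk up the hierarchy. The in-tree helper cgroup_is_descendant() works
 * along these lines:
 *
 *        static inline bool is_descendant(struct cgroup *cgrp,
 *                                         struct cgroup *ancestor)
 *        {
 *                if (cgrp->root != ancestor->root ||
 *                    cgrp->level < ancestor->level)
 *                        return false;
 *                return cgrp->ancestors[ancestor->level] == ancestor;
 *        }
 */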
/*
 * A cgroup_root represents the root of a cgroup hierarchy, and may be
 * associated with a kernfs_root to form an active hierarchy. This is
 * internal to cgroup core. Don't access directly from controllers.
 */
struct cgroup_root {
        struct kernfs_root *kf_root;

        /* The bitmask of subsystems attached to this hierarchy */
        unsigned int subsys_mask;

        /* Unique id for this hierarchy. */
        int hierarchy_id;

        /*
         * The root cgroup. The containing cgroup_root will be destroyed on
         * its release. cgrp->ancestors[0] overflows into the following
         * field, so cgrp_ancestor_storage must immediately follow.
         */
        struct cgroup cgrp;

        /* must follow cgrp for cgrp->ancestors[0], see above */
        struct cgroup *cgrp_ancestor_storage;

        /* Number of cgroups in the hierarchy, used only for /proc/cgroups */
        atomic_t nr_cgrps;

        /* A list running through the active hierarchies */
        struct list_head root_list;

        /* Hierarchy-specific flags */
        unsigned int flags;

        /* The path to use for release notifications. */
        char release_agent_path[PATH_MAX];

        /* The name for this hierarchy - may be empty */
        char name[MAX_CGROUP_ROOT_NAMELEN];
};

/*
 * struct cftype: handler definitions for cgroup control files
 *
 * When reading/writing to a file:
 *        - the cgroup to use is file->f_path.dentry->d_parent->d_fsdata
 *        - the 'cftype' of the file is file->f_path.dentry->d_fsdata
 */
struct cftype {
        /*
         * By convention, the name should begin with the name of the
         * subsystem, followed by a period. Zero length string indicates
         * end of cftype array.
         */
        char name[MAX_CFTYPE_NAME];
        unsigned long private;

        /*
         * The maximum length of string, excluding trailing nul, that can
         * be passed to write. If < PAGE_SIZE-1, PAGE_SIZE-1 is assumed.
         */
        size_t max_write_len;

        /* CFTYPE_* flags */
        unsigned int flags;

        /*
         * If non-zero, should contain the offset from the start of css to
         * a struct cgroup_file field. cgroup will record the handle of
         * the created file into it. The recorded handle can be used as
         * long as the containing css remains accessible.
         */
        unsigned int file_offset;

        /*
         * Fields used for internal bookkeeping. Initialized automatically
         * during registration.
         */
        struct cgroup_subsys *ss; /* NULL for cgroup core files */
        struct list_head node; /* anchored at ss->cfts */
        struct kernfs_ops *kf_ops;

        int (*open)(struct kernfs_open_file *of);
        void (*release)(struct kernfs_open_file *of);

        /*
         * read_u64() is a shortcut for the common case of returning a
         * single integer. Use it in place of read().
         */
        u64 (*read_u64)(struct cgroup_subsys_state *css, struct cftype *cft);
        /*
         * read_s64() is a signed version of read_u64().
         */
        s64 (*read_s64)(struct cgroup_subsys_state *css, struct cftype *cft);

        /* generic seq_file read interface */
        int (*seq_show)(struct seq_file *sf, void *v);

        /* optional ops, implement all or none */
        void *(*seq_start)(struct seq_file *sf, loff_t *ppos);
        void *(*seq_next)(struct seq_file *sf, void *v, loff_t *ppos);
        void (*seq_stop)(struct seq_file *sf, void *v);

        /*
         * write_u64() is a shortcut for the common case of accepting
         * a single integer (as parsed by simple_strtoull) from
         * userspace. Use in place of write(); return 0 or error.
         */
        int (*write_u64)(struct cgroup_subsys_state *css, struct cftype *cft,
                         u64 val);
        /*
         * write_s64() is a signed version of write_u64().
         */
        int (*write_s64)(struct cgroup_subsys_state *css, struct cftype *cft,
                         s64 val);

        /*
         * write() is the generic write callback which maps directly to
         * kernfs write operation and overrides all other operations.
         * Maximum write size is determined by ->max_write_len. Use
         * of_css/cft() to access the associated css and cft.
         */
        ssize_t (*write)(struct kernfs_open_file *of,
                         char *buf, size_t nbytes, loff_t off);

        __poll_t (*poll)(struct kernfs_open_file *of,
                         struct poll_table_struct *pt);

#ifdef CONFIG_DEBUG_LOCK_ALLOC
        struct lock_class_key lockdep_key;
#endif
};
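/*
 * Illustrative sketch (not from the original header): a minimal cftype
 * array for a hypothetical "mycg" controller; all mycg_* names are made
 * up. The zero-length-name sentinel terminates the array:
 *
 *        static struct cftype mycg_files[] = {
 *                {
 *                        .name = "mycg.limit",
 *                        .read_u64 = mycg_limit_read,
 *                        .write_u64 = mycg_limit_write,
 *                },
 *                {
 *                        .name = "mycg.events",
 *                        .seq_show = mycg_events_show,
 *                        .file_offset = offsetof(struct mycg_css, events_file),
 *                        .flags = CFTYPE_NOT_ON_ROOT,
 *                },
 *                { }
 *        };
 *
 * With ->file_offset set, cgroup core records the created file's handle
 * in the css's cgroup_file so the controller can later pass it to
 * cgroup_file_notify() to kick poll/fsnotify waiters.
 */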
/*
 * Control Group subsystem type.
 * See Documentation/admin-guide/cgroup-v1/cgroups.rst for details
 */
struct cgroup_subsys {
        struct cgroup_subsys_state *(*css_alloc)(struct cgroup_subsys_state *parent_css);
        int (*css_online)(struct cgroup_subsys_state *css);
        void (*css_offline)(struct cgroup_subsys_state *css);
        void (*css_released)(struct cgroup_subsys_state *css);
        void (*css_free)(struct cgroup_subsys_state *css);
        void (*css_reset)(struct cgroup_subsys_state *css);
        void (*css_rstat_flush)(struct cgroup_subsys_state *css, int cpu);
        int (*css_extra_stat_show)(struct seq_file *seq,
                                   struct cgroup_subsys_state *css);
        int (*css_local_stat_show)(struct seq_file *seq,
                                   struct cgroup_subsys_state *css);

        int (*can_attach)(struct cgroup_taskset *tset);
        void (*cancel_attach)(struct cgroup_taskset *tset);
        void (*attach)(struct cgroup_taskset *tset);
        void (*post_attach)(void);
        int (*can_fork)(struct task_struct *task,
                        struct css_set *cset);
        void (*cancel_fork)(struct task_struct *task, struct css_set *cset);
        void (*fork)(struct task_struct *task);
        void (*exit)(struct task_struct *task);
        void (*release)(struct task_struct *task);
        void (*bind)(struct cgroup_subsys_state *root_css);

        bool early_init:1;

        /*
         * If %true, the controller, on the default hierarchy, doesn't show
         * up in "cgroup.controllers" or "cgroup.subtree_control", is
         * implicitly enabled on all cgroups on the default hierarchy, and
         * bypasses the "no internal process" constraint. This is for
         * utility type controllers which are transparent to userland.
         *
         * An implicit controller can be stolen from the default hierarchy
         * anytime and thus must be okay with offline csses from previous
         * hierarchies coexisting with csses for the current one.
         */
        bool implicit_on_dfl:1;

        /*
         * If %true, the controller supports threaded mode on the default
         * hierarchy. In a threaded subtree, both process granularity and
         * the no-internal-process constraint are ignored and threaded
         * controllers should be able to handle that.
         *
         * Note that as an implicit controller is automatically enabled on
         * all cgroups on the default hierarchy, it should also be
         * threaded. implicit && !threaded is not supported.
         */
        bool threaded:1;

        /* the following two fields are initialized automatically during boot */
        int id;
        const char *name;

        /* optional, initialized automatically during boot if not set */
        const char *legacy_name;

        /* link to parent, protected by cgroup_lock() */
        struct cgroup_root *root;

        /* idr for css->id */
        struct idr css_idr;

        /*
         * List of cftypes. Each entry is the first entry of an array
         * terminated by zero length name.
         */
        struct list_head cfts;

        /*
         * Base cftypes which are automatically registered. The two can
         * point to the same array.
         */
        struct cftype *dfl_cftypes; /* for the default hierarchy */
        struct cftype *legacy_cftypes; /* for the legacy hierarchies */

        /*
         * A subsystem may depend on other subsystems. When such a subsystem
         * is enabled on a cgroup, the depended-upon subsystems are enabled
         * together if available. Subsystems enabled due to dependency are
         * not visible to userland until explicitly enabled. The following
         * specifies the mask of subsystems that this one depends on.
         */
        unsigned int depends_on;
};
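/*
 * Illustrative sketch (not from the original header): the minimal shape
 * of a controller built on this interface; every mycg_* name is
 * hypothetical. The css is conventionally embedded as the first member
 * of the controller's state, and a real subsystem is also listed in
 * linux/cgroup_subsys.h so that SUBSYS() generates its ID:
 *
 *        struct mycg_css {
 *                struct cgroup_subsys_state css;
 *                u64 limit;
 *        };
 *
 *        static struct cgroup_subsys_state *
 *        mycg_css_alloc(struct cgroup_subsys_state *parent_css)
 *        {
 *                struct mycg_css *mc = kzalloc(sizeof(*mc), GFP_KERNEL);
 *
 *                return mc ? &mc->css : ERR_PTR(-ENOMEM);
 *        }
 *
 *        static void mycg_css_free(struct cgroup_subsys_state *css)
 *        {
 *                kfree(container_of(css, struct mycg_css, css));
 *        }
 *
 *        struct cgroup_subsys mycg_cgrp_subsys = {
 *                .css_alloc = mycg_css_alloc,
 *                .css_free = mycg_css_free,
 *                .dfl_cftypes = mycg_files,
 *        };
 */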
extern struct percpu_rw_semaphore cgroup_threadgroup_rwsem;

/**
 * cgroup_threadgroup_change_begin - threadgroup exclusion for cgroups
 * @tsk: target task
 *
 * Allows cgroup operations to synchronize against threadgroup changes
 * using a percpu_rw_semaphore.
 */
static inline void cgroup_threadgroup_change_begin(struct task_struct *tsk)
{
        percpu_down_read(&cgroup_threadgroup_rwsem);
}

/**
 * cgroup_threadgroup_change_end - threadgroup exclusion for cgroups
 * @tsk: target task
 *
 * Counterpart of cgroup_threadgroup_change_begin().
 */
static inline void cgroup_threadgroup_change_end(struct task_struct *tsk)
{
        percpu_up_read(&cgroup_threadgroup_rwsem);
}
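/*
 * Illustrative sketch (not from the original header): paths that change a
 * task's threadgroup, such as fork, bracket the change with the helpers
 * above. Read-locking the percpu rwsem is nearly free unless a cgroup
 * migration currently holds it for writing:
 *
 *        cgroup_threadgroup_change_begin(current);
 *        ...
 *        cgroup_threadgroup_change_end(current);
 *
 * Writers (cgroup migration) take the same rwsem exclusively and thus
 * observe a stable threadgroup while moving its tasks between cgroups.
 */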
#else /* CONFIG_CGROUPS */

#define CGROUP_SUBSYS_COUNT 0

static inline void cgroup_threadgroup_change_begin(struct task_struct *tsk)
{
        might_sleep();
}

static inline void cgroup_threadgroup_change_end(struct task_struct *tsk) {}

#endif /* CONFIG_CGROUPS */

#ifdef CONFIG_SOCK_CGROUP_DATA

/*
 * sock_cgroup_data is embedded at sock->sk_cgrp_data and contains
 * per-socket cgroup information except for memcg association.
 *
 * On legacy hierarchies, net_prio and net_cls controllers directly
 * set attributes on each sock which can then be tested by the network
 * layer. On the default hierarchy, each sock is associated with the
 * cgroup it was created in and the networking layer can match the
 * cgroup directly.
 */
struct sock_cgroup_data {
        struct cgroup *cgroup; /* v2 */
#ifdef CONFIG_CGROUP_NET_CLASSID
        u32 classid; /* v1 */
#endif
#ifdef CONFIG_CGROUP_NET_PRIO
        u16 prioidx; /* v1 */
#endif
};

static inline u16 sock_cgroup_prioidx(const struct sock_cgroup_data *skcd)
{
#ifdef CONFIG_CGROUP_NET_PRIO
        return READ_ONCE(skcd->prioidx);
#else
        return 1;
#endif
}

static inline u32 sock_cgroup_classid(const struct sock_cgroup_data *skcd)
{
#ifdef CONFIG_CGROUP_NET_CLASSID
        return READ_ONCE(skcd->classid);
#else
        return 0;
#endif
}

static inline void sock_cgroup_set_prioidx(struct sock_cgroup_data *skcd,
                                           u16 prioidx)
{
#ifdef CONFIG_CGROUP_NET_PRIO
        WRITE_ONCE(skcd->prioidx, prioidx);
#endif
}

static inline void sock_cgroup_set_classid(struct sock_cgroup_data *skcd,
                                           u32 classid)
{
#ifdef CONFIG_CGROUP_NET_CLASSID
        WRITE_ONCE(skcd->classid, classid);
#endif
}

#else /* CONFIG_SOCK_CGROUP_DATA */

struct sock_cgroup_data {
};

#endif /* CONFIG_SOCK_CGROUP_DATA */

#endif /* _LINUX_CGROUP_DEFS_H */