/* SPDX-License-Identifier: GPL-2.0 */
/*
 * linux/cgroup-defs.h - basic definitions for cgroup
 *
 * This file provides basic types and interfaces.  Include this file
 * directly only when necessary to avoid cyclic dependencies.
 */
#ifndef _LINUX_CGROUP_DEFS_H
#define _LINUX_CGROUP_DEFS_H

#include <linux/limits.h>
#include <linux/list.h>
#include <linux/idr.h>
#include <linux/wait.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/refcount.h>
#include <linux/percpu-refcount.h>
#include <linux/percpu-rwsem.h>
#include <linux/u64_stats_sync.h>
#include <linux/workqueue.h>
#include <linux/bpf-cgroup-defs.h>
#include <linux/psi_types.h>

#ifdef CONFIG_CGROUPS

struct cgroup;
struct cgroup_root;
struct cgroup_subsys;
struct cgroup_taskset;
struct kernfs_node;
struct kernfs_ops;
struct kernfs_open_file;
struct seq_file;
struct poll_table_struct;

#define MAX_CGROUP_TYPE_NAMELEN 32
#define MAX_CGROUP_ROOT_NAMELEN 64
#define MAX_CFTYPE_NAME 64

/* define the enumeration of all cgroup subsystems */
#define SUBSYS(_x) _x ## _cgrp_id,
enum cgroup_subsys_id {
#include <linux/cgroup_subsys.h>
        CGROUP_SUBSYS_COUNT,
};
#undef SUBSYS

/* bits in struct cgroup_subsys_state flags field */
enum {
        CSS_NO_REF = (1 << 0), /* no reference counting for this css */
        CSS_ONLINE = (1 << 1), /* between ->css_online() and ->css_offline() */
        CSS_RELEASED = (1 << 2), /* refcnt reached zero, released */
        CSS_VISIBLE = (1 << 3), /* css is visible to userland */
        CSS_DYING = (1 << 4), /* css is dying */
};

/* bit numbers in struct cgroup flags field */
enum {
        /* Control Group requires release notifications to userspace */
        CGRP_NOTIFY_ON_RELEASE,
        /*
         * Clone the parent's configuration when creating a new child
         * cpuset cgroup.  For historical reasons, this option can be
         * specified at mount time and thus is implemented here.
         */
        CGRP_CPUSET_CLONE_CHILDREN,

        /* Control group has to be frozen. */
        CGRP_FREEZE,

        /* Cgroup is frozen. */
        CGRP_FROZEN,

        /* Control group has to be killed. */
        CGRP_KILL,
};
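/*
 * Editorial example (a minimal sketch, not part of this header): note the
 * difference in encoding between the two enums above.  The CGRP_* values
 * are bit numbers meant for the bitops API, while CSS_* (and the
 * CGRP_ROOT_* values below) are pre-shifted masks:
 *
 *      if (test_bit(CGRP_FROZEN, &cgrp->flags))   // CGRP_* is a bit number
 *              ...;
 *      if (css->flags & CSS_ONLINE)               // CSS_* is a mask
 *              ...;
 */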
/* cgroup_root->flags */
enum {
        CGRP_ROOT_NOPREFIX = (1 << 1), /* mounted subsystems have no name prefix */
        CGRP_ROOT_XATTR = (1 << 2), /* supports extended attributes */

        /*
         * Consider namespaces as delegation boundaries.  If this flag is
         * set, controller specific interface files in a namespace root
         * aren't writeable from inside the namespace.
         */
        CGRP_ROOT_NS_DELEGATE = (1 << 3),

        /*
         * Reduce latencies on dynamic cgroup modifications such as task
         * migrations and controller on/offs by disabling percpu operation on
         * cgroup_threadgroup_rwsem.  This turns hot path operations such as
         * forks and exits into slow path ones, making them more expensive.
         *
         * The static usage pattern of creating a cgroup, enabling controllers,
         * and then seeding it with CLONE_INTO_CGROUP doesn't require write
         * locking cgroup_threadgroup_rwsem and thus doesn't benefit from
         * favordynmod.
         */
        CGRP_ROOT_FAVOR_DYNMODS = (1 << 4),

        /*
         * Enable cpuset controller in v1 cgroup to use v2 behavior.
         */
        CGRP_ROOT_CPUSET_V2_MODE = (1 << 16),

        /*
         * Enable legacy local memory.events.
         */
        CGRP_ROOT_MEMORY_LOCAL_EVENTS = (1 << 17),

        /*
         * Enable recursive subtree protection.
         */
        CGRP_ROOT_MEMORY_RECURSIVE_PROT = (1 << 18),

        /*
         * Enable hugetlb accounting for the memory controller.
         */
        CGRP_ROOT_MEMORY_HUGETLB_ACCOUNTING = (1 << 19),
};

/* cftype->flags */
enum {
        CFTYPE_ONLY_ON_ROOT = (1 << 0), /* only create on root cgrp */
        CFTYPE_NOT_ON_ROOT = (1 << 1), /* don't create on root cgrp */
        CFTYPE_NS_DELEGATABLE = (1 << 2), /* writeable beyond delegation boundaries */

        CFTYPE_NO_PREFIX = (1 << 3), /* (DON'T USE FOR NEW FILES) no subsys prefix */
        CFTYPE_WORLD_WRITABLE = (1 << 4), /* (DON'T USE FOR NEW FILES) S_IWUGO */
        CFTYPE_DEBUG = (1 << 5), /* create when cgroup_debug */

        /* internal flags, do not use outside cgroup core proper */
        __CFTYPE_ONLY_ON_DFL = (1 << 16), /* only on default hierarchy */
        __CFTYPE_NOT_ON_DFL = (1 << 17), /* not on default hierarchy */
        __CFTYPE_ADDED = (1 << 18),
};

/*
 * cgroup_file is the handle for a file instance created in a cgroup which
 * is used, for example, to generate file changed notifications.  This can
 * be obtained by setting cftype->file_offset.
 */
struct cgroup_file {
        /* do not access any fields from outside cgroup core */
        struct kernfs_node *kn;
        unsigned long notified_at;
        struct timer_list notify_timer;
};

/*
 * Per-subsystem/per-cgroup state maintained by the system.  This is the
 * fundamental structural building block that controllers deal with.
 *
 * Fields marked with "PI:" are public and immutable and may be accessed
 * directly without synchronization.
 */
struct cgroup_subsys_state {
        /* PI: the cgroup that this css is attached to */
        struct cgroup *cgroup;

        /* PI: the cgroup subsystem that this css is attached to */
        struct cgroup_subsys *ss;

        /* reference count - access via css_[try]get() and css_put() */
        struct percpu_ref refcnt;

        /* siblings list anchored at the parent's ->children */
        struct list_head sibling;
        struct list_head children;

        /* flush target list anchored at cgrp->rstat_css_list */
        struct list_head rstat_css_node;

        /*
         * PI: Subsys-unique ID.  0 is unused and root is always 1.  The
         * matching css can be looked up using css_from_id().
         */
        int id;

        unsigned int flags;

        /*
         * Monotonically increasing unique serial number which defines a
         * uniform order among all csses.  All ->children lists are
         * guaranteed to be in ascending ->serial_nr order, which is used
         * to allow interrupting and resuming iterations.
         */
        u64 serial_nr;

        /*
         * Incremented by online self and children.  Used to guarantee that
         * parents are not offlined before their children.
         */
        atomic_t online_cnt;

        /* percpu_ref killing and RCU release */
        struct work_struct destroy_work;
        struct rcu_work destroy_rwork;

        /*
         * PI: the parent css.  Placed here for cache proximity to following
         * fields of the containing structure.
         */
        struct cgroup_subsys_state *parent;
};
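/*
 * Editorial example (a minimal sketch, not part of this header): ->refcnt
 * above is manipulated through helpers declared in linux/cgroup.h, e.g.
 * pinning a css before dropping the locks that made it safe to
 * dereference:
 *
 *      if (css_tryget_online(css)) {
 *              // ... use css without holding cgroup locks ...
 *              css_put(css);
 *      }
 */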
/*
 * A css_set is a structure holding pointers to a set of
 * cgroup_subsys_state objects.  This saves space in the task struct
 * object and speeds up fork()/exit(), since a single inc/dec and a
 * list_add()/del() can bump the reference count on the entire cgroup
 * set for a task.
 */
struct css_set {
        /*
         * Set of subsystem states, one for each subsystem.  This array is
         * immutable after creation apart from the init_css_set during
         * subsystem registration (at boot time).
         */
        struct cgroup_subsys_state *subsys[CGROUP_SUBSYS_COUNT];

        /* reference count */
        refcount_t refcount;

        /*
         * For a domain cgroup, the following points to self.  If threaded,
         * to the matching cset of the nearest domain ancestor.  The
         * dom_cset provides access to the domain cgroup and its csses to
         * which domain level resource consumptions should be charged.
         */
        struct css_set *dom_cset;

        /* the default cgroup associated with this css_set */
        struct cgroup *dfl_cgrp;

        /* internal task count, protected by css_set_lock */
        int nr_tasks;

        /*
         * Lists running through all tasks using this css_set.
         * mg_tasks lists tasks which belong to this cset but are in the
         * process of being migrated out or in.  Protected by
         * css_set_lock, but, during migration, once tasks are moved to
         * mg_tasks, it can be read safely while holding cgroup_mutex.
         */
        struct list_head tasks;
        struct list_head mg_tasks;
        struct list_head dying_tasks;

        /* all css_task_iters currently walking this cset */
        struct list_head task_iters;

        /*
         * On the default hierarchy, ->subsys[ssid] may point to a css
         * attached to an ancestor instead of the cgroup this css_set is
         * associated with.  The following node is anchored at
         * ->subsys[ssid]->cgroup->e_csets[ssid] and provides a way to
         * iterate through all css's attached to a given cgroup.
         */
        struct list_head e_cset_node[CGROUP_SUBSYS_COUNT];

        /* all threaded csets whose ->dom_cset points to this cset */
        struct list_head threaded_csets;
        struct list_head threaded_csets_node;

        /*
         * List running through all css_sets in the same hash slot.
         * Protected by css_set_lock.
         */
        struct hlist_node hlist;

        /*
         * List of cgrp_cset_links pointing at cgroups referenced from this
         * css_set.  Protected by css_set_lock.
         */
        struct list_head cgrp_links;

        /*
         * List of csets participating in the on-going migration either as
         * source or destination.  Protected by cgroup_mutex.
         */
        struct list_head mg_src_preload_node;
        struct list_head mg_dst_preload_node;
        struct list_head mg_node;

        /*
         * If this cset is acting as the source of migration the following
         * fields are set.  mg_src_cgrp and mg_dst_cgrp are respectively
         * the source and destination cgroups of the on-going migration.
         * mg_dst_cset is the destination cset the target tasks on this
         * cset should be migrated to.  Protected by cgroup_mutex.
         */
        struct cgroup *mg_src_cgrp;
        struct cgroup *mg_dst_cgrp;
        struct css_set *mg_dst_cset;

        /* dead and being drained, ignore for migration */
        bool dead;

        /* For RCU-protected deletion */
        struct rcu_head rcu_head;
};
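/*
 * Editorial example (a minimal sketch, not part of this header): a task
 * reaches its per-subsystem css through its css_set.  Using the
 * task_css_set() accessor from linux/cgroup.h under RCU, e.g. with the
 * memory controller built in:
 *
 *      rcu_read_lock();
 *      css = task_css_set(task)->subsys[memory_cgrp_id];
 *      // ... use css, or pin it with css_tryget_online() ...
 *      rcu_read_unlock();
 */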
struct cgroup_base_stat {
        struct task_cputime cputime;

#ifdef CONFIG_SCHED_CORE
        u64 forceidle_sum;
#endif
};

/*
 * rstat - cgroup scalable recursive statistics.  Accounting is done
 * per-cpu in cgroup_rstat_cpu which is then lazily propagated up the
 * hierarchy on reads.
 *
 * When a stat gets updated, the cgroup_rstat_cpu and its ancestors are
 * linked into the updated tree.  On the following read, propagation only
 * considers and consumes the updated tree.  This makes reading O(the
 * number of descendants which have been active since last read) instead of
 * O(the total number of descendants).
 *
 * This is important because there can be a lot of (draining) cgroups which
 * aren't active and stat may be read frequently.  The combination can
 * become very expensive.  By propagating selectively, increasing reading
 * frequency decreases the cost of each read.
 *
 * This struct hosts both the fields which implement the above -
 * updated_children and updated_next - and the fields which track basic
 * resource statistics on top of it - bsync, bstat and last_bstat.
 */
struct cgroup_rstat_cpu {
        /*
         * ->bsync protects ->bstat.  These are the only fields which get
         * updated in the hot path.
         */
        struct u64_stats_sync bsync;
        struct cgroup_base_stat bstat;

        /*
         * Snapshots at the last reading.  These are used to calculate the
         * deltas to propagate to the global counters.
         */
        struct cgroup_base_stat last_bstat;

        /*
         * This field is used to record the cumulative per-cpu time of
         * the cgroup and its descendants.  Currently it can be read via
         * eBPF/drgn etc, and we are still trying to determine how to
         * expose it in the cgroupfs interface.
         */
        struct cgroup_base_stat subtree_bstat;

        /*
         * Snapshots at the last reading.  These are used to calculate the
         * deltas to propagate to the per-cpu subtree_bstat.
         */
        struct cgroup_base_stat last_subtree_bstat;

        /*
         * Child cgroups with stat updates on this cpu since the last read
         * are linked on the parent's ->updated_children through
         * ->updated_next.
         *
         * In addition to being more compact, a singly-linked list pointing
         * to the cgroup makes it unnecessary for each per-cpu struct to
         * point back to the associated cgroup.
         *
         * Protected by per-cpu cgroup_rstat_cpu_lock.
         */
        struct cgroup *updated_children; /* terminated by self cgroup */
        struct cgroup *updated_next; /* NULL iff not on the list */
};
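/*
 * Editorial example (a rough sketch of the technique, modeled on
 * cgroup_rstat_updated() in kernel/cgroup/rstat.c, not the actual code):
 * on an update, a cgroup splices itself into its parent's per-cpu
 * ->updated_children list and walks up until it hits an ancestor that is
 * already linked (->updated_next != NULL):
 *
 *      for (; parent; cgrp = parent, parent = parent_of(cgrp)) {
 *              if (rstatc(cgrp, cpu)->updated_next)
 *                      break;          // already on the updated tree
 *              rstatc(cgrp, cpu)->updated_next =
 *                      rstatc(parent, cpu)->updated_children;
 *              rstatc(parent, cpu)->updated_children = cgrp;
 *      }
 *
 * rstatc() and parent_of() are hypothetical stand-ins for the real
 * helpers.
 */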
struct cgroup_freezer_state {
        /* Should the cgroup and its descendants be frozen? */
        bool freeze;

        /* Should the cgroup actually be frozen? */
        int e_freeze;

        /* Fields below are protected by css_set_lock */

        /* Number of frozen descendant cgroups */
        int nr_frozen_descendants;

        /*
         * Number of tasks which are counted as frozen:
         * frozen, SIGSTOPped, and PTRACEd.
         */
        int nr_frozen_tasks;
};

struct cgroup {
        /* self css with NULL ->ss, points back to this cgroup */
        struct cgroup_subsys_state self;

        unsigned long flags; /* "unsigned long" so bitops work */

        /*
         * The depth this cgroup is at.  The root is at depth zero and each
         * step down the hierarchy increments the level.  This along with
         * ancestors[] can determine whether a given cgroup is a
         * descendant of another without traversing the hierarchy.
         */
        int level;

        /* Maximum allowed descendant tree depth */
        int max_depth;

        /*
         * Keep track of total numbers of visible and dying descendant
         * cgroups.  Dying cgroups are cgroups which were deleted by a user
         * but still exist because someone else is holding a reference.
         * max_descendants is the maximum allowed number of descendant
         * cgroups.
         *
         * nr_descendants and nr_dying_descendants are protected by
         * cgroup_mutex and css_set_lock.  It's fine to read them while
         * holding either cgroup_mutex or css_set_lock; for writing, both
         * locks must be held.
         */
        int nr_descendants;
        int nr_dying_descendants;
        int max_descendants;

        /*
         * Each non-empty css_set associated with this cgroup contributes
         * one to nr_populated_csets.  The counter is zero iff this cgroup
         * doesn't have any tasks.
         *
         * All children which have non-zero nr_populated_csets and/or
         * nr_populated_children of their own contribute one to either
         * nr_populated_domain_children or nr_populated_threaded_children
         * depending on their type.  Each counter is zero iff all cgroups
         * of the type in the subtree proper don't have any tasks.
         */
        int nr_populated_csets;
        int nr_populated_domain_children;
        int nr_populated_threaded_children;

        int nr_threaded_children; /* # of live threaded child cgroups */

        struct kernfs_node *kn; /* cgroup kernfs entry */
        struct cgroup_file procs_file; /* handle for "cgroup.procs" */
        struct cgroup_file events_file; /* handle for "cgroup.events" */

        /* handles for "{cpu,memory,io,irq}.pressure" */
        struct cgroup_file psi_files[NR_PSI_RESOURCES];

        /*
         * The bitmask of subsystems enabled on the child cgroups.
         * ->subtree_control is the one configured through
         * "cgroup.subtree_control" while ->subtree_ss_mask is the effective
         * one which may have more subsystems enabled.  Controller knobs
         * are made available iff it's enabled in ->subtree_control.
         */
        u16 subtree_control;
        u16 subtree_ss_mask;
        u16 old_subtree_control;
        u16 old_subtree_ss_mask;

        /* Private pointers for each registered subsystem */
        struct cgroup_subsys_state __rcu *subsys[CGROUP_SUBSYS_COUNT];

        struct cgroup_root *root;

        /*
         * List of cgrp_cset_links pointing at css_sets with tasks in this
         * cgroup.  Protected by css_set_lock.
         */
        struct list_head cset_links;

        /*
         * On the default hierarchy, a css_set for a cgroup with some
         * subsystems disabled will point to css's which are associated
         * with the closest ancestor which has the subsys enabled.  The
         * following lists all css_sets which point to this cgroup's css
         * for the given subsystem.
         */
        struct list_head e_csets[CGROUP_SUBSYS_COUNT];

        /*
         * If !threaded, self.  If threaded, it points to the nearest
         * domain ancestor.  Inside a threaded subtree, cgroups are exempt
         * from process granularity and no-internal-task constraint.
         * Domain level resource consumptions which aren't tied to a
         * specific task are charged to the dom_cgrp.
         */
        struct cgroup *dom_cgrp;
        struct cgroup *old_dom_cgrp; /* used while enabling threaded */

        /* per-cpu recursive resource statistics */
        struct cgroup_rstat_cpu __percpu *rstat_cpu;
        struct list_head rstat_css_list;

        /* cgroup basic resource statistics */
        struct cgroup_base_stat last_bstat;
        struct cgroup_base_stat bstat;
        struct prev_cputime prev_cputime; /* for printing out cputime */

        /*
         * list of pidlists, up to two for each namespace (one for procs, one
         * for tasks); created on demand.
         */
        struct list_head pidlists;
        struct mutex pidlist_mutex;

        /* used to wait for offlining of csses */
        wait_queue_head_t offline_waitq;

        /* used to schedule release agent */
        struct work_struct release_agent_work;

        /* used to track pressure stalls */
        struct psi_group *psi;

        /* used to store eBPF programs */
        struct cgroup_bpf bpf;

        /* If there is block congestion on this cgroup. */
        atomic_t congestion_count;

        /* Used to store internal freezer state */
        struct cgroup_freezer_state freezer;

#ifdef CONFIG_BPF_SYSCALL
        struct bpf_local_storage __rcu *bpf_cgrp_storage;
#endif

        /* All ancestors including self */
        struct cgroup *ancestors[];
};
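/*
 * Editorial example (a minimal sketch of what cgroup_is_descendant() in
 * linux/cgroup.h does with the fields above): ->ancestors[] is indexed by
 * ->level, so descendancy is a couple of comparisons rather than a
 * hierarchy walk:
 *
 *      static inline bool is_descendant(struct cgroup *cgrp,
 *                                       struct cgroup *ancestor)
 *      {
 *              return cgrp->level >= ancestor->level &&
 *                     cgrp->ancestors[ancestor->level] == ancestor;
 *      }
 *
 * The real helper additionally checks that both cgroups are on the same
 * root.
 */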
/*
 * A cgroup_root represents the root of a cgroup hierarchy, and may be
 * associated with a kernfs_root to form an active hierarchy.  This is
 * internal to cgroup core.  Don't access directly from controllers.
 */
struct cgroup_root {
        struct kernfs_root *kf_root;

        /* The bitmask of subsystems attached to this hierarchy */
        unsigned int subsys_mask;

        /* Unique id for this hierarchy. */
        int hierarchy_id;

        /*
         * The root cgroup.  The containing cgroup_root will be destroyed
         * on its release.  cgrp->ancestors[0] overflows into the following
         * field, so cgrp_ancestor_storage must immediately follow.
         */
        struct cgroup cgrp;

        /* must follow cgrp for cgrp->ancestors[0], see above */
        struct cgroup *cgrp_ancestor_storage;

        /* Number of cgroups in the hierarchy, used only for /proc/cgroups */
        atomic_t nr_cgrps;

        /* A list running through the active hierarchies */
        struct list_head root_list;
        struct rcu_head rcu;

        /* Hierarchy-specific flags */
        unsigned int flags;

        /* The path to use for release notifications. */
        char release_agent_path[PATH_MAX];

        /* The name for this hierarchy - may be empty */
        char name[MAX_CGROUP_ROOT_NAMELEN];
};

/*
 * struct cftype: handler definitions for cgroup control files
 *
 * When reading/writing to a file:
 *      - the cgroup to use is file->f_path.dentry->d_parent->d_fsdata
 *      - the 'cftype' of the file is file->f_path.dentry->d_fsdata
 */
struct cftype {
        /*
         * By convention, the name should begin with the name of the
         * subsystem, followed by a period.  Zero length string indicates
         * end of cftype array.
         */
        char name[MAX_CFTYPE_NAME];
        unsigned long private;

        /*
         * The maximum length of string, excluding trailing nul, that can
         * be passed to write.  If < PAGE_SIZE-1, PAGE_SIZE-1 is assumed.
         */
        size_t max_write_len;

        /* CFTYPE_* flags */
        unsigned int flags;

        /*
         * If non-zero, should contain the offset from the start of css to
         * a struct cgroup_file field.  cgroup will record the handle of
         * the created file into it.  The recorded handle can be used as
         * long as the containing css remains accessible.
         */
        unsigned int file_offset;

        /*
         * Fields used for internal bookkeeping.  Initialized automatically
         * during registration.
         */
        struct cgroup_subsys *ss; /* NULL for cgroup core files */
        struct list_head node; /* anchored at ss->cfts */
        struct kernfs_ops *kf_ops;

        int (*open)(struct kernfs_open_file *of);
        void (*release)(struct kernfs_open_file *of);

        /*
         * read_u64() is a shortcut for the common case of returning a
         * single integer.  Use it in place of read().
         */
        u64 (*read_u64)(struct cgroup_subsys_state *css, struct cftype *cft);
        /*
         * read_s64() is a signed version of read_u64().
         */
        s64 (*read_s64)(struct cgroup_subsys_state *css, struct cftype *cft);

        /* generic seq_file read interface */
        int (*seq_show)(struct seq_file *sf, void *v);

        /* optional ops, implement all or none */
        void *(*seq_start)(struct seq_file *sf, loff_t *ppos);
        void *(*seq_next)(struct seq_file *sf, void *v, loff_t *ppos);
        void (*seq_stop)(struct seq_file *sf, void *v);

        /*
         * write_u64() is a shortcut for the common case of accepting
         * a single integer (as parsed by simple_strtoull) from
         * userspace.  Use in place of write(); return 0 or error.
         */
        int (*write_u64)(struct cgroup_subsys_state *css, struct cftype *cft,
                         u64 val);
        /*
         * write_s64() is a signed version of write_u64().
         */
        int (*write_s64)(struct cgroup_subsys_state *css, struct cftype *cft,
                         s64 val);

        /*
         * write() is the generic write callback which maps directly to
         * kernfs write operation and overrides all other operations.
         * Maximum write size is determined by ->max_write_len.  Use
         * of_css/cft() to access the associated css and cft.
         */
        ssize_t (*write)(struct kernfs_open_file *of,
                         char *buf, size_t nbytes, loff_t off);

        __poll_t (*poll)(struct kernfs_open_file *of,
                         struct poll_table_struct *pt);

#ifdef CONFIG_DEBUG_LOCK_ALLOC
        struct lock_class_key lockdep_key;
#endif
};
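/*
 * Editorial example (a minimal sketch for a hypothetical "foo"
 * controller, not part of this header): control files are declared as a
 * cftype array terminated by a zero length name and hooked up through
 * cgroup_subsys->dfl_cftypes / ->legacy_cftypes (see below):
 *
 *      static struct cftype foo_files[] = {
 *              {
 *                      .name = "weight",  // created as "foo.weight"
 *                      .flags = CFTYPE_NOT_ON_ROOT,
 *                      .read_u64 = foo_weight_read,
 *                      .write_u64 = foo_weight_write,
 *              },
 *              { }     // zero length name terminates the array
 *      };
 *
 * foo_weight_read() and foo_weight_write() are hypothetical handlers
 * with the read_u64/write_u64 signatures above.
 */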
/*
 * Control Group subsystem type.
 * See Documentation/admin-guide/cgroup-v1/cgroups.rst for details.
 */
struct cgroup_subsys {
        struct cgroup_subsys_state *(*css_alloc)(struct cgroup_subsys_state *parent_css);
        int (*css_online)(struct cgroup_subsys_state *css);
        void (*css_offline)(struct cgroup_subsys_state *css);
        void (*css_released)(struct cgroup_subsys_state *css);
        void (*css_free)(struct cgroup_subsys_state *css);
        void (*css_reset)(struct cgroup_subsys_state *css);
        void (*css_rstat_flush)(struct cgroup_subsys_state *css, int cpu);
        int (*css_extra_stat_show)(struct seq_file *seq,
                                   struct cgroup_subsys_state *css);
        int (*css_local_stat_show)(struct seq_file *seq,
                                   struct cgroup_subsys_state *css);

        int (*can_attach)(struct cgroup_taskset *tset);
        void (*cancel_attach)(struct cgroup_taskset *tset);
        void (*attach)(struct cgroup_taskset *tset);
        void (*post_attach)(void);
        int (*can_fork)(struct task_struct *task,
                        struct css_set *cset);
        void (*cancel_fork)(struct task_struct *task, struct css_set *cset);
        void (*fork)(struct task_struct *task);
        void (*exit)(struct task_struct *task);
        void (*release)(struct task_struct *task);
        void (*bind)(struct cgroup_subsys_state *root_css);

        bool early_init:1;

        /*
         * If %true, the controller, on the default hierarchy, doesn't show
         * up in "cgroup.controllers" or "cgroup.subtree_control", is
         * implicitly enabled on all cgroups on the default hierarchy, and
         * bypasses the "no internal process" constraint.  This is for
         * utility type controllers which are transparent to userland.
         *
         * An implicit controller can be stolen from the default hierarchy
         * anytime and thus must be okay with offline csses from previous
         * hierarchies coexisting with csses for the current one.
         */
        bool implicit_on_dfl:1;

        /*
         * If %true, the controller supports threaded mode on the default
         * hierarchy.  In a threaded subtree, both process granularity and
         * the no-internal-process constraint are ignored and threaded
         * controllers should be able to handle that.
         *
         * Note that as an implicit controller is automatically enabled on
         * all cgroups on the default hierarchy, it should also be
         * threaded.  implicit && !threaded is not supported.
         */
        bool threaded:1;

        /* the following two fields are initialized automatically during boot */
        int id;
        const char *name;

        /* optional, initialized automatically during boot if not set */
        const char *legacy_name;

        /* link to parent, protected by cgroup_lock() */
        struct cgroup_root *root;

        /* idr for css->id */
        struct idr css_idr;

        /*
         * List of cftypes.  Each entry is the first entry of an array
         * terminated by zero length name.
         */
        struct list_head cfts;

        /*
         * Base cftypes which are automatically registered.  The two can
         * point to the same array.
         */
        struct cftype *dfl_cftypes; /* for the default hierarchy */
        struct cftype *legacy_cftypes; /* for the legacy hierarchies */

        /*
         * A subsystem may depend on other subsystems.  When such subsystem
         * is enabled on a cgroup, the depended-upon subsystems are enabled
         * together if available.  Subsystems enabled due to dependency are
         * not visible to userland until explicitly enabled.  The following
         * specifies the mask of subsystems that this one depends on.
         */
        unsigned int depends_on;
};
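/*
 * Editorial example (a minimal sketch, not part of this header): a
 * controller fills in one of these and adds a SUBSYS() line to
 * linux/cgroup_subsys.h, which generates foo_cgrp_id in the enum near the
 * top of this file.  Hypothetical "foo" controller using the foo_files
 * array sketched earlier:
 *
 *      struct cgroup_subsys foo_cgrp_subsys = {
 *              .css_alloc = foo_css_alloc,
 *              .css_free = foo_css_free,
 *              .dfl_cftypes = foo_files,
 *              .legacy_cftypes = foo_files,
 *      };
 */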
extern struct percpu_rw_semaphore cgroup_threadgroup_rwsem;

/**
 * cgroup_threadgroup_change_begin - threadgroup exclusion for cgroups
 * @tsk: target task
 *
 * Allows cgroup operations to synchronize against threadgroup changes
 * using a percpu_rw_semaphore.
 */
static inline void cgroup_threadgroup_change_begin(struct task_struct *tsk)
{
        percpu_down_read(&cgroup_threadgroup_rwsem);
}

/**
 * cgroup_threadgroup_change_end - threadgroup exclusion for cgroups
 * @tsk: target task
 *
 * Counterpart of cgroup_threadgroup_change_begin().
 */
static inline void cgroup_threadgroup_change_end(struct task_struct *tsk)
{
        percpu_up_read(&cgroup_threadgroup_rwsem);
}

#else /* CONFIG_CGROUPS */

#define CGROUP_SUBSYS_COUNT 0

static inline void cgroup_threadgroup_change_begin(struct task_struct *tsk)
{
        might_sleep();
}

static inline void cgroup_threadgroup_change_end(struct task_struct *tsk) {}

#endif /* CONFIG_CGROUPS */
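/*
 * Editorial example (a minimal sketch, not part of this header): the fork
 * path brackets threadgroup changes with the helpers above so that cgroup
 * migration observes stable threadgroups:
 *
 *      cgroup_threadgroup_change_begin(current);
 *      // ... link the new task into its threadgroup ...
 *      cgroup_threadgroup_change_end(current);
 *
 * Writers such as cgroup migration take cgroup_threadgroup_rwsem for
 * write, excluding all such read-side sections at once.
 */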
#ifdef CONFIG_SOCK_CGROUP_DATA

/*
 * sock_cgroup_data is embedded at sock->sk_cgrp_data and contains
 * per-socket cgroup information except for memcg association.
 *
 * On legacy hierarchies, net_prio and net_cls controllers directly
 * set attributes on each sock which can then be tested by the network
 * layer.  On the default hierarchy, each sock is associated with the
 * cgroup it was created in and the networking layer can match the
 * cgroup directly.
 */
struct sock_cgroup_data {
        struct cgroup *cgroup; /* v2 */
#ifdef CONFIG_CGROUP_NET_CLASSID
        u32 classid; /* v1 */
#endif
#ifdef CONFIG_CGROUP_NET_PRIO
        u16 prioidx; /* v1 */
#endif
};

static inline u16 sock_cgroup_prioidx(const struct sock_cgroup_data *skcd)
{
#ifdef CONFIG_CGROUP_NET_PRIO
        return READ_ONCE(skcd->prioidx);
#else
        return 1;
#endif
}

static inline u32 sock_cgroup_classid(const struct sock_cgroup_data *skcd)
{
#ifdef CONFIG_CGROUP_NET_CLASSID
        return READ_ONCE(skcd->classid);
#else
        return 0;
#endif
}

static inline void sock_cgroup_set_prioidx(struct sock_cgroup_data *skcd,
                                           u16 prioidx)
{
#ifdef CONFIG_CGROUP_NET_PRIO
        WRITE_ONCE(skcd->prioidx, prioidx);
#endif
}

static inline void sock_cgroup_set_classid(struct sock_cgroup_data *skcd,
                                           u32 classid)
{
#ifdef CONFIG_CGROUP_NET_CLASSID
        WRITE_ONCE(skcd->classid, classid);
#endif
}

#else /* CONFIG_SOCK_CGROUP_DATA */

struct sock_cgroup_data {
};

#endif /* CONFIG_SOCK_CGROUP_DATA */

#endif /* _LINUX_CGROUP_DEFS_H */
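/*
 * Editorial example (a minimal sketch, not part of this header): because
 * the sock_cgroup_* accessors compile to fixed values when the respective
 * config options are off, callers stay config-independent, e.g.:
 *
 *      u32 classid = sock_cgroup_classid(&sk->sk_cgrp_data);
 */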