/* SPDX-License-Identifier: GPL-2.0 */
/*
 * linux/cgroup-defs.h - basic definitions for cgroup
 *
 * This file provides basic types and interfaces.  Include this file directly
 * only if necessary to avoid cyclic dependencies.
 */
#ifndef _LINUX_CGROUP_DEFS_H
#define _LINUX_CGROUP_DEFS_H

#include <linux/limits.h>
#include <linux/list.h>
#include <linux/idr.h>
#include <linux/wait.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/refcount.h>
#include <linux/percpu-refcount.h>
#include <linux/percpu-rwsem.h>
#include <linux/u64_stats_sync.h>
#include <linux/workqueue.h>
#include <linux/bpf-cgroup.h>

#ifdef CONFIG_CGROUPS

struct cgroup;
struct cgroup_root;
struct cgroup_subsys;
struct cgroup_taskset;
struct kernfs_node;
struct kernfs_ops;
struct kernfs_open_file;
struct seq_file;

#define MAX_CGROUP_TYPE_NAMELEN 32
#define MAX_CGROUP_ROOT_NAMELEN 64
#define MAX_CFTYPE_NAME		64

/* define the enumeration of all cgroup subsystems */
#define SUBSYS(_x) _x ## _cgrp_id,
enum cgroup_subsys_id {
#include <linux/cgroup_subsys.h>
	CGROUP_SUBSYS_COUNT,
};
#undef SUBSYS
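
/*
 * For illustration only (a sketch, not part of the original header): in a
 * build with, say, the cpu and memory controllers enabled, the x-macro
 * above expands roughly to
 *
 *	enum cgroup_subsys_id {
 *		cpu_cgrp_id,
 *		memory_cgrp_id,
 *		...
 *		CGROUP_SUBSYS_COUNT,
 *	};
 *
 * Each SUBSYS(x) entry in linux/cgroup_subsys.h is guarded by that
 * controller's config option, so the exact enumerators are build-dependent.
 */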

/* bits in struct cgroup_subsys_state flags field */
enum {
	CSS_NO_REF	= (1 << 0), /* no reference counting for this css */
	CSS_ONLINE	= (1 << 1), /* between ->css_online() and ->css_offline() */
	CSS_RELEASED	= (1 << 2), /* refcnt reached zero, released */
	CSS_VISIBLE	= (1 << 3), /* css is visible to userland */
	CSS_DYING	= (1 << 4), /* css is dying */
};

/* bits in struct cgroup flags field */
enum {
	/* Control Group requires release notifications to userspace */
	CGRP_NOTIFY_ON_RELEASE,
	/*
	 * Clone the parent's configuration when creating a new child
	 * cpuset cgroup.  For historical reasons, this option can be
	 * specified at mount time and thus is implemented here.
	 */
	CGRP_CPUSET_CLONE_CHILDREN,
};

/* cgroup_root->flags */
enum {
	CGRP_ROOT_NOPREFIX	= (1 << 1), /* mounted subsystems have no named prefix */
	CGRP_ROOT_XATTR		= (1 << 2), /* supports extended attributes */

	/*
	 * Consider namespaces as delegation boundaries.  If this flag is
	 * set, controller specific interface files in a namespace root
	 * aren't writeable from inside the namespace.
	 */
	CGRP_ROOT_NS_DELEGATE	= (1 << 3),

	/*
	 * Enable cpuset controller in v1 cgroup to use v2 behavior.
	 */
	CGRP_ROOT_CPUSET_V2_MODE = (1 << 4),
};

/* cftype->flags */
enum {
	CFTYPE_ONLY_ON_ROOT	= (1 << 0),	/* only create on root cgrp */
	CFTYPE_NOT_ON_ROOT	= (1 << 1),	/* don't create on root cgrp */
	CFTYPE_NS_DELEGATABLE	= (1 << 2),	/* writeable beyond delegation boundaries */

	CFTYPE_NO_PREFIX	= (1 << 3),	/* (DON'T USE FOR NEW FILES) no subsys prefix */
	CFTYPE_WORLD_WRITABLE	= (1 << 4),	/* (DON'T USE FOR NEW FILES) S_IWUGO */

	/* internal flags, do not use outside cgroup core proper */
	__CFTYPE_ONLY_ON_DFL	= (1 << 16),	/* only on default hierarchy */
	__CFTYPE_NOT_ON_DFL	= (1 << 17),	/* not on default hierarchy */
};

/*
 * cgroup_file is the handle for a file instance created in a cgroup which
 * is used, for example, to generate file changed notifications.  This can
 * be obtained by setting cftype->file_offset.
 */
struct cgroup_file {
	/* do not access any fields from outside cgroup core */
	struct kernfs_node *kn;
};
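
/*
 * Example (a sketch, not part of the original header; foo_* names and
 * struct foo_css are hypothetical): a controller that wants change
 * notifications embeds a cgroup_file in its css and points
 * cftype->file_offset at it, roughly:
 *
 *	static struct cftype foo_files[] = {
 *		{
 *			.name		= "events",
 *			.file_offset	= offsetof(struct foo_css, events_file),
 *			.seq_show	= foo_events_show,
 *		},
 *		{ }
 *	};
 *
 * cgroup core records the handle when the file is created, after which the
 * controller can call cgroup_file_notify() on it to wake pollers; this is
 * the same mechanism "cgroup.events" uses via cgroup->events_file below.
 */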

/*
 * Per-subsystem/per-cgroup state maintained by the system.  This is the
 * fundamental structural building block that controllers deal with.
 *
 * Fields marked with "PI:" are public and immutable and may be accessed
 * directly without synchronization.
 */
struct cgroup_subsys_state {
	/* PI: the cgroup that this css is attached to */
	struct cgroup *cgroup;

	/* PI: the cgroup subsystem that this css is attached to */
	struct cgroup_subsys *ss;

	/* reference count - access via css_[try]get() and css_put() */
	struct percpu_ref refcnt;

	/* siblings list anchored at the parent's ->children */
	struct list_head sibling;
	struct list_head children;

	/*
	 * PI: Subsys-unique ID.  0 is unused and root is always 1.  The
	 * matching css can be looked up using css_from_id().
	 */
	int id;

	unsigned int flags;

	/*
	 * Monotonically increasing unique serial number which defines a
	 * uniform order among all csses.  It's guaranteed that all
	 * ->children lists are in the ascending order of ->serial_nr and
	 * used to allow interrupting and resuming iterations.
	 */
	u64 serial_nr;

	/*
	 * Incremented by online self and children.  Used to guarantee that
	 * parents are not offlined before their children.
	 */
	atomic_t online_cnt;

	/* percpu_ref killing and RCU release */
	struct rcu_head rcu_head;
	struct work_struct destroy_work;

	/*
	 * PI: the parent css.  Placed here for cache proximity to following
	 * fields of the containing structure.
	 */
	struct cgroup_subsys_state *parent;
};
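
/*
 * Example (a sketch, not part of the original header): ->parent is "PI",
 * public and immutable, so ancestry can be walked without locking as long
 * as the starting css is pinned.  A purely illustrative helper:
 */
static inline int css_example_depth(struct cgroup_subsys_state *css)
{
	int depth = 0;

	/* the root css has a NULL ->parent, which terminates the walk */
	while ((css = css->parent))
		depth++;
	return depth;
}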

/*
 * A css_set is a structure holding pointers to a set of
 * cgroup_subsys_state objects. This saves space in the task struct
 * object and speeds up fork()/exit(), since a single inc/dec and a
 * list_add()/del() can bump the reference count on the entire cgroup
 * set for a task.
 */
struct css_set {
	/*
	 * Set of subsystem states, one for each subsystem. This array is
	 * immutable after creation apart from the init_css_set during
	 * subsystem registration (at boot time).
	 */
	struct cgroup_subsys_state *subsys[CGROUP_SUBSYS_COUNT];

	/* reference count */
	refcount_t refcount;

	/*
	 * For a domain cgroup, the following points to self.  If threaded,
	 * to the matching cset of the nearest domain ancestor.  The
	 * dom_cset provides access to the domain cgroup and its csses to
	 * which domain level resource consumptions should be charged.
	 */
	struct css_set *dom_cset;

	/* the default cgroup associated with this css_set */
	struct cgroup *dfl_cgrp;

	/* internal task count, protected by css_set_lock */
	int nr_tasks;

	/*
	 * Lists running through all tasks using this css_set.
	 * mg_tasks lists tasks which belong to this cset but are in the
	 * process of being migrated out or in.  Protected by
	 * css_set_lock, but, during migration, once tasks are moved to
	 * mg_tasks, it can be read safely while holding cgroup_mutex.
	 */
	struct list_head tasks;
	struct list_head mg_tasks;

	/* all css_task_iters currently walking this cset */
	struct list_head task_iters;

	/*
	 * On the default hierarchy, ->subsys[ssid] may point to a css
	 * attached to an ancestor instead of the cgroup this css_set is
	 * associated with.  The following node is anchored at
	 * ->subsys[ssid]->cgroup->e_csets[ssid] and provides a way to
	 * iterate through all css's attached to a given cgroup.
	 */
	struct list_head e_cset_node[CGROUP_SUBSYS_COUNT];

	/* all threaded csets whose ->dom_cset points to this cset */
	struct list_head threaded_csets;
	struct list_head threaded_csets_node;

	/*
	 * List running through all css_sets in the same hash
	 * slot.  Protected by css_set_lock.
	 */
	struct hlist_node hlist;

	/*
	 * List of cgrp_cset_links pointing at cgroups referenced from this
	 * css_set.  Protected by css_set_lock.
	 */
	struct list_head cgrp_links;

	/*
	 * List of csets participating in the on-going migration either as
	 * source or destination.  Protected by cgroup_mutex.
	 */
	struct list_head mg_preload_node;
	struct list_head mg_node;

	/*
	 * If this cset is acting as the source of migration the following
	 * two fields are set.  mg_src_cgrp and mg_dst_cgrp are
	 * respectively the source and destination cgroups of the on-going
	 * migration.  mg_dst_cset is the destination cset the target tasks
	 * on this cset should be migrated to.  Protected by cgroup_mutex.
	 */
	struct cgroup *mg_src_cgrp;
	struct cgroup *mg_dst_cgrp;
	struct css_set *mg_dst_cset;

	/* dead and being drained, ignore for migration */
	bool dead;

	/* For RCU-protected deletion */
	struct rcu_head rcu_head;
};
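
/*
 * Example (a sketch, not part of the original header): with a cset pinned,
 * the css for a given controller is a plain array lookup, e.g.
 *
 *	struct cgroup_subsys_state *css = cset->subsys[memory_cgrp_id];
 *
 * (memory_cgrp_id exists only when the memory controller is configured in.)
 * This is what keeps fork()/exit() cheap: a task pins a single cset instead
 * of one reference per controller.
 */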

/*
 * cgroup basic resource usage statistics.  Accounting is done per-cpu in
 * cgroup_cpu_stat which is then lazily propagated up the hierarchy on
 * reads.
 *
 * When a stat gets updated, the cgroup_cpu_stat and its ancestors are
 * linked into the updated tree.  On the following read, propagation only
 * considers and consumes the updated tree.  This makes reading O(the
 * number of descendants which have been active since last read) instead of
 * O(the total number of descendants).
 *
 * This is important because there can be a lot of (draining) cgroups which
 * aren't active and stat may be read frequently.  The combination can
 * become very expensive.  By propagating selectively, increasing reading
 * frequency decreases the cost of each read.
 */
struct cgroup_cpu_stat {
	/*
	 * ->sync protects all the current counters.  These are the only
	 * fields which get updated in the hot path.
	 */
	struct u64_stats_sync sync;
	struct task_cputime cputime;

	/*
	 * Snapshots at the last reading.  These are used to calculate the
	 * deltas to propagate to the global counters.
	 */
	struct task_cputime last_cputime;

	/*
	 * Child cgroups with stat updates on this cpu since the last read
	 * are linked on the parent's ->updated_children through
	 * ->updated_next.
	 *
	 * In addition to being more compact, a singly-linked list pointing
	 * to the cgroup makes it unnecessary for each per-cpu struct to
	 * point back to the associated cgroup.
	 *
	 * Protected by per-cpu cgroup_cpu_stat_lock.
	 */
	struct cgroup *updated_children;	/* terminated by self cgroup */
	struct cgroup *updated_next;		/* NULL iff not on the list */
};
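
/*
 * Example (a sketch, not part of the original header): hot-path updaters
 * bump the current counters inside a u64_stats_sync section, roughly
 *
 *	u64_stats_update_begin(&cstat->sync);
 *	cstat->cputime.sum_exec_runtime += delta_exec;
 *	u64_stats_update_end(&cstat->sync);
 *
 * while readers retry with u64_stats_fetch_begin() / u64_stats_fetch_retry()
 * until they observe a consistent snapshot.  On 64bit the seqcount is
 * compiled out and these are plain loads and stores.
 */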

struct cgroup_stat {
	/* per-cpu statistics are collected into the following global counters */
	struct task_cputime cputime;
	struct prev_cputime prev_cputime;
};

struct cgroup {
	/* self css with NULL ->ss, points back to this cgroup */
	struct cgroup_subsys_state self;

	unsigned long flags;		/* "unsigned long" so bitops work */

	/*
	 * idr allocated in-hierarchy ID.
	 *
	 * ID 0 is not used, the ID of the root cgroup is always 1, and a
	 * new cgroup is assigned the smallest available ID.
	 *
	 * Allocating/Removing ID must be protected by cgroup_mutex.
	 */
	int id;

	/*
	 * The depth this cgroup is at.  The root is at depth zero and each
	 * step down the hierarchy increments the level.  This along with
	 * ancestor_ids[] can determine whether a given cgroup is a
	 * descendant of another without traversing the hierarchy.
	 */
	int level;

	/* Maximum allowed descendant tree depth */
	int max_depth;

	/*
	 * Keep track of total numbers of visible and dying descendant cgroups.
	 * Dying cgroups are cgroups which were deleted by a user,
	 * but are still existing because someone else is holding a reference.
	 * max_descendants is the maximum allowed number of descendant cgroups.
	 */
	int nr_descendants;
	int nr_dying_descendants;
	int max_descendants;

	/*
	 * Each non-empty css_set associated with this cgroup contributes
	 * one to nr_populated_csets.  The counter is zero iff this cgroup
	 * doesn't have any tasks.
	 *
	 * All children which have non-zero nr_populated_csets and/or
	 * nr_populated_children of their own contribute one to either
	 * nr_populated_domain_children or nr_populated_threaded_children
	 * depending on their type.  Each counter is zero iff all cgroups
	 * of the type in the subtree proper don't have any tasks.
	 */
	int nr_populated_csets;
	int nr_populated_domain_children;
	int nr_populated_threaded_children;

	int nr_threaded_children;	/* # of live threaded child cgroups */

	struct kernfs_node *kn;		/* cgroup kernfs entry */
	struct cgroup_file procs_file;	/* handle for "cgroup.procs" */
	struct cgroup_file events_file;	/* handle for "cgroup.events" */

	/*
	 * The bitmask of subsystems enabled on the child cgroups.
	 * ->subtree_control is the one configured through
	 * "cgroup.subtree_control" while ->subtree_ss_mask is the
	 * effective one which may have more subsystems enabled.
	 * Controller knobs are made available iff the controller is
	 * enabled in ->subtree_control.
	 */
	u16 subtree_control;
	u16 subtree_ss_mask;
	u16 old_subtree_control;
	u16 old_subtree_ss_mask;

	/* Private pointers for each registered subsystem */
	struct cgroup_subsys_state __rcu *subsys[CGROUP_SUBSYS_COUNT];

	struct cgroup_root *root;

	/*
	 * List of cgrp_cset_links pointing at css_sets with tasks in this
	 * cgroup.  Protected by css_set_lock.
	 */
	struct list_head cset_links;

	/*
	 * On the default hierarchy, a css_set for a cgroup with some
	 * subsys disabled will point to css's which are associated with
	 * the closest ancestor which has the subsys enabled.  The
	 * following lists all css_sets which point to this cgroup's css
	 * for the given subsystem.
	 */
	struct list_head e_csets[CGROUP_SUBSYS_COUNT];

	/*
	 * If !threaded, self.  If threaded, it points to the nearest
	 * domain ancestor.  Inside a threaded subtree, cgroups are exempt
	 * from process granularity and no-internal-task constraint.
	 * Domain level resource consumptions which aren't tied to a
	 * specific task are charged to the dom_cgrp.
	 */
	struct cgroup *dom_cgrp;

	/* cgroup basic resource statistics */
	struct cgroup_cpu_stat __percpu *cpu_stat;
	struct cgroup_stat pending_stat;	/* pending from children */
	struct cgroup_stat stat;

	/*
	 * list of pidlists, up to two for each namespace (one for procs, one
	 * for tasks); created on demand.
	 */
	struct list_head pidlists;
	struct mutex pidlist_mutex;

	/* used to wait for offlining of csses */
	wait_queue_head_t offline_waitq;

	/* used to schedule release agent */
	struct work_struct release_agent_work;

	/* used to store eBPF programs */
	struct cgroup_bpf bpf;

	/* ids of the ancestors at each level including self */
	int ancestor_ids[];
};
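
/*
 * Example (a sketch, not part of the original header): ->level together
 * with ->ancestor_ids[] gives an O(1) descendancy test without walking the
 * tree, along the lines of what cgroup core's cgroup_is_descendant() does
 * (which additionally checks that both cgroups are on the same ->root):
 */
static inline bool cgroup_example_is_descendant(const struct cgroup *cgrp,
						const struct cgroup *ancestor)
{
	/*
	 * A descendant sits at the same or a deeper level and records
	 * the ancestor's ID at the ancestor's level.
	 */
	return cgrp->level >= ancestor->level &&
	       cgrp->ancestor_ids[ancestor->level] == ancestor->id;
}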

/*
 * A cgroup_root represents the root of a cgroup hierarchy, and may be
 * associated with a kernfs_root to form an active hierarchy.  This is
 * internal to cgroup core.  Don't access directly from controllers.
 */
struct cgroup_root {
	struct kernfs_root *kf_root;

	/* The bitmask of subsystems attached to this hierarchy */
	unsigned int subsys_mask;

	/* Unique id for this hierarchy. */
	int hierarchy_id;

	/* The root cgroup.  Root is destroyed on its release. */
	struct cgroup cgrp;

	/* for cgrp->ancestor_ids[0] */
	int cgrp_ancestor_id_storage;

	/* Number of cgroups in the hierarchy, used only for /proc/cgroups */
	atomic_t nr_cgrps;

	/* A list running through the active hierarchies */
	struct list_head root_list;

	/* Hierarchy-specific flags */
	unsigned int flags;

	/* IDs for cgroups in this hierarchy */
	struct idr cgroup_idr;

	/* The path to use for release notifications. */
	char release_agent_path[PATH_MAX];

	/* The name for this hierarchy - may be empty */
	char name[MAX_CGROUP_ROOT_NAMELEN];
};

/*
 * struct cftype: handler definitions for cgroup control files
 *
 * When reading/writing to a file:
 *	- the cgroup to use is file->f_path.dentry->d_parent->d_fsdata
 *	- the 'cftype' of the file is file->f_path.dentry->d_fsdata
 */
struct cftype {
	/*
	 * By convention, the name should begin with the name of the
	 * subsystem, followed by a period.  Zero length string indicates
	 * end of cftype array.
	 */
	char name[MAX_CFTYPE_NAME];
	unsigned long private;

	/*
	 * The maximum length of string, excluding trailing nul, that can
	 * be passed to write.  If < PAGE_SIZE-1, PAGE_SIZE-1 is assumed.
	 */
	size_t max_write_len;

	/* CFTYPE_* flags */
	unsigned int flags;

	/*
	 * If non-zero, should contain the offset from the start of css to
	 * a struct cgroup_file field.  cgroup will record the handle of
	 * the created file into it.  The recorded handle can be used as
	 * long as the containing css remains accessible.
	 */
	unsigned int file_offset;

	/*
	 * Fields used for internal bookkeeping.  Initialized automatically
	 * during registration.
	 */
	struct cgroup_subsys *ss;	/* NULL for cgroup core files */
	struct list_head node;		/* anchored at ss->cfts */
	struct kernfs_ops *kf_ops;

	int (*open)(struct kernfs_open_file *of);
	void (*release)(struct kernfs_open_file *of);

	/*
	 * read_u64() is a shortcut for the common case of returning a
	 * single integer.  Use it in place of read().
	 */
	u64 (*read_u64)(struct cgroup_subsys_state *css, struct cftype *cft);
	/*
	 * read_s64() is a signed version of read_u64().
	 */
	s64 (*read_s64)(struct cgroup_subsys_state *css, struct cftype *cft);

	/* generic seq_file read interface */
	int (*seq_show)(struct seq_file *sf, void *v);

	/* optional ops, implement all or none */
	void *(*seq_start)(struct seq_file *sf, loff_t *ppos);
	void *(*seq_next)(struct seq_file *sf, void *v, loff_t *ppos);
	void (*seq_stop)(struct seq_file *sf, void *v);

	/*
	 * write_u64() is a shortcut for the common case of accepting
	 * a single integer (as parsed by simple_strtoull) from
	 * userspace.  Use in place of write(); return 0 or error.
	 */
	int (*write_u64)(struct cgroup_subsys_state *css, struct cftype *cft,
			 u64 val);
	/*
	 * write_s64() is a signed version of write_u64().
	 */
	int (*write_s64)(struct cgroup_subsys_state *css, struct cftype *cft,
			 s64 val);

	/*
	 * write() is the generic write callback which maps directly to
	 * kernfs write operation and overrides all other operations.
	 * Maximum write size is determined by ->max_write_len.  Use
	 * of_css/cft() to access the associated css and cft.
	 */
	ssize_t (*write)(struct kernfs_open_file *of,
			 char *buf, size_t nbytes, loff_t off);

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lock_class_key	lockdep_key;
#endif
};
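
/*
 * Example (a sketch, not part of the original header; foo_* names are
 * hypothetical): a controller describes its files as a cftype array
 * terminated by an entry with a zero-length name:
 *
 *	static struct cftype foo_files[] = {
 *		{
 *			.name		= "weight",
 *			.read_u64	= foo_weight_read,
 *			.write_u64	= foo_weight_write,
 *		},
 *		{ }
 *	};
 *
 * The empty entry terminates the array.  For a controller-owned file,
 * cgroup core typically prefixes the controller name, so this would show
 * up as e.g. "foo.weight".
 */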

/*
 * Control Group subsystem type.
 * See Documentation/cgroup-v1/cgroups.txt for details.
 */
struct cgroup_subsys {
	struct cgroup_subsys_state *(*css_alloc)(struct cgroup_subsys_state *parent_css);
	int (*css_online)(struct cgroup_subsys_state *css);
	void (*css_offline)(struct cgroup_subsys_state *css);
	void (*css_released)(struct cgroup_subsys_state *css);
	void (*css_free)(struct cgroup_subsys_state *css);
	void (*css_reset)(struct cgroup_subsys_state *css);
	int (*css_extra_stat_show)(struct seq_file *seq,
				   struct cgroup_subsys_state *css);

	int (*can_attach)(struct cgroup_taskset *tset);
	void (*cancel_attach)(struct cgroup_taskset *tset);
	void (*attach)(struct cgroup_taskset *tset);
	void (*post_attach)(void);
	int (*can_fork)(struct task_struct *task);
	void (*cancel_fork)(struct task_struct *task);
	void (*fork)(struct task_struct *task);
	void (*exit)(struct task_struct *task);
	void (*free)(struct task_struct *task);
	void (*bind)(struct cgroup_subsys_state *root_css);

	bool early_init:1;

	/*
	 * If %true, the controller, on the default hierarchy, doesn't show
	 * up in "cgroup.controllers" or "cgroup.subtree_control", is
	 * implicitly enabled on all cgroups on the default hierarchy, and
	 * bypasses the "no internal process" constraint.  This is for
	 * utility type controllers which are transparent to userland.
	 *
	 * An implicit controller can be stolen from the default hierarchy
	 * anytime and thus must be okay with offline csses from previous
	 * hierarchies coexisting with csses for the current one.
	 */
	bool implicit_on_dfl:1;

	/*
	 * If %true, the controller supports threaded mode on the default
	 * hierarchy.  In a threaded subtree, both process granularity and
	 * the no-internal-process constraint are ignored and threaded
	 * controllers should be able to handle that.
	 *
	 * Note that as an implicit controller is automatically enabled on
	 * all cgroups on the default hierarchy, it should also be
	 * threaded.  implicit && !threaded is not supported.
	 */
	bool threaded:1;

	/*
	 * If %false, this subsystem is properly hierarchical -
	 * configuration, resource accounting and restriction on a parent
	 * cgroup cover those of its children.  If %true, hierarchy support
	 * is broken in some ways - some subsystems ignore hierarchy
	 * completely while others are only implemented half-way.
	 *
	 * It's now disallowed to create nested cgroups if the subsystem is
	 * broken and cgroup core will emit a warning message in such
	 * cases.  Eventually, all subsystems will be made properly
	 * hierarchical and this will go away.
	 */
	bool broken_hierarchy:1;
	bool warned_broken_hierarchy:1;

	/* the following two fields are initialized automatically during boot */
	int id;
	const char *name;

	/* optional, initialized automatically during boot if not set */
	const char *legacy_name;

	/* link to parent, protected by cgroup_lock() */
	struct cgroup_root *root;

	/* idr for css->id */
	struct idr css_idr;

	/*
	 * List of cftypes.  Each entry is the first entry of an array
	 * terminated by zero length name.
	 */
	struct list_head cfts;

	/*
	 * Base cftypes which are automatically registered.  The two can
	 * point to the same array.
	 */
	struct cftype *dfl_cftypes;	/* for the default hierarchy */
	struct cftype *legacy_cftypes;	/* for the legacy hierarchies */

	/*
	 * A subsystem may depend on other subsystems.  When such subsystem
	 * is enabled on a cgroup, the depended-upon subsystems are enabled
	 * together if available.  Subsystems enabled due to dependency are
	 * not visible to userland until explicitly enabled.  The following
	 * specifies the mask of subsystems that this one depends on.
	 */
	unsigned int depends_on;
};
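
/*
 * Example (a sketch, not part of the original header; foo_* names are
 * hypothetical, reusing foo_files from the cftype example above): a
 * controller instantiates this type exactly once, e.g.
 *
 *	struct cgroup_subsys foo_cgrp_subsys = {
 *		.css_alloc	= foo_css_alloc,
 *		.css_free	= foo_css_free,
 *		.dfl_cftypes	= foo_files,
 *		.legacy_cftypes	= foo_files,
 *	};
 *
 * The instance must be named <name>_cgrp_subsys and the controller listed
 * in linux/cgroup_subsys.h so the SUBSYS() x-macros can find both the ID
 * and the instance.
 */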

extern struct percpu_rw_semaphore cgroup_threadgroup_rwsem;

/**
 * cgroup_threadgroup_change_begin - threadgroup exclusion for cgroups
 * @tsk: target task
 *
 * Allows cgroup operations to synchronize against threadgroup changes
 * using a percpu_rw_semaphore.
 */
static inline void cgroup_threadgroup_change_begin(struct task_struct *tsk)
{
	percpu_down_read(&cgroup_threadgroup_rwsem);
}

/**
 * cgroup_threadgroup_change_end - threadgroup exclusion for cgroups
 * @tsk: target task
 *
 * Counterpart of cgroup_threadgroup_change_begin().
 */
static inline void cgroup_threadgroup_change_end(struct task_struct *tsk)
{
	percpu_up_read(&cgroup_threadgroup_rwsem);
}
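
/*
 * Example (a sketch, not part of the original header): the fork path
 * brackets the point where a new task is linked into its threadgroup,
 * roughly
 *
 *	cgroup_threadgroup_change_begin(current);
 *	...link the child into the threadgroup...
 *	cgroup_threadgroup_change_end(current);
 *
 * This is the read side of the percpu_rw_semaphore and thus cheap; cgroup
 * migration takes cgroup_threadgroup_rwsem for writing to exclude all
 * threadgroup changes at once.
 */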

#else	/* CONFIG_CGROUPS */

#define CGROUP_SUBSYS_COUNT 0

static inline void cgroup_threadgroup_change_begin(struct task_struct *tsk)
{
	might_sleep();
}

static inline void cgroup_threadgroup_change_end(struct task_struct *tsk) {}

#endif	/* CONFIG_CGROUPS */

#ifdef CONFIG_SOCK_CGROUP_DATA

/*
 * sock_cgroup_data is embedded at sock->sk_cgrp_data and contains
 * per-socket cgroup information except for memcg association.
 *
 * On legacy hierarchies, net_prio and net_cls controllers directly set
 * attributes on each sock which can then be tested by the network layer.
 * On the default hierarchy, each sock is associated with the cgroup it was
 * created in and the networking layer can match the cgroup directly.
 *
 * To avoid carrying all three cgroup related fields separately in sock,
 * sock_cgroup_data overloads (prioidx, classid) and the cgroup pointer.
 * On boot, sock_cgroup_data records the cgroup that the sock was created
 * in so that cgroup2 matches can be made; however, once either net_prio or
 * net_cls starts being used, the area is overridden to carry prioidx and/or
 * classid.  The two modes are distinguished by whether the lowest bit is
 * set.  A clear bit indicates a cgroup pointer while a set bit indicates
 * prioidx and classid.
 *
 * While userland may start using net_prio or net_cls at any time, once
 * either is used, cgroup2 matching no longer works.  There is no reason to
 * mix the two and this is in line with how legacy and v2 compatibility is
 * handled.  On mode switch, cgroup references which are already being
 * pointed to by socks may be leaked.  While this can be remedied by adding
 * synchronization around sock_cgroup_data, given that the number of leaked
 * cgroups is bounded and highly unlikely to be high, this seems to be the
 * better trade-off.
 */
struct sock_cgroup_data {
	union {
#ifdef __LITTLE_ENDIAN
		struct {
			u8	is_data;
			u8	padding;
			u16	prioidx;
			u32	classid;
		} __packed;
#else
		struct {
			u32	classid;
			u16	prioidx;
			u8	padding;
			u8	is_data;
		} __packed;
#endif
		u64		val;
	};
};

/*
 * There's a theoretical window where the following accessors race with
 * updaters and return part of the previous pointer as the prioidx or
 * classid.  Such races are short-lived and the result isn't critical.
 */
static inline u16 sock_cgroup_prioidx(struct sock_cgroup_data *skcd)
{
	/* fallback to 1 which is always the ID of the root cgroup */
	return (skcd->is_data & 1) ? skcd->prioidx : 1;
}

static inline u32 sock_cgroup_classid(struct sock_cgroup_data *skcd)
{
	/* fallback to 0 which is the unconfigured default classid */
	return (skcd->is_data & 1) ? skcd->classid : 0;
}

/*
 * If invoked concurrently, the updaters may clobber each other.  The
 * caller is responsible for synchronization.
 */
static inline void sock_cgroup_set_prioidx(struct sock_cgroup_data *skcd,
					   u16 prioidx)
{
	struct sock_cgroup_data skcd_buf = {{ .val = READ_ONCE(skcd->val) }};

	if (sock_cgroup_prioidx(&skcd_buf) == prioidx)
		return;

	if (!(skcd_buf.is_data & 1)) {
		skcd_buf.val = 0;
		skcd_buf.is_data = 1;
	}

	skcd_buf.prioidx = prioidx;
	WRITE_ONCE(skcd->val, skcd_buf.val);	/* see sock_cgroup_ptr() */
}

static inline void sock_cgroup_set_classid(struct sock_cgroup_data *skcd,
					   u32 classid)
{
	struct sock_cgroup_data skcd_buf = {{ .val = READ_ONCE(skcd->val) }};

	if (sock_cgroup_classid(&skcd_buf) == classid)
		return;

	if (!(skcd_buf.is_data & 1)) {
		skcd_buf.val = 0;
		skcd_buf.is_data = 1;
	}

	skcd_buf.classid = classid;
	WRITE_ONCE(skcd->val, skcd_buf.val);	/* see sock_cgroup_ptr() */
}
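
/*
 * Example (a sketch, not part of the original header): net_prio flipping a
 * sock into (prioidx, classid) mode and reading the value back, assuming
 * sk is a struct sock as described above:
 *
 *	sock_cgroup_set_prioidx(&sk->sk_cgrp_data, 4);
 *	prioidx = sock_cgroup_prioidx(&sk->sk_cgrp_data);	now returns 4
 *
 * The first setter call zeroes the stored cgroup pointer and sets is_data,
 * so cgroup2 matching on this sock stops working from that point on.
 */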

#else	/* CONFIG_SOCK_CGROUP_DATA */

struct sock_cgroup_data {
};

#endif	/* CONFIG_SOCK_CGROUP_DATA */

#endif	/* _LINUX_CGROUP_DEFS_H */