/linux-6.15/tools/testing/selftests/bpf/prog_tests/
cgroup_hierarchical_stats.c:
     54  } cgroups[] = {
     64  #define N_CGROUPS ARRAY_SIZE(cgroups)
    133  fd = create_and_get_cgroup(cgroups[i].path);                     in setup_cgroups()
    137  cgroups[i].fd = fd;                                              in setup_cgroups()
    138  cgroups[i].id = get_cgroup_id(cgroups[i].path);                  in setup_cgroups()
    147  close(cgroups[i].fd);                                            in cleanup_cgroups()
    175  if (join_parent_cgroup(cgroups[i].path))                         in attach_processes()
    220  attach_counters[i] = get_attach_counter(cgroups[i].id,           in check_attach_counters()
    221          cgroups[i].name);                                        in check_attach_counters()
    288  err = setup_cgroup_iter(*skel, cgroups[i].fd, cgroups[i].name);  in setup_progs()
    [all …]
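
For orientation, a minimal userspace sketch of what those helpers boil down to: create the cgroup directory, then recover its 64-bit id from the cgroupfs file handle. It assumes cgroup2 mounted at /sys/fs/cgroup; the "test" group name and the bare error handling are illustrative, not the selftest's actual code.

    #define _GNU_SOURCE
    #include <errno.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/stat.h>

    static unsigned long long cgroup_id(const char *path)
    {
        /* cgroup2 encodes the 64-bit cgroup id in an 8-byte file handle */
        union {
            struct file_handle fh;
            char storage[sizeof(struct file_handle) + 8];
        } u;
        unsigned long long id = 0;
        int mount_id;

        memset(&u, 0, sizeof(u));
        u.fh.handle_bytes = 8;
        if (name_to_handle_at(AT_FDCWD, path, &u.fh, &mount_id, 0))
            return 0;
        memcpy(&id, u.fh.f_handle, sizeof(id));
        return id;
    }

    int main(void)
    {
        const char *cg = "/sys/fs/cgroup/test";    /* illustrative path */

        if (mkdir(cg, 0755) && errno != EEXIST)
            return 1;
        printf("%s id=%llu\n", cg, cgroup_id(cg));
        return 0;
    }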
|
/linux-6.15/tools/cgroup/
memcg_shrinker.py:
     11  cgroups = {}
     17  cgroups[ino] = path
     20  return cgroups
     44  cgroups = scan_cgroups("/sys/fs/cgroup/")
     58  cg = cgroups[ino]
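
The script's scan_cgroups() walks the mounted hierarchy and records each cgroup's inode number against its path before decoding shrinker entries. A rough C equivalent of that scan step, assuming the hierarchy is mounted at /sys/fs/cgroup (the output format is illustrative):

    #define _XOPEN_SOURCE 700
    #include <ftw.h>
    #include <stdio.h>
    #include <sys/stat.h>

    /* Print "<inode> <path>" for every cgroup directory in the hierarchy;
     * memcg_shrinker.py keeps the same pairs in a dict keyed by inode.
     */
    static int visit(const char *path, const struct stat *sb,
                     int typeflag, struct FTW *ftwbuf)
    {
        if (typeflag == FTW_D)    /* every directory is a cgroup */
            printf("%llu %s\n", (unsigned long long)sb->st_ino, path);
        return 0;                 /* keep walking */
    }

    int main(void)
    {
        /* at most 32 open fds, and stay on the cgroup filesystem */
        return nftw("/sys/fs/cgroup", visit, 32, FTW_MOUNT) ? 1 : 0;
    }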
|
/linux-6.15/Documentation/admin-guide/cgroup-v1/
cgroups.rst:
     21  1.1 What are cgroups ?
     22  1.2 Why are cgroups needed ?
     23  1.3 How are cgroups implemented ?
     26  1.6 How do I use cgroups ?
     41  1.1 What are cgroups ?
     85  1.2 Why are cgroups needed ?
    107  cgroups.
    152  proliferation of such cgroups.
    172  1.3 How are cgroups implemented ?
    311  1.6 How do I use cgroups ?
    [all …]
|
net_cls.rst:
      9  different priorities to packets from different cgroups.
     13  Creating a net_cls cgroups instance creates a net_cls.classid file.
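
As a small illustration of the interface the document describes, a hedged sketch that writes the classid handle 10:1 (0x100001 in net_cls's major:minor encoding) into a hypothetical group's net_cls.classid file; the group is assumed to have been created beforehand:

    #include <stdio.h>

    int main(void)
    {
        /* hypothetical v1 net_cls cgroup, created by the admin */
        const char *path = "/sys/fs/cgroup/net_cls/mygroup/net_cls.classid";
        FILE *f = fopen(path, "w");

        if (!f)
            return 1;
        /* 0xAAAABBBB encoding: major 0xAAAA, minor 0xBBBB -> handle 10:1 */
        fprintf(f, "0x100001\n");
        return fclose(f) ? 1 : 0;
    }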
|
devices.rst:
     43  Any task can move itself between cgroups. This clearly won't
     60  device cgroups maintain hierarchy by making sure a cgroup never has more
    121  not be possible once the device cgroups has children.
    126  device cgroups is implemented internally using a behavior (ALLOW, DENY) and a
|
freezer-subsystem.rst:
      9  whole. The cgroup freezer uses cgroups to describe the set of tasks to
     57  tasks belonging to the cgroup and all its descendant cgroups. Each
     73  to the cgroup or one of its descendant cgroups until the new task is
     79  descendant cgroups.
|
index.rst:
     10  cgroups
|
memory.rst:
    293  The reclaim algorithm has not been modified for cgroups, except that
    409  2. Prepare the cgroups (see :ref:`Why are cgroups needed?
    410  <cgroups-why-needed>` for the background information)::
    681  The hierarchy is created by creating the appropriate cgroups in the
    745  reclaiming memory for balancing between memory cgroups
    765  API (see cgroups.txt). It allows to register multiple memory and memsw
    790  API (See cgroups.txt). It allows to register multiple OOM notification
    944  .. [3] Emelianov, Pavel. Resource controllers based on process cgroups
    946  .. [4] Emelianov, Pavel. RSS controller based on process cgroups (v2)
    948  .. [5] Emelianov, Pavel. RSS controller based on process cgroups (v3)
    [all …]
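
The memory.rst walkthrough amounts to three steps: create a cgroup, write a limit into memory.limit_in_bytes, and move a task in through the tasks file. A minimal sketch of those steps, assuming the v1 memory controller is mounted at /sys/fs/cgroup/memory; the group name "demo" and the 4M limit are illustrative:

    #include <errno.h>
    #include <stdio.h>
    #include <sys/stat.h>
    #include <unistd.h>

    /* Write a short string into a cgroup control file. */
    static int cg_write(const char *path, const char *val)
    {
        FILE *f = fopen(path, "w");
        int ret;

        if (!f)
            return -1;
        ret = fprintf(f, "%s", val) < 0;
        return (fclose(f) || ret) ? -1 : 0;
    }

    int main(void)
    {
        char pid[32];

        /* 1. create the group */
        if (mkdir("/sys/fs/cgroup/memory/demo", 0755) && errno != EEXIST)
            return 1;
        /* 2. cap its memory usage at 4M, as in the memory.rst walkthrough */
        if (cg_write("/sys/fs/cgroup/memory/demo/memory.limit_in_bytes", "4M\n"))
            return 1;
        /* 3. move this process into the group */
        snprintf(pid, sizeof(pid), "%d\n", getpid());
        return cg_write("/sys/fs/cgroup/memory/demo/tasks", pid) ? 1 : 0;
    }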
|
/linux-6.15/tools/perf/util/
cgroup.c:
    569  down_write(&env->cgroups.lock);                                  in cgroup__findnew()
    570  cgrp = __cgroup__findnew(&env->cgroups.tree, id, true, path);    in cgroup__findnew()
    571  up_write(&env->cgroups.lock);                                    in cgroup__findnew()
    584  down_read(&env->cgroups.lock);                                   in cgroup__find()
    585  cgrp = __cgroup__findnew(&env->cgroups.tree, id, false, NULL);   in cgroup__find()
    586  up_read(&env->cgroups.lock);                                     in cgroup__find()
    595  down_write(&env->cgroups.lock);                                  in perf_env__purge_cgroups()
    596  while (!RB_EMPTY_ROOT(&env->cgroups.tree)) {                     in perf_env__purge_cgroups()
    597  node = rb_first(&env->cgroups.tree);                             in perf_env__purge_cgroups()
    600  rb_erase(node, &env->cgroups.tree);                              in perf_env__purge_cgroups()
    [all …]
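
The pattern in these hits is that perf guards its per-env cgroup rb tree with a reader/writer lock: pure lookups (cgroup__find) take it for reading, find-or-create (cgroup__findnew) and teardown take it for writing. A userspace analogue of that discipline, sketched with a pthread rwlock around POSIX tsearch()/tfind() rather than the kernel-style rb tree; all names here are hypothetical:

    #include <pthread.h>
    #include <search.h>
    #include <stdint.h>
    #include <stdlib.h>

    struct cgrp { uint64_t id; };

    static void *tree;    /* tsearch() root */
    static pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;

    static int cmp(const void *a, const void *b)
    {
        const struct cgrp *x = a, *y = b;

        return (x->id > y->id) - (x->id < y->id);
    }

    /* lookup only: readers can run concurrently */
    static struct cgrp *cgrp_find(uint64_t id)
    {
        struct cgrp key = { .id = id }, **slot;

        pthread_rwlock_rdlock(&lock);
        slot = tfind(&key, &tree, cmp);
        pthread_rwlock_unlock(&lock);
        return slot ? *slot : NULL;
    }

    /* find-or-create: may modify the tree, so take the lock for writing */
    static struct cgrp *cgrp_findnew(uint64_t id)
    {
        struct cgrp *new, **slot;

        new = calloc(1, sizeof(*new));
        if (!new)
            return NULL;
        new->id = id;

        pthread_rwlock_wrlock(&lock);
        slot = tsearch(new, &tree, cmp);
        if (!slot || *slot != new)    /* OOM, or id already present */
            free(new);
        pthread_rwlock_unlock(&lock);
        return slot ? *slot : NULL;
    }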
|
bpf_lock_contention.c:
    286  read_all_cgroups(&con->cgroups);                                 in lock_contention_prepare()
    519  struct cgroup *cgrp = __cgroup__find(&con->cgroups, cgrp_id);    in lock_contention_get_name()
    737  while (!RB_EMPTY_ROOT(&con->cgroups)) {                          in lock_contention_finish()
    738  struct rb_node *node = rb_first(&con->cgroups);                  in lock_contention_finish()
    741  rb_erase(node, &con->cgroups);                                   in lock_contention_finish()
|
cgroup.h:
     31  int evlist__expand_cgroup(struct evlist *evlist, const char *cgroups,
|
lock-contention.h:
    144  struct rb_root cgroups;
|
/linux-6.15/tools/testing/selftests/bpf/progs/
percpu_alloc_cgrp_local_storage.c:
     30  e = bpf_cgrp_storage_get(&cgrp, task->cgroups->dfl_cgrp, 0,       in BPF_PROG()
     56  e = bpf_cgrp_storage_get(&cgrp, task->cgroups->dfl_cgrp, 0, 0);   in BPF_PROG()
     89  e = bpf_cgrp_storage_get(&cgrp, task->cgroups->dfl_cgrp, 0, 0);   in BPF_PROG()
|
rcu_read_lock.c:
     33  struct css_set *cgroups;                     in get_cgroup_id()
     41  cgroups = task->cgroups;                     in get_cgroup_id()
     42  if (!cgroups)                                in get_cgroup_id()
     44  cgroup_id = cgroups->dfl_cgrp->kn->id;       in get_cgroup_id()
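
task->cgroups is an RCU-protected pointer, which is why this selftest brackets the read with an explicit RCU read-side section. A hedged sketch of the same access pattern, assuming the running kernel exports the bpf_rcu_read_lock()/bpf_rcu_read_unlock() kfuncs; the tp_btf/sys_enter attach point and the global variable are illustrative:

    #include "vmlinux.h"
    #include <bpf/bpf_helpers.h>
    #include <bpf/bpf_tracing.h>

    /* RCU read-side kfuncs; assumed to be exported by the running kernel */
    void bpf_rcu_read_lock(void) __ksym;
    void bpf_rcu_read_unlock(void) __ksym;

    __u64 cgroup_id;    /* read back by the userspace side of the skeleton */

    SEC("tp_btf/sys_enter")
    int BPF_PROG(read_dfl_cgroup_id)
    {
        struct task_struct *task = bpf_get_current_task_btf();
        struct css_set *cgroups;

        bpf_rcu_read_lock();
        cgroups = task->cgroups;    /* RCU-protected pointer */
        if (cgroups)
            cgroup_id = cgroups->dfl_cgrp->kn->id;
        bpf_rcu_read_unlock();
        return 0;
    }

    char _license[] SEC("license") = "GPL";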
|
cgrp_ls_recursion.c:
     59  __on_update(task->cgroups->dfl_cgrp);            in BPF_PROG()
     92  __on_enter(regs, id, task->cgroups->dfl_cgrp);   in BPF_PROG()
|
cgrp_ls_sleepable.c:
     86  __no_rcu_lock(task->cgroups->dfl_cgrp);          in no_rcu_lock()
    118  cgrp = task->cgroups->dfl_cgrp;                  in yes_rcu_lock()
|
cgrp_ls_tp_btf.c:
     86  __on_enter(regs, id, task->cgroups->dfl_cgrp);   in BPF_PROG()
    124  __on_exit(regs, id, task->cgroups->dfl_cgrp);    in BPF_PROG()
|
/linux-6.15/Documentation/admin-guide/
cgroup-v2.rst:
    432  both cgroups.
    850  cgroups.
    872  all cgroups.
    898  all cgroups.
    923  cgroups.
   1008  of any ancestor cgroups. If any of ancestor cgroups is frozen, the
   1019  create new sub-cgroups.
   1234  cgroups.
   1717  cgroups.
   1782  cgroups.
    [all …]
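
Several of these passages assume the basic v2 workflow: enable controllers in the parent's cgroup.subtree_control, create a child cgroup, move processes in via cgroup.procs, and (for the freezer hits) toggle cgroup.freeze. A compact sketch of those steps, assuming the unified hierarchy is mounted at /sys/fs/cgroup; the child name "demo" and the controller list are illustrative:

    #include <errno.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/stat.h>
    #include <unistd.h>

    /* Write a value into a v2 control file with plain write(2). */
    static int cg_write(const char *path, const char *val)
    {
        int fd = open(path, O_WRONLY);
        ssize_t n;

        if (fd < 0)
            return -1;
        n = write(fd, val, strlen(val));
        close(fd);
        return n == (ssize_t)strlen(val) ? 0 : -1;
    }

    int main(void)
    {
        char pid[32];

        /* give children of the root the memory and io controllers */
        if (cg_write("/sys/fs/cgroup/cgroup.subtree_control", "+memory +io"))
            return 1;
        /* create a child cgroup */
        if (mkdir("/sys/fs/cgroup/demo", 0755) && errno != EEXIST)
            return 1;
        /* migrate this process, then freeze and thaw the whole subtree */
        snprintf(pid, sizeof(pid), "%d", getpid());
        if (cg_write("/sys/fs/cgroup/demo/cgroup.procs", pid))
            return 1;
        if (cg_write("/sys/fs/cgroup/demo/cgroup.freeze", "1"))
            return 1;
        return cg_write("/sys/fs/cgroup/demo/cgroup.freeze", "0") ? 1 : 0;
    }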
|
/linux-6.15/tools/perf/Documentation/
perf-bench.txt:
    128  --cgroups=::
    129  Names of cgroups for sender and receiver, separated by a comma.
    131  Note that perf doesn't create nor delete the cgroups, so users should
    132  make sure that the cgroups exist and are accessible before use.
    154  (executing 1000000 pipe operations between cgroups)
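
For example, once the two groups exist the pipe benchmark can be pinned with something like perf bench sched pipe --cgroups=foo,bar, which places the sender in foo and the receiver in bar (the group names are hypothetical; as the text above notes, perf will not create them for you).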
|
/linux-6.15/block/
Kconfig.iosched:
     38  (cgroups-v1) or io (cgroups-v2) controller.
|
/linux-6.15/Documentation/gpu/
drm-compute.rst:
     38  controlling resources. The standard kernel way of doing so is cgroups.
     40  This creates a third option, using cgroups to prevent eviction. Both GPU and
     43  into cgroups, that will allow jobs to run next to each other without
|
/linux-6.15/tools/perf/util/bpf_skel/
off_cpu.bpf.c:
    126  return BPF_CORE_READ(t, cgroups, dfl_cgrp, kn, id);                  in get_cgroup_id()
    137  cgrp = BPF_CORE_READ(t, cgroups, subsys[perf_subsys_id], cgroup);    in get_cgroup_id()
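
BPF_CORE_READ() keeps the pointer walk relocatable across kernel versions, which is why off_cpu.bpf.c uses it for the task -> css_set -> default cgroup -> kernfs id chain. A hedged sketch of the same chain as a standalone tracing program; the sched_switch attach point and the global variable are illustrative:

    #include "vmlinux.h"
    #include <bpf/bpf_helpers.h>
    #include <bpf/bpf_core_read.h>
    #include <bpf/bpf_tracing.h>

    __u64 last_cgroup_id;    /* read back from userspace via the skeleton */

    SEC("tp_btf/sched_switch")
    int BPF_PROG(on_switch, bool preempt, struct task_struct *prev,
                 struct task_struct *next)
    {
        /* CO-RE walk: task -> css_set -> default cgroup -> kernfs node id */
        last_cgroup_id = BPF_CORE_READ(next, cgroups, dfl_cgrp, kn, id);
        return 0;
    }

    char _license[] SEC("license") = "GPL";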
|
/linux-6.15/include/linux/
psi.h:
     63  rcu_assign_pointer(p->cgroups, to);          in cgroup_move_task()
|
/linux-6.15/Documentation/bpf/
map_cgrp_storage.rst:
      9  storage for cgroups. It is only available with ``CONFIG_CGROUPS``.
     56  ptr = bpf_cgrp_storage_get(&cgrp_storage, task->cgroups->dfl_cgrp, 0,
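
To put that call in context, a minimal sketch of a BPF_MAP_TYPE_CGRP_STORAGE user, close to the example this document gives: the map definition plus a tracing program that bumps a per-cgroup counter for the current task's default cgroup (the section name and the value type are illustrative):

    #include "vmlinux.h"
    #include <bpf/bpf_helpers.h>
    #include <bpf/bpf_tracing.h>

    struct {
        __uint(type, BPF_MAP_TYPE_CGRP_STORAGE);
        __uint(map_flags, BPF_F_NO_PREALLOC);
        __type(key, int);
        __type(value, long);
    } cgrp_storage SEC(".maps");

    SEC("tp_btf/sys_enter")
    int BPF_PROG(count_sys_enter)
    {
        struct task_struct *task = bpf_get_current_task_btf();
        long *ptr;

        /* create the per-cgroup slot on first use, then bump the counter */
        ptr = bpf_cgrp_storage_get(&cgrp_storage, task->cgroups->dfl_cgrp, 0,
                                   BPF_LOCAL_STORAGE_GET_F_CREATE);
        if (ptr)
            __sync_fetch_and_add(ptr, 1);
        return 0;
    }

    char _license[] SEC("license") = "GPL";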
|
map_cgroup_storage.rst:
     10  attach to cgroups; the programs are made available by the same Kconfig. The
     16  cgroups on their own.
    132  that uses the map. A program may be attached to multiple cgroups or have
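
For contrast with the newer cgrp storage map above, a hedged sketch of the older BPF_MAP_TYPE_CGROUP_STORAGE: the storage is tied to the cgroups the program is attached to and is reached with bpf_get_local_storage(); the cgroup_skb/egress attach point and the counter semantics are illustrative:

    #include "vmlinux.h"
    #include <bpf/bpf_helpers.h>

    struct {
        __uint(type, BPF_MAP_TYPE_CGROUP_STORAGE);
        __type(key, struct bpf_cgroup_storage_key);
        __type(value, __u64);
    } pkt_counter SEC(".maps");

    SEC("cgroup_skb/egress")
    int count_egress(struct __sk_buff *skb)
    {
        /* returns the slot for the cgroup this program instance runs for */
        __u64 *cnt = bpf_get_local_storage(&pkt_counter, 0);

        __sync_fetch_and_add(cnt, 1);
        return 1;    /* allow the packet */
    }

    char _license[] SEC("license") = "GPL";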
|