#ifndef _LINUX_CGROUP_H
#define _LINUX_CGROUP_H
/*
 * cgroup interface
 *
 * Copyright (C) 2003 BULL SA
 * Copyright (C) 2004-2006 Silicon Graphics, Inc.
 *
 */

#include <linux/sched.h>
#include <linux/kref.h>
#include <linux/cpumask.h>
#include <linux/nodemask.h>
#include <linux/rcupdate.h>
#include <linux/cgroupstats.h>
#include <linux/prio_heap.h>

#ifdef CONFIG_CGROUPS

struct cgroupfs_root;
struct cgroup_subsys;
struct inode;

extern int cgroup_init_early(void);
extern int cgroup_init(void);
extern void cgroup_init_smp(void);
extern void cgroup_lock(void);
extern void cgroup_unlock(void);
extern void cgroup_fork(struct task_struct *p);
extern void cgroup_fork_callbacks(struct task_struct *p);
extern void cgroup_post_fork(struct task_struct *p);
extern void cgroup_exit(struct task_struct *p, int run_callbacks);
extern int cgroupstats_build(struct cgroupstats *stats,
				struct dentry *dentry);

extern struct file_operations proc_cgroup_operations;

/* Define the enumeration of all cgroup subsystems */
#define SUBSYS(_x) _x ## _subsys_id,
enum cgroup_subsys_id {
#include <linux/cgroup_subsys.h>
	CGROUP_SUBSYS_COUNT
};
#undef SUBSYS

/* Per-subsystem/per-cgroup state maintained by the system. */
struct cgroup_subsys_state {
	/* The cgroup that this subsystem is attached to. Useful
	 * for subsystems that want to know about the cgroup
	 * hierarchy structure */
	struct cgroup *cgroup;

	/* State maintained by the cgroup system to allow
	 * subsystems to be "busy". Should be accessed via css_get()
	 * and css_put() */

	atomic_t refcnt;

	unsigned long flags;
};

/* bits in struct cgroup_subsys_state flags field */
enum {
	CSS_ROOT, /* This CSS is the root of the subsystem */
};

/*
 * Call css_get() to hold a reference on the cgroup;
 * release it with css_put().
 */

static inline void css_get(struct cgroup_subsys_state *css)
{
	/* We don't need to reference count the root state */
	if (!test_bit(CSS_ROOT, &css->flags))
		atomic_inc(&css->refcnt);
}

/*
 * css_put() should be called to release a reference taken by
 * css_get()
 */

extern void __css_put(struct cgroup_subsys_state *css);
static inline void css_put(struct cgroup_subsys_state *css)
{
	/* The root CSS is never reference-counted (see css_get()),
	 * so only non-root states are handed to __css_put() */
	if (!test_bit(CSS_ROOT, &css->flags))
		__css_put(css);
}

/* bits in struct cgroup flags field */
enum {
	/* Control Group is dead */
	CGRP_REMOVED,
	/* Control Group has previously had a child cgroup or a task,
	 * but no longer (only if CGRP_NOTIFY_ON_RELEASE is set) */
	CGRP_RELEASABLE,
	/* Control Group requires release notifications to userspace */
	CGRP_NOTIFY_ON_RELEASE,
};

struct cgroup {
	unsigned long flags;	/* "unsigned long" so bitops work */

	/* count users of this cgroup. >0 means busy, but doesn't
	 * necessarily indicate the number of tasks in the
	 * cgroup */
	atomic_t count;

	/*
	 * We link our 'sibling' struct into our parent's 'children'.
	 * Our children link their 'sibling' into our 'children'.
	 */
	struct list_head sibling;	/* my parent's children */
	struct list_head children;	/* my children */

	struct cgroup *parent;		/* my parent */
	struct dentry *dentry;		/* cgroup fs entry */

	/* Private pointers for each registered subsystem */
	struct cgroup_subsys_state *subsys[CGROUP_SUBSYS_COUNT];

	struct cgroupfs_root *root;
	/* NOTE(review): presumably the root cgroup of this hierarchy —
	 * confirm against the definition of cgroupfs_root */
	struct cgroup *top_cgroup;

	/*
	 * List of cg_cgroup_links pointing at css_sets with
	 * tasks in this cgroup. Protected by css_set_lock
	 */
	struct list_head css_sets;

	/*
	 * Linked list running through all cgroups that can
	 * potentially be reaped by the release agent. Protected by
	 * release_list_lock
	 */
	struct list_head release_list;
};

/* A css_set is a structure holding pointers to a set of
 * cgroup_subsys_state objects. This saves space in the task struct
 * object and speeds up fork()/exit(), since a single inc/dec and a
 * list_add()/del() can bump the reference count on the entire
 * cgroup set for a task.
 */

struct css_set {

	/* Reference count */
	struct kref ref;

	/*
	 * List running through all cgroup groups in the same hash
	 * slot. Protected by css_set_lock
	 */
	struct hlist_node hlist;

	/*
	 * List running through all tasks using this cgroup
	 * group. Protected by css_set_lock
	 */
	struct list_head tasks;

	/*
	 * List of cg_cgroup_link objects on link chains from
	 * cgroups referenced from this css_set. Protected by
	 * css_set_lock
	 */
	struct list_head cg_links;

	/*
	 * Set of subsystem states, one for each subsystem. This array
	 * is immutable after creation apart from the init_css_set
	 * during subsystem registration (at boot time).
	 */
	struct cgroup_subsys_state *subsys[CGROUP_SUBSYS_COUNT];
};

/*
 * cgroup_map_cb is an abstract callback API for reporting map-valued
 * control files
 */

struct cgroup_map_cb {
	int (*fill)(struct cgroup_map_cb *cb, const char *key, u64 value);
	void *state;
};

/* struct cftype:
 *
 * The files in the cgroup filesystem mostly have a very simple read/write
 * handling, some common function will take care of it. Nevertheless some cases
 * (read tasks) are special and therefore I define this structure for every
 * kind of file.
 *
 *
 * When reading/writing to a file:
 *	- the cgroup to use is file->f_dentry->d_parent->d_fsdata
 *	- the 'cftype' of the file is file->f_dentry->d_fsdata
 */

#define MAX_CFTYPE_NAME 64
struct cftype {
	/* By convention, the name should begin with the name of the
	 * subsystem, followed by a period */
	char name[MAX_CFTYPE_NAME];
	int private;
	int (*open) (struct inode *inode, struct file *file);
	ssize_t (*read) (struct cgroup *cgrp, struct cftype *cft,
			 struct file *file,
			 char __user *buf, size_t nbytes, loff_t *ppos);
	/*
	 * read_u64() is a shortcut for the common case of returning a
	 * single integer. Use it in place of read()
	 */
	u64 (*read_u64) (struct cgroup *cgrp, struct cftype *cft);
	/*
	 * read_s64() is a signed version of read_u64()
	 */
	s64 (*read_s64) (struct cgroup *cgrp, struct cftype *cft);
	/*
	 * read_map() is used for defining a map of key/value
	 * pairs. It should call cb->fill(cb, key, value) for each
	 * entry. The key/value pairs (and their ordering) should not
	 * change between reboots.
	 */
	int (*read_map) (struct cgroup *cont, struct cftype *cft,
			 struct cgroup_map_cb *cb);
	/*
	 * read_seq_string() is used for outputting a simple sequence
	 * using seqfile.
	 */
	int (*read_seq_string) (struct cgroup *cont, struct cftype *cft,
				struct seq_file *m);

	ssize_t (*write) (struct cgroup *cgrp, struct cftype *cft,
			  struct file *file,
			  const char __user *buf, size_t nbytes, loff_t *ppos);

	/*
	 * write_u64() is a shortcut for the common case of accepting
	 * a single integer (as parsed by simple_strtoull) from
	 * userspace. Use in place of write(); return 0 or error.
	 */
	int (*write_u64) (struct cgroup *cgrp, struct cftype *cft, u64 val);
	/*
	 * write_s64() is a signed version of write_u64()
	 */
	int (*write_s64) (struct cgroup *cgrp, struct cftype *cft, s64 val);

	/*
	 * trigger() callback can be used to get some kick from the
	 * userspace, when the actual string written is not important
	 * at all. The private field can be used to determine the
	 * kick type for multiplexing.
	 */
	int (*trigger)(struct cgroup *cgrp, unsigned int event);

	int (*release) (struct inode *inode, struct file *file);
};

/* Control structure for cgroup_scan_tasks(): selects (test_task) and
 * visits (process_task) member tasks of the cgroup 'cg' */
struct cgroup_scanner {
	struct cgroup *cg;
	int (*test_task)(struct task_struct *p, struct cgroup_scanner *scan);
	void (*process_task)(struct task_struct *p,
			struct cgroup_scanner *scan);
	struct ptr_heap *heap;
};

/* Add a new file to the given cgroup directory. Should only be
 * called by subsystems from within a populate() method */
int cgroup_add_file(struct cgroup *cgrp, struct cgroup_subsys *subsys,
		       const struct cftype *cft);

/* Add a set of new files to the given cgroup directory. Should
 * only be called by subsystems from within a populate() method */
int cgroup_add_files(struct cgroup *cgrp,
			struct cgroup_subsys *subsys,
			const struct cftype cft[],
			int count);

int cgroup_is_removed(const struct cgroup *cgrp);

int cgroup_path(const struct cgroup *cgrp, char *buf, int buflen);

int cgroup_task_count(const struct cgroup *cgrp);

/* Return true if the cgroup is a descendant of the current cgroup */
int cgroup_is_descendant(const struct cgroup *cgrp);

/* Control Group subsystem type. See Documentation/cgroups.txt for details */

struct cgroup_subsys {
	struct cgroup_subsys_state *(*create)(struct cgroup_subsys *ss,
						  struct cgroup *cgrp);
	void (*pre_destroy)(struct cgroup_subsys *ss, struct cgroup *cgrp);
	void (*destroy)(struct cgroup_subsys *ss, struct cgroup *cgrp);
	int (*can_attach)(struct cgroup_subsys *ss,
			  struct cgroup *cgrp, struct task_struct *tsk);
	void (*attach)(struct cgroup_subsys *ss, struct cgroup *cgrp,
			struct cgroup *old_cgrp, struct task_struct *tsk);
	void (*fork)(struct cgroup_subsys *ss, struct task_struct *task);
	void (*exit)(struct cgroup_subsys *ss, struct task_struct *task);
	int (*populate)(struct cgroup_subsys *ss,
			struct cgroup *cgrp);
	void (*post_clone)(struct cgroup_subsys *ss, struct cgroup *cgrp);
	void (*bind)(struct cgroup_subsys *ss, struct cgroup *root);
	/*
	 * This routine is called with the task_lock of mm->owner held
	 */
	void (*mm_owner_changed)(struct cgroup_subsys *ss,
					struct cgroup *old,
					struct cgroup *new);
	int subsys_id;
	int active;
	int disabled;
	int early_init;
#define MAX_CGROUP_TYPE_NAMELEN 32
	const char *name;

	/* Protected by RCU */
	struct cgroupfs_root *root;

	struct list_head sibling;

	void *private;
};

#define SUBSYS(_x) extern struct cgroup_subsys _x ## _subsys;
#include <linux/cgroup_subsys.h>
#undef SUBSYS

/* Return the subsystem state of 'cgrp' for the given subsystem id */
static inline struct cgroup_subsys_state *cgroup_subsys_state(
	struct cgroup *cgrp, int subsys_id)
{
	return cgrp->subsys[subsys_id];
}

/* Return the subsystem state attached to 'task' for the given
 * subsystem id; the task's css_set pointer is read under RCU */
static inline struct cgroup_subsys_state *task_subsys_state(
	struct task_struct *task, int subsys_id)
{
	return rcu_dereference(task->cgroups->subsys[subsys_id]);
}

/* Return the cgroup that 'task' belongs to in the hierarchy of the
 * given subsystem id */
static inline struct cgroup* task_cgroup(struct task_struct *task,
					       int subsys_id)
{
	return task_subsys_state(task, subsys_id)->cgroup;
}

int cgroup_clone(struct task_struct *tsk, struct cgroup_subsys *ss);

/* A cgroup_iter should be treated as an opaque object */
struct cgroup_iter {
	struct list_head *cg_link;
	struct list_head *task;
};

/* To iterate across the tasks in a cgroup:
 *
 * 1) call cgroup_iter_start to initialize an iterator
 *
 * 2) call cgroup_iter_next() to retrieve member tasks until it
 *    returns NULL or until you want to end the iteration
 *
 * 3) call cgroup_iter_end() to destroy the iterator.
 *
 * Or, call cgroup_scan_tasks() to iterate through every task in a cpuset.
 *    - cgroup_scan_tasks() holds the css_set_lock when calling the test_task()
 *      callback, but not while calling the process_task() callback.
 */
void cgroup_iter_start(struct cgroup *cgrp, struct cgroup_iter *it);
struct task_struct *cgroup_iter_next(struct cgroup *cgrp,
					struct cgroup_iter *it);
void cgroup_iter_end(struct cgroup *cgrp, struct cgroup_iter *it);
int cgroup_scan_tasks(struct cgroup_scanner *scan);
int cgroup_attach_task(struct cgroup *, struct task_struct *);

#else /* !CONFIG_CGROUPS */

/* No-op stubs so callers need not be conditional on CONFIG_CGROUPS */
static inline int cgroup_init_early(void) { return 0; }
static inline int cgroup_init(void) { return 0; }
static inline void cgroup_init_smp(void) {}
static inline void cgroup_fork(struct task_struct *p) {}
static inline void cgroup_fork_callbacks(struct task_struct *p) {}
static inline void cgroup_post_fork(struct task_struct *p) {}
static inline void cgroup_exit(struct task_struct *p, int callbacks) {}

static inline void cgroup_lock(void) {}
static inline void cgroup_unlock(void) {}
static inline int cgroupstats_build(struct cgroupstats *stats,
					struct dentry *dentry)
{
	return -EINVAL;
}

#endif /* !CONFIG_CGROUPS */

#ifdef CONFIG_MM_OWNER
extern void
cgroup_mm_owner_callbacks(struct task_struct *old, struct task_struct *new);
#else /* !CONFIG_MM_OWNER */
static inline void
cgroup_mm_owner_callbacks(struct task_struct *old, struct task_struct *new)
{
}
#endif /* CONFIG_MM_OWNER */
#endif /* _LINUX_CGROUP_H */