/* memcontrol.h - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <[email protected]>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <[email protected]>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#ifndef _LINUX_MEMCONTROL_H
#define _LINUX_MEMCONTROL_H
#include <linux/cgroup.h>
#include <linux/vm_event_item.h>

struct mem_cgroup;
struct page_cgroup;
struct page;
struct mm_struct;

/* Stats that can be updated by kernel. */
enum mem_cgroup_page_stat_item {
	MEMCG_NR_FILE_MAPPED, /* # of pages charged as file rss */
};

struct mem_cgroup_reclaim_cookie {
	struct zone *zone;
	int priority;
	unsigned int generation;
};

#ifdef CONFIG_CGROUP_MEM_RES_CTLR
/*
 * All "charge" functions with a gfp_mask should use GFP_KERNEL or
 * (gfp_mask & GFP_RECLAIM_MASK). In the current implementation, memcg
 * doesn't allocate memory itself but reclaims memory from all available
 * zones, so the "where do I want memory from" bits of gfp_mask have no
 * meaning here. Any bits of that field would work, but following a rule
 * avoids ambiguous code: set a charge function's gfp_mask to GFP_KERNEL
 * or to (gfp_mask & GFP_RECLAIM_MASK).
 * (Of course, if memcg does allocate memory in the future, GFP_KERNEL
 * is sane.)
 */
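
/*
 * For example, a caller holding an arbitrary gfp_mask would mask it
 * before charging, as shmem and other page-cache users do. A sketch of
 * a typical call site, not part of this interface:
 *
 *	error = mem_cgroup_cache_charge(page, current->mm,
 *					gfp_mask & GFP_RECLAIM_MASK);
 *	if (error)
 *		goto failed;
 */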

extern int mem_cgroup_newpage_charge(struct page *page, struct mm_struct *mm,
				gfp_t gfp_mask);
/* for swap handling */
extern int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
		struct page *page, gfp_t mask, struct mem_cgroup **memcgp);
extern void mem_cgroup_commit_charge_swapin(struct page *page,
					struct mem_cgroup *memcg);
extern void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *memcg);

extern int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
					gfp_t gfp_mask);

struct lruvec *mem_cgroup_zone_lruvec(struct zone *, struct mem_cgroup *);
struct lruvec *mem_cgroup_lru_add_list(struct zone *, struct page *,
				       enum lru_list);
void mem_cgroup_lru_del_list(struct page *, enum lru_list);
void mem_cgroup_lru_del(struct page *);
struct lruvec *mem_cgroup_lru_move_lists(struct zone *, struct page *,
					 enum lru_list, enum lru_list);

/* For coalescing uncharge, to reduce memcg's overhead */
extern void mem_cgroup_uncharge_start(void);
extern void mem_cgroup_uncharge_end(void);

extern void mem_cgroup_uncharge_page(struct page *page);
extern void mem_cgroup_uncharge_cache_page(struct page *page);

extern void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
				     int order);
int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *memcg);

extern struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page);
extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
extern struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm);

extern struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg);
extern struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont);

static inline
int mm_match_cgroup(const struct mm_struct *mm, const struct mem_cgroup *cgroup)
{
	struct mem_cgroup *memcg;
	rcu_read_lock();
	memcg = mem_cgroup_from_task(rcu_dereference((mm)->owner));
	rcu_read_unlock();
	return cgroup == memcg;
}

extern struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *memcg);

extern int
mem_cgroup_prepare_migration(struct page *page,
	struct page *newpage, struct mem_cgroup **memcgp, gfp_t gfp_mask);
extern void mem_cgroup_end_migration(struct mem_cgroup *memcg,
	struct page *oldpage, struct page *newpage, bool migration_ok);

struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
				   struct mem_cgroup *,
				   struct mem_cgroup_reclaim_cookie *);
void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
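
/*
 * mem_cgroup_iter() walks @root's subtree one memcg at a time; passing
 * the previous return value back in keeps reference counts balanced,
 * and mem_cgroup_iter_break() cancels a walk that stops early. A sketch
 * of the usual loop ("should_stop" is a hypothetical predicate):
 *
 *	struct mem_cgroup *memcg = mem_cgroup_iter(root, NULL, NULL);
 *	do {
 *		if (should_stop(memcg)) {
 *			mem_cgroup_iter_break(root, memcg);
 *			break;
 *		}
 *	} while ((memcg = mem_cgroup_iter(root, memcg, NULL)));
 */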

/*
 * For memory reclaim.
 */
int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg,
				    struct zone *zone);
int mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg,
				    struct zone *zone);
int mem_cgroup_select_victim_node(struct mem_cgroup *memcg);
unsigned long mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *memcg,
					int nid, int zid, unsigned int lrumask);
struct zone_reclaim_stat *mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg,
						      struct zone *zone);
struct zone_reclaim_stat*
mem_cgroup_get_reclaim_stat_from_page(struct page *page);
extern void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
					struct task_struct *p);
extern void mem_cgroup_replace_page_cache(struct page *oldpage,
					struct page *newpage);

#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
extern int do_swap_account;
#endif

static inline bool mem_cgroup_disabled(void)
{
	if (mem_cgroup_subsys.disabled)
		return true;
	return false;
}

void __mem_cgroup_begin_update_page_stat(struct page *page, bool *locked,
					 unsigned long *flags);

extern atomic_t memcg_moving;

static inline void mem_cgroup_begin_update_page_stat(struct page *page,
					bool *locked, unsigned long *flags)
{
	if (mem_cgroup_disabled())
		return;
	rcu_read_lock();
	*locked = false;
	if (atomic_read(&memcg_moving))
		__mem_cgroup_begin_update_page_stat(page, locked, flags);
}

void __mem_cgroup_end_update_page_stat(struct page *page,
				       unsigned long *flags);
static inline void mem_cgroup_end_update_page_stat(struct page *page,
					bool *locked, unsigned long *flags)
{
	if (mem_cgroup_disabled())
		return;
	if (*locked)
		__mem_cgroup_end_update_page_stat(page, flags);
	rcu_read_unlock();
}

void mem_cgroup_update_page_stat(struct page *page,
				 enum mem_cgroup_page_stat_item idx,
				 int val);

static inline void mem_cgroup_inc_page_stat(struct page *page,
					    enum mem_cgroup_page_stat_item idx)
{
	mem_cgroup_update_page_stat(page, idx, 1);
}

static inline void mem_cgroup_dec_page_stat(struct page *page,
					    enum mem_cgroup_page_stat_item idx)
{
	mem_cgroup_update_page_stat(page, idx, -1);
}
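
/*
 * Page-stat updates must be bracketed by the begin/end helpers above so
 * they cannot race with moving a page's charge between memcgs. A sketch
 * modeled on page_add_file_rmap():
 *
 *	bool locked;
 *	unsigned long flags;
 *
 *	mem_cgroup_begin_update_page_stat(page, &locked, &flags);
 *	if (atomic_inc_and_test(&page->_mapcount))
 *		mem_cgroup_inc_page_stat(page, MEMCG_NR_FILE_MAPPED);
 *	mem_cgroup_end_update_page_stat(page, &locked, &flags);
 */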

unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
					    gfp_t gfp_mask,
					    unsigned long *total_scanned);
u64 mem_cgroup_get_limit(struct mem_cgroup *memcg);

void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx);
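
/*
 * A typical caller counts the memcg event next to the global one, as
 * the major-fault path in filemap.c does (an illustrative sketch):
 *
 *	count_vm_event(PGMAJFAULT);
 *	mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
 */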

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void mem_cgroup_split_huge_fixup(struct page *head);
#endif

#ifdef CONFIG_DEBUG_VM
bool mem_cgroup_bad_page_check(struct page *page);
void mem_cgroup_print_bad_page(struct page *page);
#endif
#else /* CONFIG_CGROUP_MEM_RES_CTLR */
struct mem_cgroup;

static inline int mem_cgroup_newpage_charge(struct page *page,
					struct mm_struct *mm, gfp_t gfp_mask)
{
	return 0;
}

static inline int mem_cgroup_cache_charge(struct page *page,
					struct mm_struct *mm, gfp_t gfp_mask)
{
	return 0;
}

static inline int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
		struct page *page, gfp_t gfp_mask, struct mem_cgroup **memcgp)
{
	return 0;
}

static inline void mem_cgroup_commit_charge_swapin(struct page *page,
					struct mem_cgroup *memcg)
{
}

static inline void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *memcg)
{
}

static inline void mem_cgroup_uncharge_start(void)
{
}

static inline void mem_cgroup_uncharge_end(void)
{
}

static inline void mem_cgroup_uncharge_page(struct page *page)
{
}

static inline void mem_cgroup_uncharge_cache_page(struct page *page)
{
}

static inline struct lruvec *mem_cgroup_zone_lruvec(struct zone *zone,
						    struct mem_cgroup *memcg)
{
	return &zone->lruvec;
}

static inline struct lruvec *mem_cgroup_lru_add_list(struct zone *zone,
						     struct page *page,
						     enum lru_list lru)
{
	return &zone->lruvec;
}

static inline void mem_cgroup_lru_del_list(struct page *page, enum lru_list lru)
{
}

static inline void mem_cgroup_lru_del(struct page *page)
{
}

static inline struct lruvec *mem_cgroup_lru_move_lists(struct zone *zone,
						       struct page *page,
						       enum lru_list from,
						       enum lru_list to)
{
	return &zone->lruvec;
}

static inline struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
{
	return NULL;
}

static inline struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
{
	return NULL;
}

static inline int mm_match_cgroup(struct mm_struct *mm,
		struct mem_cgroup *memcg)
{
	return 1;
}

static inline int task_in_mem_cgroup(struct task_struct *task,
				     const struct mem_cgroup *memcg)
{
	return 1;
}

static inline struct cgroup_subsys_state
		*mem_cgroup_css(struct mem_cgroup *memcg)
{
	return NULL;
}

static inline int
mem_cgroup_prepare_migration(struct page *page, struct page *newpage,
	struct mem_cgroup **memcgp, gfp_t gfp_mask)
{
	return 0;
}

static inline void mem_cgroup_end_migration(struct mem_cgroup *memcg,
		struct page *oldpage, struct page *newpage, bool migration_ok)
{
}

static inline struct mem_cgroup *
mem_cgroup_iter(struct mem_cgroup *root,
		struct mem_cgroup *prev,
		struct mem_cgroup_reclaim_cookie *reclaim)
{
	return NULL;
}

static inline void mem_cgroup_iter_break(struct mem_cgroup *root,
					 struct mem_cgroup *prev)
{
}

static inline bool mem_cgroup_disabled(void)
{
	return true;
}

static inline int
mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg, struct zone *zone)
{
	return 1;
}

static inline int
mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg, struct zone *zone)
{
	return 1;
}

static inline unsigned long
mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *memcg, int nid, int zid,
			     unsigned int lru_mask)
{
	return 0;
}

static inline struct zone_reclaim_stat*
mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg, struct zone *zone)
{
	return NULL;
}

static inline struct zone_reclaim_stat*
mem_cgroup_get_reclaim_stat_from_page(struct page *page)
{
	return NULL;
}

static inline void
mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
{
}

static inline void mem_cgroup_begin_update_page_stat(struct page *page,
					bool *locked, unsigned long *flags)
{
}

static inline void mem_cgroup_end_update_page_stat(struct page *page,
					bool *locked, unsigned long *flags)
{
}

static inline void mem_cgroup_inc_page_stat(struct page *page,
					    enum mem_cgroup_page_stat_item idx)
{
}

static inline void mem_cgroup_dec_page_stat(struct page *page,
					    enum mem_cgroup_page_stat_item idx)
{
}

static inline
unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
					    gfp_t gfp_mask,
					    unsigned long *total_scanned)
{
	return 0;
}

static inline
u64 mem_cgroup_get_limit(struct mem_cgroup *memcg)
{
	return 0;
}

static inline void mem_cgroup_split_huge_fixup(struct page *head)
{
}

static inline
void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx)
{
}

static inline void mem_cgroup_replace_page_cache(struct page *oldpage,
				struct page *newpage)
{
}
#endif /* CONFIG_CGROUP_MEM_RES_CTLR */

#if !defined(CONFIG_CGROUP_MEM_RES_CTLR) || !defined(CONFIG_DEBUG_VM)
static inline bool
mem_cgroup_bad_page_check(struct page *page)
{
	return false;
}

static inline void
mem_cgroup_print_bad_page(struct page *page)
{
}
#endif

enum {
	UNDER_LIMIT,
	SOFT_LIMIT,
	OVER_LIMIT,
};

struct sock;
#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
void sock_update_memcg(struct sock *sk);
void sock_release_memcg(struct sock *sk);
#else
static inline void sock_update_memcg(struct sock *sk)
{
}
static inline void sock_release_memcg(struct sock *sk)
{
}
#endif /* CONFIG_CGROUP_MEM_RES_CTLR_KMEM */
#endif /* _LINUX_MEMCONTROL_H */