/* memcontrol.h - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <[email protected]>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <[email protected]>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#ifndef _LINUX_MEMCONTROL_H
#define _LINUX_MEMCONTROL_H
#include <linux/cgroup.h>
#include <linux/vm_event_item.h>
#include <linux/hardirq.h>
#include <linux/jump_label.h>

struct mem_cgroup;
struct page_cgroup;
struct page;
struct mm_struct;
struct kmem_cache;

/* Stats that can be updated by the kernel. */
enum mem_cgroup_page_stat_item {
	MEMCG_NR_FILE_MAPPED, /* # of pages charged as file rss */
};

struct mem_cgroup_reclaim_cookie {
	struct zone *zone;
	int priority;
	unsigned int generation;
};

#ifdef CONFIG_MEMCG
/*
 * All "charge" functions that take a gfp_mask should be passed GFP_KERNEL or
 * (gfp_mask & GFP_RECLAIM_MASK). In the current implementation, memcg does
 * not allocate memory itself but only reclaims memory from all available
 * zones, so the "where do I want memory from" bits of gfp_mask have no
 * meaning. Any bits could therefore be passed, but having a rule avoids
 * ambiguous callers: a charge function's gfp_mask should be GFP_KERNEL or
 * (gfp_mask & GFP_RECLAIM_MASK).
 * (Of course, if memcg ever allocates memory itself, GFP_KERNEL is sane.)
 */

extern int mem_cgroup_newpage_charge(struct page *page, struct mm_struct *mm,
				gfp_t gfp_mask);
/* for swap handling */
extern int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
		struct page *page, gfp_t mask, struct mem_cgroup **memcgp);
extern void mem_cgroup_commit_charge_swapin(struct page *page,
					struct mem_cgroup *memcg);
extern void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *memcg);

extern int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
					gfp_t gfp_mask);
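
/*
 * Illustrative sketch, not a declaration in this header: a fault path would
 * typically charge a new anonymous page before mapping it, and swap-in uses
 * the try/commit/cancel triple above.  The variables mm, page and memcg are
 * assumed to be whatever the caller already holds, map_the_page() is a
 * placeholder for the caller's real work, and GFP_KERNEL follows the rule
 * documented above.
 *
 *	if (mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))
 *		goto oom;			// charge refused, back off
 *
 *	if (!mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &memcg)) {
 *		if (map_the_page() < 0)
 *			mem_cgroup_cancel_charge_swapin(memcg);
 *		else
 *			mem_cgroup_commit_charge_swapin(page, memcg);
 *	}
 */
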
struct lruvec *mem_cgroup_zone_lruvec(struct zone *, struct mem_cgroup *);
struct lruvec *mem_cgroup_page_lruvec(struct page *, struct zone *);

/* For coalescing uncharges, to reduce memcg overhead. */
extern void mem_cgroup_uncharge_start(void);
extern void mem_cgroup_uncharge_end(void);

extern void mem_cgroup_uncharge_page(struct page *page);
extern void mem_cgroup_uncharge_cache_page(struct page *page);

bool __mem_cgroup_same_or_subtree(const struct mem_cgroup *root_memcg,
				  struct mem_cgroup *memcg);
int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *memcg);

extern struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page);
extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
extern struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm);

extern struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg);
extern struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont);

static inline
bool mm_match_cgroup(const struct mm_struct *mm, const struct mem_cgroup *memcg)
{
	struct mem_cgroup *task_memcg;
	bool match;

	rcu_read_lock();
	task_memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
	match = __mem_cgroup_same_or_subtree(memcg, task_memcg);
	rcu_read_unlock();
	return match;
}

extern struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *memcg);

extern void
mem_cgroup_prepare_migration(struct page *page, struct page *newpage,
			     struct mem_cgroup **memcgp);
extern void mem_cgroup_end_migration(struct mem_cgroup *memcg,
	struct page *oldpage, struct page *newpage, bool migration_ok);

struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
				   struct mem_cgroup *,
				   struct mem_cgroup_reclaim_cookie *);
void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
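
/*
 * Illustrative sketch of the hierarchy iterator above (root and iter are
 * assumed caller-local variables; passing NULL as root starts at the root
 * cgroup, and done_early() stands for whatever condition the caller checks).
 * A walk that stops early must call mem_cgroup_iter_break() so the reference
 * held by the iterator is dropped:
 *
 *	struct mem_cgroup *iter;
 *
 *	for (iter = mem_cgroup_iter(root, NULL, NULL);
 *	     iter != NULL;
 *	     iter = mem_cgroup_iter(root, iter, NULL)) {
 *		if (done_early(iter)) {
 *			mem_cgroup_iter_break(root, iter);
 *			break;
 *		}
 *	}
 */
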
/*
 * For memory reclaim.
 */
int mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec);
int mem_cgroup_select_victim_node(struct mem_cgroup *memcg);
unsigned long mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list);
void mem_cgroup_update_lru_size(struct lruvec *, enum lru_list, int);
extern void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
					struct task_struct *p);
extern void mem_cgroup_replace_page_cache(struct page *oldpage,
					struct page *newpage);

#ifdef CONFIG_MEMCG_SWAP
extern int do_swap_account;
#endif

static inline bool mem_cgroup_disabled(void)
{
	if (mem_cgroup_subsys.disabled)
		return true;
	return false;
}

void __mem_cgroup_begin_update_page_stat(struct page *page, bool *locked,
					 unsigned long *flags);

extern atomic_t memcg_moving;

static inline void mem_cgroup_begin_update_page_stat(struct page *page,
					bool *locked, unsigned long *flags)
{
	if (mem_cgroup_disabled())
		return;
	rcu_read_lock();
	*locked = false;
	if (atomic_read(&memcg_moving))
		__mem_cgroup_begin_update_page_stat(page, locked, flags);
}

void __mem_cgroup_end_update_page_stat(struct page *page,
				unsigned long *flags);
static inline void mem_cgroup_end_update_page_stat(struct page *page,
					bool *locked, unsigned long *flags)
{
	if (mem_cgroup_disabled())
		return;
	if (*locked)
		__mem_cgroup_end_update_page_stat(page, flags);
	rcu_read_unlock();
}

void mem_cgroup_update_page_stat(struct page *page,
				 enum mem_cgroup_page_stat_item idx,
				 int val);

static inline void mem_cgroup_inc_page_stat(struct page *page,
					    enum mem_cgroup_page_stat_item idx)
{
	mem_cgroup_update_page_stat(page, idx, 1);
}

static inline void mem_cgroup_dec_page_stat(struct page *page,
					    enum mem_cgroup_page_stat_item idx)
{
	mem_cgroup_update_page_stat(page, idx, -1);
}
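
/*
 * Illustrative sketch of the page-stat update protocol above, loosely modelled
 * on how an rmap-style caller would bump MEMCG_NR_FILE_MAPPED.  The variables
 * page, locked and flags are assumed caller-local, and update_the_counter()
 * is a placeholder for the state change being protected:
 *
 *	bool locked;
 *	unsigned long flags;
 *
 *	mem_cgroup_begin_update_page_stat(page, &locked, &flags);
 *	if (update_the_counter(page))
 *		mem_cgroup_inc_page_stat(page, MEMCG_NR_FILE_MAPPED);
 *	mem_cgroup_end_update_page_stat(page, &locked, &flags);
 */
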
unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
						gfp_t gfp_mask,
						unsigned long *total_scanned);

void __mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx);
static inline void mem_cgroup_count_vm_event(struct mm_struct *mm,
					     enum vm_event_item idx)
{
	if (mem_cgroup_disabled())
		return;
	__mem_cgroup_count_vm_event(mm, idx);
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void mem_cgroup_split_huge_fixup(struct page *head);
#endif

#ifdef CONFIG_DEBUG_VM
bool mem_cgroup_bad_page_check(struct page *page);
void mem_cgroup_print_bad_page(struct page *page);
#endif
#else /* CONFIG_MEMCG */
struct mem_cgroup;

static inline int mem_cgroup_newpage_charge(struct page *page,
					struct mm_struct *mm, gfp_t gfp_mask)
{
	return 0;
}

static inline int mem_cgroup_cache_charge(struct page *page,
					struct mm_struct *mm, gfp_t gfp_mask)
{
	return 0;
}

static inline int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
		struct page *page, gfp_t gfp_mask, struct mem_cgroup **memcgp)
{
	return 0;
}

static inline void mem_cgroup_commit_charge_swapin(struct page *page,
					struct mem_cgroup *memcg)
{
}

static inline void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *memcg)
{
}

static inline void mem_cgroup_uncharge_start(void)
{
}

static inline void mem_cgroup_uncharge_end(void)
{
}

static inline void mem_cgroup_uncharge_page(struct page *page)
{
}

static inline void mem_cgroup_uncharge_cache_page(struct page *page)
{
}

static inline struct lruvec *mem_cgroup_zone_lruvec(struct zone *zone,
						    struct mem_cgroup *memcg)
{
	return &zone->lruvec;
}

static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page,
						    struct zone *zone)
{
	return &zone->lruvec;
}

static inline struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
{
	return NULL;
}

static inline struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
{
	return NULL;
}

static inline bool mm_match_cgroup(struct mm_struct *mm,
				   struct mem_cgroup *memcg)
{
	return true;
}

static inline int task_in_mem_cgroup(struct task_struct *task,
				     const struct mem_cgroup *memcg)
{
	return 1;
}

static inline struct cgroup_subsys_state
		*mem_cgroup_css(struct mem_cgroup *memcg)
{
	return NULL;
}

static inline void
mem_cgroup_prepare_migration(struct page *page, struct page *newpage,
			     struct mem_cgroup **memcgp)
{
}

static inline void mem_cgroup_end_migration(struct mem_cgroup *memcg,
		struct page *oldpage, struct page *newpage, bool migration_ok)
{
}

static inline struct mem_cgroup *
mem_cgroup_iter(struct mem_cgroup *root,
		struct mem_cgroup *prev,
		struct mem_cgroup_reclaim_cookie *reclaim)
{
	return NULL;
}

static inline void mem_cgroup_iter_break(struct mem_cgroup *root,
					 struct mem_cgroup *prev)
{
}

static inline bool mem_cgroup_disabled(void)
{
	return true;
}

static inline int
mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec)
{
	return 1;
}

static inline unsigned long
mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
{
	return 0;
}

static inline void
mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
			   int increment)
{
}

static inline void
mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
{
}

static inline void mem_cgroup_begin_update_page_stat(struct page *page,
					bool *locked, unsigned long *flags)
{
}

static inline void mem_cgroup_end_update_page_stat(struct page *page,
					bool *locked, unsigned long *flags)
{
}

static inline void mem_cgroup_inc_page_stat(struct page *page,
					    enum mem_cgroup_page_stat_item idx)
{
}

static inline void mem_cgroup_dec_page_stat(struct page *page,
					    enum mem_cgroup_page_stat_item idx)
{
}

static inline
unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
					    gfp_t gfp_mask,
					    unsigned long *total_scanned)
{
	return 0;
}

static inline void mem_cgroup_split_huge_fixup(struct page *head)
{
}

static inline
void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx)
{
}

static inline void mem_cgroup_replace_page_cache(struct page *oldpage,
						 struct page *newpage)
{
}
#endif /* CONFIG_MEMCG */

#if !defined(CONFIG_MEMCG) || !defined(CONFIG_DEBUG_VM)
static inline bool
mem_cgroup_bad_page_check(struct page *page)
{
	return false;
}

static inline void
mem_cgroup_print_bad_page(struct page *page)
{
}
#endif

enum {
	UNDER_LIMIT,
	SOFT_LIMIT,
	OVER_LIMIT,
};

struct sock;
#if defined(CONFIG_INET) && defined(CONFIG_MEMCG_KMEM)
void sock_update_memcg(struct sock *sk);
void sock_release_memcg(struct sock *sk);
#else
static inline void sock_update_memcg(struct sock *sk)
{
}
static inline void sock_release_memcg(struct sock *sk)
{
}
#endif /* CONFIG_INET && CONFIG_MEMCG_KMEM */

#ifdef CONFIG_MEMCG_KMEM
extern struct static_key memcg_kmem_enabled_key;

extern int memcg_limited_groups_array_size;

/*
 * Helper macro to loop through all memcg-specific caches. Callers must still
 * check if the cache is valid (it is either valid or NULL).
 * The slab_mutex must be held when looping through those caches.
 */
#define for_each_memcg_cache_index(_idx)	\
	for ((_idx) = 0; (_idx) < memcg_limited_groups_array_size; (_idx)++)

static inline bool memcg_kmem_enabled(void)
{
	return static_key_false(&memcg_kmem_enabled_key);
}

/*
 * In general, we'll do everything in our power not to incur any overhead
 * for non-memcg users of the kmem functions -- not even a function call,
 * if we can avoid it.
 *
 * Therefore, we'll inline all those functions so that in the best case, we'll
 * see that kmemcg is off for everybody and proceed quickly. If it is on,
 * we'll still do most of the flag checking inline. We check a lot of
 * conditions, but because they are pretty simple, they are expected to be
 * fast.
 */
bool __memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg,
					int order);
void __memcg_kmem_commit_charge(struct page *page,
				struct mem_cgroup *memcg, int order);
void __memcg_kmem_uncharge_pages(struct page *page, int order);

int memcg_cache_id(struct mem_cgroup *memcg);
int memcg_register_cache(struct mem_cgroup *memcg, struct kmem_cache *s,
			 struct kmem_cache *root_cache);
void memcg_release_cache(struct kmem_cache *cachep);
void memcg_cache_list_add(struct mem_cgroup *memcg, struct kmem_cache *cachep);

int memcg_update_cache_size(struct kmem_cache *s, int num_groups);
void memcg_update_array_size(int num_groups);

struct kmem_cache *
__memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp);

void mem_cgroup_destroy_cache(struct kmem_cache *cachep);
void kmem_cache_destroy_memcg_children(struct kmem_cache *s);

/**
 * memcg_kmem_newpage_charge: verify if a new kmem allocation is allowed.
 * @gfp: the gfp allocation flags.
 * @memcg: a pointer to the memcg this was charged against.
 * @order: allocation order.
 *
 * Returns true if the memcg that the current task belongs to can hold this
 * allocation.
 *
 * We return true automatically if this allocation is not to be accounted to
 * any memcg.
 */
static inline bool
memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg, int order)
{
	if (!memcg_kmem_enabled())
		return true;

	/*
	 * __GFP_NOFAIL allocations will move on even if charging is not
	 * possible. Therefore we don't even try, and leave this allocation
	 * unaccounted. We could in theory charge it with
	 * res_counter_charge_nofail, but we hope those allocations are rare,
	 * and won't be worth the trouble.
	 */
	if (!(gfp & __GFP_KMEMCG) || (gfp & __GFP_NOFAIL))
		return true;
	if (in_interrupt() || (!current->mm) || (current->flags & PF_KTHREAD))
		return true;

	/* If the task is dying, just let it go. */
	if (unlikely(fatal_signal_pending(current)))
		return true;

	return __memcg_kmem_newpage_charge(gfp, memcg, order);
}

/**
 * memcg_kmem_uncharge_pages: uncharge pages from memcg
 * @page: pointer to struct page being freed
 * @order: allocation order.
 *
 * There is no need to specify the memcg here, since it is embedded in the
 * page_cgroup.
 */
static inline void
memcg_kmem_uncharge_pages(struct page *page, int order)
{
	if (memcg_kmem_enabled())
		__memcg_kmem_uncharge_pages(page, order);
}

/**
 * memcg_kmem_commit_charge: embeds correct memcg in a page
 * @page: pointer to struct page recently allocated
 * @memcg: the memcg structure we charged against
 * @order: allocation order.
 *
 * Needs to be called after memcg_kmem_newpage_charge, regardless of success or
 * failure of the allocation. If @page is NULL, this function will revert the
 * charges. Otherwise, it will commit the memcg given by @memcg to the
 * corresponding page_cgroup.
 */
static inline void
memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg, int order)
{
	if (memcg_kmem_enabled() && memcg)
		__memcg_kmem_commit_charge(page, memcg, order);
}
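
/*
 * Illustrative sketch only: roughly how an allocation path would drive the
 * three hooks above around a __GFP_KMEMCG request.  The variables gfp and
 * order are assumed caller-supplied, and allocate_the_pages() is a
 * placeholder for the raw page allocation being accounted:
 *
 *	struct mem_cgroup *memcg = NULL;
 *	struct page *page;
 *
 *	if (!memcg_kmem_newpage_charge(gfp, &memcg, order))
 *		return NULL;			// charge refused
 *	page = allocate_the_pages(gfp, order);
 *	memcg_kmem_commit_charge(page, memcg, order);	// reverts if !page
 *	...
 *	memcg_kmem_uncharge_pages(page, order);	// when the pages are freed
 */
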
/**
 * memcg_kmem_get_cache: selects the correct per-memcg cache for allocation
 * @cachep: the original global kmem cache
 * @gfp: allocation flags.
 *
 * This function assumes that the task allocating, which determines the memcg
 * in the page allocator, belongs to the same cgroup throughout the whole
 * process. Misaccounting can happen if the task calls memcg_kmem_get_cache()
 * while belonging to a cgroup, and later on changes. This is considered
 * acceptable, and should only happen upon task migration.
 *
 * Before the cache is created by the memcg core, there is also a possible
 * imbalance: the task belongs to a memcg, but the cache being allocated from
 * is the global cache, since the child cache is not yet guaranteed to be
 * ready. This case is also fine, since in this case __GFP_KMEMCG will not be
 * passed and the page allocator will not attempt any cgroup accounting.
 */
static __always_inline struct kmem_cache *
memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
{
	if (!memcg_kmem_enabled())
		return cachep;
	if (gfp & __GFP_NOFAIL)
		return cachep;
	if (in_interrupt() || (!current->mm) || (current->flags & PF_KTHREAD))
		return cachep;
	if (unlikely(fatal_signal_pending(current)))
		return cachep;

	return __memcg_kmem_get_cache(cachep, gfp);
}
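
/*
 * Illustrative sketch (assumption: the hot path of a slab allocator; cachep
 * and flags are assumed caller-supplied, and allocate_object() is a
 * placeholder for the allocator's real fast path).  The substitution is
 * transparent to the caller, which keeps using its global cache pointer:
 *
 *	cachep = memcg_kmem_get_cache(cachep, flags);
 *	objp = allocate_object(cachep, flags);
 */
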
#else
#define for_each_memcg_cache_index(_idx)	\
	for (; NULL; )

static inline bool memcg_kmem_enabled(void)
{
	return false;
}

static inline bool
memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg, int order)
{
	return true;
}

static inline void memcg_kmem_uncharge_pages(struct page *page, int order)
{
}

static inline void
memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg, int order)
{
}

static inline int memcg_cache_id(struct mem_cgroup *memcg)
{
	return -1;
}

static inline int
memcg_register_cache(struct mem_cgroup *memcg, struct kmem_cache *s,
		     struct kmem_cache *root_cache)
{
	return 0;
}

static inline void memcg_release_cache(struct kmem_cache *cachep)
{
}

static inline void memcg_cache_list_add(struct mem_cgroup *memcg,
					struct kmem_cache *s)
{
}

static inline struct kmem_cache *
memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
{
	return cachep;
}

static inline void kmem_cache_destroy_memcg_children(struct kmem_cache *s)
{
}
#endif /* CONFIG_MEMCG_KMEM */
#endif /* _LINUX_MEMCONTROL_H */