/* memcontrol.h - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <[email protected]>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <[email protected]>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#ifndef _LINUX_MEMCONTROL_H
#define _LINUX_MEMCONTROL_H
#include <linux/cgroup.h>
#include <linux/vm_event_item.h>
#include <linux/hardirq.h>
#include <linux/jump_label.h>

struct mem_cgroup;
struct page_cgroup;
struct page;
struct mm_struct;
struct kmem_cache;

/*
 * The corresponding mem_cgroup_stat_names array is defined in
 * mm/memcontrol.c; the two lists must be kept in sync with each other.
 */
enum mem_cgroup_stat_index {
	/*
	 * For MEM_CONTAINER_TYPE_ALL, usage = pagecache + rss.
	 */
	MEM_CGROUP_STAT_CACHE,		/* # of pages charged as cache */
	MEM_CGROUP_STAT_RSS,		/* # of pages charged as anon rss */
	MEM_CGROUP_STAT_RSS_HUGE,	/* # of pages charged as anon huge */
	MEM_CGROUP_STAT_FILE_MAPPED,	/* # of pages charged as file rss */
	MEM_CGROUP_STAT_WRITEBACK,	/* # of pages under writeback */
	MEM_CGROUP_STAT_SWAP,		/* # of pages, swapped out */
	MEM_CGROUP_STAT_NSTATS,
};

struct mem_cgroup_reclaim_cookie {
	struct zone *zone;
	int priority;
	unsigned int generation;
};

enum mem_cgroup_filter_t {
	VISIT,		/* visit current node */
	SKIP,		/* skip the current node and continue traversal */
	SKIP_TREE,	/* skip the whole subtree and continue traversal */
};

/*
 * A mem_cgroup_iter_filter predicate instructs mem_cgroup_iter_cond() how to
 * iterate through the hierarchy tree. Each tree element is checked by the
 * predicate before it is returned by the iterator. If a filter returns
 * SKIP or SKIP_TREE, the iterator code continues traversal (with the next
 * node down the hierarchy or with the next node that doesn't fall under the
 * memcg's subtree, respectively).
 */
typedef enum mem_cgroup_filter_t
(*mem_cgroup_iter_filter)(struct mem_cgroup *memcg, struct mem_cgroup *root);
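
/*
 * Illustrative sketch only (not a kernel API): a filter that prunes the
 * subtree below groups matched by a hypothetical is_uninteresting() helper
 * and visits everything else could look roughly like this:
 *
 *	static enum mem_cgroup_filter_t
 *	prune_uninteresting(struct mem_cgroup *memcg, struct mem_cgroup *root)
 *	{
 *		if (is_uninteresting(memcg))
 *			return SKIP_TREE;
 *		return VISIT;
 *	}
 *
 * mem_cgroup_soft_reclaim_eligible(), declared later in this header, has
 * this predicate type and is the in-tree example.
 */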

#ifdef CONFIG_MEMCG
/*
 * All "charge" functions with gfp_mask should use GFP_KERNEL or
 * (gfp_mask & GFP_RECLAIM_MASK). In the current implementation, memcg doesn't
 * allocate memory but reclaims memory from all available zones, so the
 * "where do I want memory from" bits of gfp_mask have no meaning. Any bits of
 * that field would do, but having a rule is better: a charge function's
 * gfp_mask should be set to GFP_KERNEL or gfp_mask & GFP_RECLAIM_MASK to
 * avoid ambiguous code.
 * (Of course, if memcg does memory allocation in the future, GFP_KERNEL is
 * sane.)
 */

extern int mem_cgroup_newpage_charge(struct page *page, struct mm_struct *mm,
				gfp_t gfp_mask);
/* for swap handling */
extern int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
		struct page *page, gfp_t mask, struct mem_cgroup **memcgp);
extern void mem_cgroup_commit_charge_swapin(struct page *page,
					struct mem_cgroup *memcg);
extern void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *memcg);

extern int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
					gfp_t gfp_mask);

struct lruvec *mem_cgroup_zone_lruvec(struct zone *, struct mem_cgroup *);
struct lruvec *mem_cgroup_page_lruvec(struct page *, struct zone *);

/* For coalescing uncharge, to reduce memcg's overhead */
extern void mem_cgroup_uncharge_start(void);
extern void mem_cgroup_uncharge_end(void);

extern void mem_cgroup_uncharge_page(struct page *page);
extern void mem_cgroup_uncharge_cache_page(struct page *page);

bool __mem_cgroup_same_or_subtree(const struct mem_cgroup *root_memcg,
				  struct mem_cgroup *memcg);
bool task_in_mem_cgroup(struct task_struct *task,
			const struct mem_cgroup *memcg);

extern struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page);
extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
extern struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm);

extern struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg);
extern struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css);

static inline
bool mm_match_cgroup(const struct mm_struct *mm, const struct mem_cgroup *memcg)
{
	struct mem_cgroup *task_memcg;
	bool match;

	rcu_read_lock();
	task_memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
	match = __mem_cgroup_same_or_subtree(memcg, task_memcg);
	rcu_read_unlock();
	return match;
}

extern struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *memcg);

extern void
mem_cgroup_prepare_migration(struct page *page, struct page *newpage,
			     struct mem_cgroup **memcgp);
extern void mem_cgroup_end_migration(struct mem_cgroup *memcg,
	struct page *oldpage, struct page *newpage, bool migration_ok);

struct mem_cgroup *mem_cgroup_iter_cond(struct mem_cgroup *root,
				   struct mem_cgroup *prev,
				   struct mem_cgroup_reclaim_cookie *reclaim,
				   mem_cgroup_iter_filter cond);

static inline struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
				   struct mem_cgroup *prev,
				   struct mem_cgroup_reclaim_cookie *reclaim)
{
	return mem_cgroup_iter_cond(root, prev, reclaim, NULL);
}

void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
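
/*
 * Sketch of the usual hierarchy walk (illustrative; the reference-counting
 * convention follows the mem_cgroup_iter() documentation in mm/memcontrol.c):
 * each returned group must be passed back as @prev on the next call, and an
 * early exit must go through mem_cgroup_iter_break() so the reference held
 * by the iterator is dropped.  do_work() below is a hypothetical helper.
 *
 *	struct mem_cgroup *iter;
 *
 *	for (iter = mem_cgroup_iter(root, NULL, NULL);
 *	     iter;
 *	     iter = mem_cgroup_iter(root, iter, NULL)) {
 *		if (do_work(iter)) {
 *			mem_cgroup_iter_break(root, iter);
 *			break;
 *		}
 *	}
 */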

/*
 * For memory reclaim.
 */
int mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec);
int mem_cgroup_select_victim_node(struct mem_cgroup *memcg);
unsigned long mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list);
void mem_cgroup_update_lru_size(struct lruvec *, enum lru_list, int);
extern void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
					struct task_struct *p);
extern void mem_cgroup_replace_page_cache(struct page *oldpage,
					struct page *newpage);

/**
 * mem_cgroup_toggle_oom - toggle the memcg OOM killer for the current task
 * @new: true to enable, false to disable
 *
 * Toggle whether a failed memcg charge should invoke the OOM killer
 * or just return -ENOMEM.  Returns the previous toggle state.
 *
 * NOTE: Any path that enables the OOM killer before charging must
 *       call mem_cgroup_oom_synchronize() afterward to finalize the
 *       OOM handling and clean up.
 */
static inline bool mem_cgroup_toggle_oom(bool new)
{
	bool old;

	old = current->memcg_oom.may_oom;
	current->memcg_oom.may_oom = new;

	return old;
}

static inline void mem_cgroup_enable_oom(void)
{
	bool old = mem_cgroup_toggle_oom(true);

	WARN_ON(old == true);
}

static inline void mem_cgroup_disable_oom(void)
{
	bool old = mem_cgroup_toggle_oom(false);

	WARN_ON(old == false);
}

static inline bool task_in_memcg_oom(struct task_struct *p)
{
	return p->memcg_oom.in_memcg_oom;
}

bool mem_cgroup_oom_synchronize(void);
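
/*
 * Sketch of the contract described in the NOTE above (illustrative, not a
 * copy of a particular call site; charge_something() is hypothetical): a
 * path that allows the memcg OOM killer brackets the charging work with
 * enable/disable and lets the task finish a pending OOM afterwards:
 *
 *	mem_cgroup_enable_oom();
 *	ret = charge_something();
 *	mem_cgroup_disable_oom();
 *
 *	if (task_in_memcg_oom(current))
 *		mem_cgroup_oom_synchronize();
 */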

#ifdef CONFIG_MEMCG_SWAP
extern int do_swap_account;
#endif

static inline bool mem_cgroup_disabled(void)
{
	if (mem_cgroup_subsys.disabled)
		return true;
	return false;
}

void __mem_cgroup_begin_update_page_stat(struct page *page, bool *locked,
					 unsigned long *flags);

extern atomic_t memcg_moving;

static inline void mem_cgroup_begin_update_page_stat(struct page *page,
					bool *locked, unsigned long *flags)
{
	if (mem_cgroup_disabled())
		return;
	rcu_read_lock();
	*locked = false;
	if (atomic_read(&memcg_moving))
		__mem_cgroup_begin_update_page_stat(page, locked, flags);
}

void __mem_cgroup_end_update_page_stat(struct page *page,
				unsigned long *flags);
static inline void mem_cgroup_end_update_page_stat(struct page *page,
					bool *locked, unsigned long *flags)
{
	if (mem_cgroup_disabled())
		return;
	if (*locked)
		__mem_cgroup_end_update_page_stat(page, flags);
	rcu_read_unlock();
}

void mem_cgroup_update_page_stat(struct page *page,
				 enum mem_cgroup_stat_index idx,
				 int val);

static inline void mem_cgroup_inc_page_stat(struct page *page,
					    enum mem_cgroup_stat_index idx)
{
	mem_cgroup_update_page_stat(page, idx, 1);
}

static inline void mem_cgroup_dec_page_stat(struct page *page,
					    enum mem_cgroup_stat_index idx)
{
	mem_cgroup_update_page_stat(page, idx, -1);
}
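
/*
 * Page statistics updates must be bracketed by the begin/end pair above so
 * they cannot race with the page being moved between memcgs.  A sketch,
 * loosely following the file-mapped accounting in mm/rmap.c (the condition
 * is a stand-in for whatever state transition is being accounted):
 *
 *	bool locked;
 *	unsigned long flags;
 *
 *	mem_cgroup_begin_update_page_stat(page, &locked, &flags);
 *	if (mapping_changed)
 *		mem_cgroup_inc_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED);
 *	mem_cgroup_end_update_page_stat(page, &locked, &flags);
 */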

enum mem_cgroup_filter_t
mem_cgroup_soft_reclaim_eligible(struct mem_cgroup *memcg,
				 struct mem_cgroup *root);

void __mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx);
static inline void mem_cgroup_count_vm_event(struct mm_struct *mm,
					     enum vm_event_item idx)
{
	if (mem_cgroup_disabled())
		return;
	__mem_cgroup_count_vm_event(mm, idx);
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void mem_cgroup_split_huge_fixup(struct page *head);
#endif

#ifdef CONFIG_DEBUG_VM
bool mem_cgroup_bad_page_check(struct page *page);
void mem_cgroup_print_bad_page(struct page *page);
#endif
#else /* CONFIG_MEMCG */
struct mem_cgroup;

static inline int mem_cgroup_newpage_charge(struct page *page,
					struct mm_struct *mm, gfp_t gfp_mask)
{
	return 0;
}

static inline int mem_cgroup_cache_charge(struct page *page,
					struct mm_struct *mm, gfp_t gfp_mask)
{
	return 0;
}

static inline int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
		struct page *page, gfp_t gfp_mask, struct mem_cgroup **memcgp)
{
	return 0;
}

static inline void mem_cgroup_commit_charge_swapin(struct page *page,
					struct mem_cgroup *memcg)
{
}

static inline void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *memcg)
{
}

static inline void mem_cgroup_uncharge_start(void)
{
}

static inline void mem_cgroup_uncharge_end(void)
{
}

static inline void mem_cgroup_uncharge_page(struct page *page)
{
}

static inline void mem_cgroup_uncharge_cache_page(struct page *page)
{
}

static inline struct lruvec *mem_cgroup_zone_lruvec(struct zone *zone,
						    struct mem_cgroup *memcg)
{
	return &zone->lruvec;
}

static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page,
						    struct zone *zone)
{
	return &zone->lruvec;
}

static inline struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
{
	return NULL;
}

static inline struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
{
	return NULL;
}

static inline bool mm_match_cgroup(struct mm_struct *mm,
		struct mem_cgroup *memcg)
{
	return true;
}

static inline bool task_in_mem_cgroup(struct task_struct *task,
				      const struct mem_cgroup *memcg)
{
	return true;
}

static inline struct cgroup_subsys_state
		*mem_cgroup_css(struct mem_cgroup *memcg)
{
	return NULL;
}

static inline void
mem_cgroup_prepare_migration(struct page *page, struct page *newpage,
			     struct mem_cgroup **memcgp)
{
}

static inline void mem_cgroup_end_migration(struct mem_cgroup *memcg,
		struct page *oldpage, struct page *newpage, bool migration_ok)
{
}

static inline struct mem_cgroup *
mem_cgroup_iter_cond(struct mem_cgroup *root,
		struct mem_cgroup *prev,
		struct mem_cgroup_reclaim_cookie *reclaim,
		mem_cgroup_iter_filter cond)
{
	/* first call must return non-NULL, second return NULL */
	return (struct mem_cgroup *)(unsigned long)!prev;
}

static inline struct mem_cgroup *
mem_cgroup_iter(struct mem_cgroup *root,
		struct mem_cgroup *prev,
		struct mem_cgroup_reclaim_cookie *reclaim)
{
	return NULL;
}

static inline void mem_cgroup_iter_break(struct mem_cgroup *root,
					 struct mem_cgroup *prev)
{
}

static inline bool mem_cgroup_disabled(void)
{
	return true;
}

static inline int
mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec)
{
	return 1;
}

static inline unsigned long
mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
{
	return 0;
}

static inline void
mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
			   int increment)
{
}

static inline void
mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
{
}

static inline void mem_cgroup_begin_update_page_stat(struct page *page,
					bool *locked, unsigned long *flags)
{
}

static inline void mem_cgroup_end_update_page_stat(struct page *page,
					bool *locked, unsigned long *flags)
{
}

static inline bool mem_cgroup_toggle_oom(bool new)
{
	return false;
}

static inline void mem_cgroup_enable_oom(void)
{
}

static inline void mem_cgroup_disable_oom(void)
{
}

static inline bool task_in_memcg_oom(struct task_struct *p)
{
	return false;
}

static inline bool mem_cgroup_oom_synchronize(void)
{
	return false;
}

static inline void mem_cgroup_inc_page_stat(struct page *page,
					    enum mem_cgroup_stat_index idx)
{
}

static inline void mem_cgroup_dec_page_stat(struct page *page,
					    enum mem_cgroup_stat_index idx)
{
}

static inline
enum mem_cgroup_filter_t
mem_cgroup_soft_reclaim_eligible(struct mem_cgroup *memcg,
		struct mem_cgroup *root)
{
	return VISIT;
}

static inline void mem_cgroup_split_huge_fixup(struct page *head)
{
}

static inline
void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx)
{
}

static inline void mem_cgroup_replace_page_cache(struct page *oldpage,
				struct page *newpage)
{
}
#endif /* CONFIG_MEMCG */

#if !defined(CONFIG_MEMCG) || !defined(CONFIG_DEBUG_VM)
static inline bool
mem_cgroup_bad_page_check(struct page *page)
{
	return false;
}

static inline void
mem_cgroup_print_bad_page(struct page *page)
{
}
#endif

enum {
	UNDER_LIMIT,
	SOFT_LIMIT,
	OVER_LIMIT,
};

struct sock;
#if defined(CONFIG_INET) && defined(CONFIG_MEMCG_KMEM)
void sock_update_memcg(struct sock *sk);
void sock_release_memcg(struct sock *sk);
#else
static inline void sock_update_memcg(struct sock *sk)
{
}
static inline void sock_release_memcg(struct sock *sk)
{
}
#endif /* CONFIG_INET && CONFIG_MEMCG_KMEM */

#ifdef CONFIG_MEMCG_KMEM
extern struct static_key memcg_kmem_enabled_key;

extern int memcg_limited_groups_array_size;

/*
 * Helper macro to loop through all memcg-specific caches. Callers must still
 * check if the cache is valid (it is either valid or NULL).
 * The slab_mutex must be held when looping through those caches.
 */
#define for_each_memcg_cache_index(_idx)	\
	for ((_idx) = 0; (_idx) < memcg_limited_groups_array_size; (_idx)++)
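
/*
 * Sketch of the intended usage (illustrative; cache_from_memcg_idx() is a
 * stand-in for however the caller looks up the per-memcg cache of
 * root_cache at a given index):
 *
 *	struct kmem_cache *c;
 *	int i;
 *
 *	mutex_lock(&slab_mutex);
 *	for_each_memcg_cache_index(i) {
 *		c = cache_from_memcg_idx(root_cache, i);
 *		if (!c)
 *			continue;
 *		... operate on the child cache ...
 *	}
 *	mutex_unlock(&slab_mutex);
 */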

static inline bool memcg_kmem_enabled(void)
{
	return static_key_false(&memcg_kmem_enabled_key);
}

/*
 * In general, we'll do everything in our power to not incur any overhead
 * for non-memcg users of the kmem functions. Not even a function call, if we
 * can avoid it.
 *
 * Therefore, we'll inline all those functions so that in the best case, we'll
 * see that kmemcg is off for everybody and proceed quickly.  If it is on,
 * we'll still do most of the flag checking inline. We check a lot of
 * conditions, but because they are pretty simple, they are expected to be
 * fast.
 */
bool __memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg,
					int order);
void __memcg_kmem_commit_charge(struct page *page,
				struct mem_cgroup *memcg, int order);
void __memcg_kmem_uncharge_pages(struct page *page, int order);

int memcg_cache_id(struct mem_cgroup *memcg);
int memcg_register_cache(struct mem_cgroup *memcg, struct kmem_cache *s,
			 struct kmem_cache *root_cache);
void memcg_release_cache(struct kmem_cache *cachep);
void memcg_cache_list_add(struct mem_cgroup *memcg, struct kmem_cache *cachep);

int memcg_update_cache_size(struct kmem_cache *s, int num_groups);
void memcg_update_array_size(int num_groups);

struct kmem_cache *
__memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp);

void mem_cgroup_destroy_cache(struct kmem_cache *cachep);
void kmem_cache_destroy_memcg_children(struct kmem_cache *s);

/**
 * memcg_kmem_newpage_charge: verify if a new kmem allocation is allowed.
 * @gfp: the gfp allocation flags.
 * @memcg: a pointer to the memcg this was charged against.
 * @order: allocation order.
 *
 * Returns true if the memcg where the current task belongs can hold this
 * allocation.
 *
 * We return true automatically if this allocation is not to be accounted to
 * any memcg.
 */
static inline bool
memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg, int order)
{
	if (!memcg_kmem_enabled())
		return true;

	/*
	 * __GFP_NOFAIL allocations will move on even if charging is not
	 * possible. Therefore we don't even try, and have this allocation
	 * unaccounted. We could in theory charge it with
	 * res_counter_charge_nofail, but we hope those allocations are rare,
	 * and won't be worth the trouble.
	 */
	if (!(gfp & __GFP_KMEMCG) || (gfp & __GFP_NOFAIL))
		return true;
	if (in_interrupt() || (!current->mm) || (current->flags & PF_KTHREAD))
		return true;

	/* If the task is dying, just let it go. */
	if (unlikely(fatal_signal_pending(current)))
		return true;

	return __memcg_kmem_newpage_charge(gfp, memcg, order);
}

/**
 * memcg_kmem_uncharge_pages: uncharge pages from memcg
 * @page: pointer to struct page being freed
 * @order: allocation order.
 *
 * There is no need to specify the memcg here, since it is embedded in the
 * page_cgroup.
 */
static inline void
memcg_kmem_uncharge_pages(struct page *page, int order)
{
	if (memcg_kmem_enabled())
		__memcg_kmem_uncharge_pages(page, order);
}

/**
 * memcg_kmem_commit_charge: embeds correct memcg in a page
 * @page: pointer to struct page recently allocated
 * @memcg: the memcg structure we charged against
 * @order: allocation order.
 *
 * Needs to be called after memcg_kmem_newpage_charge, regardless of success
 * or failure of the allocation. If @page is NULL, this function will revert
 * the charges. Otherwise, it will commit @memcg to the corresponding
 * page_cgroup.
 */
static inline void
memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg, int order)
{
	if (memcg_kmem_enabled() && memcg)
		__memcg_kmem_commit_charge(page, memcg, order);
}
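
/*
 * The three wrappers above pair up around a page allocation roughly as
 * follows (a sketch of the protocol spelled out in the kernel-doc above,
 * not a copy of the page allocator code):
 *
 *	struct mem_cgroup *memcg = NULL;
 *	struct page *page;
 *
 *	if (!memcg_kmem_newpage_charge(gfp, &memcg, order))
 *		return NULL;	(charge refused)
 *	page = alloc_pages(gfp, order);
 *	memcg_kmem_commit_charge(page, memcg, order);	(reverts if !page)
 *	...
 *	memcg_kmem_uncharge_pages(page, order);		(when freeing)
 */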

/**
 * memcg_kmem_get_cache: selects the correct per-memcg cache for allocation
 * @cachep: the original global kmem cache
 * @gfp: allocation flags.
 *
 * This function assumes that the task allocating, which determines the memcg
 * in the page allocator, belongs to the same cgroup throughout the whole
 * process.  Misaccounting can happen if the task calls memcg_kmem_get_cache()
 * while belonging to a cgroup, and later on changes. This is considered
 * acceptable, and should only happen upon task migration.
 *
 * Before the cache is created by the memcg core, there is also a possible
 * imbalance: the task belongs to a memcg, but the cache being allocated from
 * is the global cache, since the child cache is not yet guaranteed to be
 * ready. This case is also fine, since in this case the __GFP_KMEMCG flag
 * will not be passed and the page allocator will not attempt any cgroup
 * accounting.
 */
static __always_inline struct kmem_cache *
memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
{
	if (!memcg_kmem_enabled())
		return cachep;
	if (gfp & __GFP_NOFAIL)
		return cachep;
	if (in_interrupt() || (!current->mm) || (current->flags & PF_KTHREAD))
		return cachep;
	if (unlikely(fatal_signal_pending(current)))
		return cachep;

	return __memcg_kmem_get_cache(cachep, gfp);
}
#else
#define for_each_memcg_cache_index(_idx)	\
	for (; NULL; )

static inline bool memcg_kmem_enabled(void)
{
	return false;
}

static inline bool
memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg, int order)
{
	return true;
}

static inline void memcg_kmem_uncharge_pages(struct page *page, int order)
{
}

static inline void
memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg, int order)
{
}

static inline int memcg_cache_id(struct mem_cgroup *memcg)
{
	return -1;
}

static inline int
memcg_register_cache(struct mem_cgroup *memcg, struct kmem_cache *s,
		     struct kmem_cache *root_cache)
{
	return 0;
}

static inline void memcg_release_cache(struct kmem_cache *cachep)
{
}

static inline void memcg_cache_list_add(struct mem_cgroup *memcg,
					struct kmem_cache *s)
{
}

static inline struct kmem_cache *
memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
{
	return cachep;
}

static inline void kmem_cache_destroy_memcg_children(struct kmem_cache *s)
{
}
#endif /* CONFIG_MEMCG_KMEM */
#endif /* _LINUX_MEMCONTROL_H */