/* SPDX-License-Identifier: GPL-2.0-or-later */
/* memcontrol.h - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <[email protected]>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <[email protected]>
 */

#ifndef _LINUX_MEMCONTROL_H
#define _LINUX_MEMCONTROL_H
#include <linux/cgroup.h>
#include <linux/vm_event_item.h>
#include <linux/hardirq.h>
#include <linux/jump_label.h>
#include <linux/page_counter.h>
#include <linux/vmpressure.h>
#include <linux/eventfd.h>
#include <linux/mm.h>
#include <linux/vmstat.h>
#include <linux/writeback.h>
#include <linux/page-flags.h>

struct mem_cgroup;
struct obj_cgroup;
struct page;
struct mm_struct;
struct kmem_cache;

/* Cgroup-specific page state, on top of universal node page state */
enum memcg_stat_item {
	MEMCG_SWAP = NR_VM_NODE_STAT_ITEMS,
	MEMCG_SOCK,
	/* XXX: why are these zone and not node counters? */
	MEMCG_KERNEL_STACK_KB,
	MEMCG_NR_STAT,
};

enum memcg_memory_event {
	MEMCG_LOW,
	MEMCG_HIGH,
	MEMCG_MAX,
	MEMCG_OOM,
	MEMCG_OOM_KILL,
	MEMCG_SWAP_HIGH,
	MEMCG_SWAP_MAX,
	MEMCG_SWAP_FAIL,
	MEMCG_NR_MEMORY_EVENTS,
};

enum mem_cgroup_protection {
	MEMCG_PROT_NONE,
	MEMCG_PROT_LOW,
	MEMCG_PROT_MIN,
};

struct mem_cgroup_reclaim_cookie {
	pg_data_t *pgdat;
	unsigned int generation;
};

#ifdef CONFIG_MEMCG

#define MEM_CGROUP_ID_SHIFT	16
#define MEM_CGROUP_ID_MAX	USHRT_MAX

struct mem_cgroup_id {
	int id;
	refcount_t ref;
};

/*
 * The per-memcg event counter is incremented at every pagein/pageout. With
 * THP, it is incremented by the number of pages. This counter is used to
 * trigger periodic events, which is simpler and cheaper than using jiffies
 * or similar to drive them.
 */
enum mem_cgroup_events_target {
	MEM_CGROUP_TARGET_THRESH,
	MEM_CGROUP_TARGET_SOFTLIMIT,
	MEM_CGROUP_NTARGETS,
};

struct memcg_vmstats_percpu {
	long stat[MEMCG_NR_STAT];
	unsigned long events[NR_VM_EVENT_ITEMS];
	unsigned long nr_page_events;
	unsigned long targets[MEM_CGROUP_NTARGETS];
};

struct mem_cgroup_reclaim_iter {
	struct mem_cgroup *position;
	/* scan generation, increased every round-trip */
	unsigned int generation;
};

struct lruvec_stat {
	long count[NR_VM_NODE_STAT_ITEMS];
};

/*
 * Bitmap of shrinker::id corresponding to memcg-aware shrinkers,
 * which have elements charged to this memcg.
 */
struct memcg_shrinker_map {
	struct rcu_head rcu;
	unsigned long map[];
};
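
/*
 * Illustrative sketch (not taken from an in-tree caller): a memcg-aware
 * cache that has just queued its first object for a given memcg and node
 * would mark the corresponding bit so that shrink_slab() knows to invoke
 * that shrinker for this memcg. "shrinker" and "first_object_for_memcg"
 * are placeholders; memcg_set_shrinker_bit() is declared later in this
 * header:
 *
 *	if (first_object_for_memcg)
 *		memcg_set_shrinker_bit(memcg, nid, shrinker->id);
 */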

/*
 * per-node information in memory controller.
 */
struct mem_cgroup_per_node {
	struct lruvec lruvec;

	/* Legacy local VM stats */
	struct lruvec_stat __percpu *lruvec_stat_local;

	/* Subtree VM stats (batched updates) */
	struct lruvec_stat __percpu *lruvec_stat_cpu;
	atomic_long_t lruvec_stat[NR_VM_NODE_STAT_ITEMS];

	unsigned long lru_zone_size[MAX_NR_ZONES][NR_LRU_LISTS];

	struct mem_cgroup_reclaim_iter iter;

	struct memcg_shrinker_map __rcu *shrinker_map;

	struct rb_node tree_node;	/* RB tree node */
	unsigned long usage_in_excess;	/* Set to the value by which */
					/* the soft limit is exceeded */
	bool on_tree;
	struct mem_cgroup *memcg;	/* Back pointer, we cannot */
					/* use container_of */
};

struct mem_cgroup_threshold {
	struct eventfd_ctx *eventfd;
	unsigned long threshold;
};

/* For threshold */
struct mem_cgroup_threshold_ary {
	/* An array index points to threshold just below or equal to usage. */
	int current_threshold;
	/* Size of entries[] */
	unsigned int size;
	/* Array of thresholds */
	struct mem_cgroup_threshold entries[];
};

struct mem_cgroup_thresholds {
	/* Primary thresholds array */
	struct mem_cgroup_threshold_ary *primary;
	/*
	 * Spare threshold array.
	 * This is needed to make mem_cgroup_unregister_event() "never fail".
	 * It must be able to store at least primary->size - 1 entries.
	 */
	struct mem_cgroup_threshold_ary *spare;
};

enum memcg_kmem_state {
	KMEM_NONE,
	KMEM_ALLOCATED,
	KMEM_ONLINE,
};

#if defined(CONFIG_SMP)
struct memcg_padding {
	char x[0];
} ____cacheline_internodealigned_in_smp;
#define MEMCG_PADDING(name)	struct memcg_padding name;
#else
#define MEMCG_PADDING(name)
#endif

/*
 * Remember four most recent foreign writebacks with dirty pages in this
 * cgroup. Inode sharing is expected to be uncommon and, even if we miss
 * one in a given round, we're likely to catch it later if it keeps
 * foreign-dirtying, so a fairly low count should be enough.
 *
 * See mem_cgroup_track_foreign_dirty_slowpath() for details.
 */
#define MEMCG_CGWB_FRN_CNT	4

struct memcg_cgwb_frn {
	u64 bdi_id;			/* bdi->id of the foreign inode */
	int memcg_id;			/* memcg->css.id of foreign inode */
	u64 at;				/* jiffies_64 at the time of dirtying */
	struct wb_completion done;	/* tracks in-flight foreign writebacks */
};

/*
 * Bucket for arbitrarily byte-sized objects charged to a memory
 * cgroup. The bucket can be reparented in one piece when the cgroup
 * is destroyed, without having to round up the individual references
 * of all live memory objects in the wild.
 */
struct obj_cgroup {
	struct percpu_ref refcnt;
	struct mem_cgroup *memcg;
	atomic_t nr_charged_bytes;
	union {
		struct list_head list;
		struct rcu_head rcu;
	};
};

/*
 * The memory controller data structure. The memory controller controls both
 * page cache and RSS per cgroup. We would eventually like to provide
 * statistics based on the statistics developed by Rik Van Riel for clock-pro,
 * to help the administrator determine what knobs to tune.
 */
struct mem_cgroup {
	struct cgroup_subsys_state css;

	/* Private memcg ID. Used to ID objects that outlive the cgroup */
	struct mem_cgroup_id id;

	/* Accounted resources */
	struct page_counter memory;
	struct page_counter swap;

	/* Legacy consumer-oriented counters */
	struct page_counter memsw;
	struct page_counter kmem;
	struct page_counter tcpmem;

	/* Range enforcement for interrupt charges */
	struct work_struct high_work;

	unsigned long soft_limit;

	/* vmpressure notifications */
	struct vmpressure vmpressure;

	/*
	 * Should the accounting and control be hierarchical, per subtree?
	 */
	bool use_hierarchy;

	/*
	 * Should the OOM killer kill all tasks belonging to this cgroup,
	 * rather than just one, when an OOM kill is triggered?
	 */
	bool oom_group;

	/* protected by memcg_oom_lock */
	bool oom_lock;
	int under_oom;

	int swappiness;
	/* OOM-Killer disable */
	int oom_kill_disable;

	/* memory.events and memory.events.local */
	struct cgroup_file events_file;
	struct cgroup_file events_local_file;

	/* handle for "memory.swap.events" */
	struct cgroup_file swap_events_file;

	/* protect arrays of thresholds */
	struct mutex thresholds_lock;

	/* thresholds for memory usage. RCU-protected */
	struct mem_cgroup_thresholds thresholds;

	/* thresholds for mem+swap usage. RCU-protected */
	struct mem_cgroup_thresholds memsw_thresholds;

	/* For oom notifier event fd */
	struct list_head oom_notify;

	/*
	 * Should we move charges of a task when the task is moved into this
	 * mem_cgroup? And what type of charges should we move?
	 */
	unsigned long move_charge_at_immigrate;
	/* taken only while moving_account > 0 */
	spinlock_t move_lock;
	unsigned long move_lock_flags;

	MEMCG_PADDING(_pad1_);

	/*
	 * set > 0 if pages under this cgroup are moving to other cgroup.
	 */
	atomic_t moving_account;
	struct task_struct *move_lock_task;

	/* Legacy local VM stats and events */
	struct memcg_vmstats_percpu __percpu *vmstats_local;

	/* Subtree VM stats and events (batched updates) */
	struct memcg_vmstats_percpu __percpu *vmstats_percpu;

	MEMCG_PADDING(_pad2_);

	atomic_long_t vmstats[MEMCG_NR_STAT];
	atomic_long_t vmevents[NR_VM_EVENT_ITEMS];

	/* memory.events */
	atomic_long_t memory_events[MEMCG_NR_MEMORY_EVENTS];
	atomic_long_t memory_events_local[MEMCG_NR_MEMORY_EVENTS];

	unsigned long socket_pressure;

	/* Legacy tcp memory accounting */
	bool tcpmem_active;
	int tcpmem_pressure;

#ifdef CONFIG_MEMCG_KMEM
	/* Index in the kmem_cache->memcg_params.memcg_caches array */
	int kmemcg_id;
	enum memcg_kmem_state kmem_state;
	struct obj_cgroup __rcu *objcg;
	struct list_head objcg_list;	/* list of inherited objcgs */
#endif

#ifdef CONFIG_CGROUP_WRITEBACK
	struct list_head cgwb_list;
	struct wb_domain cgwb_domain;
	struct memcg_cgwb_frn cgwb_frn[MEMCG_CGWB_FRN_CNT];
#endif

	/* List of events which userspace want to receive */
	struct list_head event_list;
	spinlock_t event_list_lock;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	struct deferred_split deferred_split_queue;
#endif

	struct mem_cgroup_per_node *nodeinfo[0];
	/* WARNING: nodeinfo must be the last member here */
};

/*
 * Size of the first charge trial. "32" comes from vmscan.c's magic value.
 * TODO: larger batches may be necessary on very large machines.
 */
#define MEMCG_CHARGE_BATCH 32U

extern struct mem_cgroup *root_mem_cgroup;

static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
{
	return (memcg == root_mem_cgroup);
}

static inline bool mem_cgroup_disabled(void)
{
	return !cgroup_subsys_enabled(memory_cgrp_subsys);
}

static inline unsigned long mem_cgroup_protection(struct mem_cgroup *memcg,
						  bool in_low_reclaim)
{
	if (mem_cgroup_disabled())
		return 0;

	if (in_low_reclaim)
		return READ_ONCE(memcg->memory.emin);

	return max(READ_ONCE(memcg->memory.emin),
		   READ_ONCE(memcg->memory.elow));
}

enum mem_cgroup_protection mem_cgroup_protected(struct mem_cgroup *root,
						struct mem_cgroup *memcg);

int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask);

void mem_cgroup_uncharge(struct page *page);
void mem_cgroup_uncharge_list(struct list_head *page_list);

void mem_cgroup_migrate(struct page *oldpage, struct page *newpage);

static struct mem_cgroup_per_node *
mem_cgroup_nodeinfo(struct mem_cgroup *memcg, int nid)
{
	return memcg->nodeinfo[nid];
}

/**
 * mem_cgroup_lruvec - get the lru list vector for a memcg & node
 * @memcg: memcg of the wanted lruvec
 * @pgdat: node whose lruvec is wanted
 *
 * Returns the lru list vector holding pages for a given @memcg &
 * @pgdat combination. This can be the node lruvec, if the memory
 * controller is disabled.
 */
static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg,
					       struct pglist_data *pgdat)
{
	struct mem_cgroup_per_node *mz;
	struct lruvec *lruvec;

	if (mem_cgroup_disabled()) {
		lruvec = &pgdat->__lruvec;
		goto out;
	}

	if (!memcg)
		memcg = root_mem_cgroup;

	mz = mem_cgroup_nodeinfo(memcg, pgdat->node_id);
	lruvec = &mz->lruvec;
out:
	/*
	 * Since a node can be onlined after the mem_cgroup was created,
	 * we have to be prepared to initialize lruvec->pgdat here;
	 * and if offlined then reonlined, we need to reinitialize it.
	 */
	if (unlikely(lruvec->pgdat != pgdat))
		lruvec->pgdat = pgdat;
	return lruvec;
}

struct lruvec *mem_cgroup_page_lruvec(struct page *, struct pglist_data *);

struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);

struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm);

struct mem_cgroup *get_mem_cgroup_from_page(struct page *page);

static inline
struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css)
{
	return css ? container_of(css, struct mem_cgroup, css) : NULL;
}

static inline bool obj_cgroup_tryget(struct obj_cgroup *objcg)
{
	return percpu_ref_tryget(&objcg->refcnt);
}

static inline void obj_cgroup_get(struct obj_cgroup *objcg)
{
	percpu_ref_get(&objcg->refcnt);
}

static inline void obj_cgroup_put(struct obj_cgroup *objcg)
{
	percpu_ref_put(&objcg->refcnt);
}
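
/*
 * Illustrative sketch (not from an in-tree caller): pin an obj_cgroup and
 * look up the memcg it is currently charged to. The RCU read lock keeps
 * the memcg returned by obj_cgroup_memcg() (declared below) from being
 * released, as its comment requires. "objcg" is a placeholder variable.
 *
 *	struct mem_cgroup *memcg;
 *
 *	if (!obj_cgroup_tryget(objcg))
 *		return;
 *	rcu_read_lock();
 *	memcg = obj_cgroup_memcg(objcg);
 *	... use memcg ...
 *	rcu_read_unlock();
 *	obj_cgroup_put(objcg);
 */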

/*
 * After the initialization objcg->memcg is always pointing at
 * a valid memcg, but can be atomically swapped to the parent memcg.
 *
 * The caller must ensure that the returned memcg won't be released:
 * e.g. acquire the rcu_read_lock or css_set_lock.
 */
static inline struct mem_cgroup *obj_cgroup_memcg(struct obj_cgroup *objcg)
{
	return READ_ONCE(objcg->memcg);
}

static inline void mem_cgroup_put(struct mem_cgroup *memcg)
{
	if (memcg)
		css_put(&memcg->css);
}

#define mem_cgroup_from_counter(counter, member)	\
	container_of(counter, struct mem_cgroup, member)

struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
				   struct mem_cgroup *,
				   struct mem_cgroup_reclaim_cookie *);
void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
int mem_cgroup_scan_tasks(struct mem_cgroup *,
			  int (*)(struct task_struct *, void *), void *);

static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
{
	if (mem_cgroup_disabled())
		return 0;

	return memcg->id.id;
}
struct mem_cgroup *mem_cgroup_from_id(unsigned short id);

static inline struct mem_cgroup *mem_cgroup_from_seq(struct seq_file *m)
{
	return mem_cgroup_from_css(seq_css(m));
}

static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
{
	struct mem_cgroup_per_node *mz;

	if (mem_cgroup_disabled())
		return NULL;

	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	return mz->memcg;
}

/**
 * parent_mem_cgroup - find the accounting parent of a memcg
 * @memcg: memcg whose parent to find
 *
 * Returns the parent memcg, or NULL if this is the root or the memory
 * controller is in legacy no-hierarchy mode.
 */
static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
{
	if (!memcg->memory.parent)
		return NULL;
	return mem_cgroup_from_counter(memcg->memory.parent, memory);
}

static inline bool mem_cgroup_is_descendant(struct mem_cgroup *memcg,
					    struct mem_cgroup *root)
{
	if (root == memcg)
		return true;
	if (!root->use_hierarchy)
		return false;
	return cgroup_is_descendant(memcg->css.cgroup, root->css.cgroup);
}

static inline bool mm_match_cgroup(struct mm_struct *mm,
				   struct mem_cgroup *memcg)
{
	struct mem_cgroup *task_memcg;
	bool match = false;

	rcu_read_lock();
	task_memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
	if (task_memcg)
		match = mem_cgroup_is_descendant(task_memcg, memcg);
	rcu_read_unlock();
	return match;
}

struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page);
ino_t page_cgroup_ino(struct page *page);

static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
{
	if (mem_cgroup_disabled())
		return true;
	return !!(memcg->css.flags & CSS_ONLINE);
}
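
/*
 * Illustrative sketch of a full pre-order walk of a subtree with the
 * iterator declared above, including the subtree root itself. "root" and
 * "should_stop()" are placeholders, not names defined elsewhere:
 *
 *	struct mem_cgroup *iter;
 *
 *	for (iter = mem_cgroup_iter(root, NULL, NULL);
 *	     iter != NULL;
 *	     iter = mem_cgroup_iter(root, iter, NULL)) {
 *		if (should_stop(iter)) {
 *			mem_cgroup_iter_break(root, iter);
 *			break;
 *		}
 *	}
 *
 * mem_cgroup_iter() returns a referenced memcg and drops the reference on
 * the previous position, so breaking out early must go through
 * mem_cgroup_iter_break() to release the last reference.
 */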

/*
 * For memory reclaim.
 */
int mem_cgroup_select_victim_node(struct mem_cgroup *memcg);

void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
				int zid, int nr_pages);

static inline
unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
					   enum lru_list lru, int zone_idx)
{
	struct mem_cgroup_per_node *mz;

	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	return mz->lru_zone_size[zone_idx][lru];
}

void mem_cgroup_handle_over_high(void);

unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg);

unsigned long mem_cgroup_size(struct mem_cgroup *memcg);

void mem_cgroup_print_oom_context(struct mem_cgroup *memcg,
				  struct task_struct *p);

void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg);

static inline void mem_cgroup_enter_user_fault(void)
{
	WARN_ON(current->in_user_fault);
	current->in_user_fault = 1;
}

static inline void mem_cgroup_exit_user_fault(void)
{
	WARN_ON(!current->in_user_fault);
	current->in_user_fault = 0;
}

static inline bool task_in_memcg_oom(struct task_struct *p)
{
	return p->memcg_in_oom;
}

bool mem_cgroup_oom_synchronize(bool wait);
struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim,
					    struct mem_cgroup *oom_domain);
void mem_cgroup_print_oom_group(struct mem_cgroup *memcg);

#ifdef CONFIG_MEMCG_SWAP
extern bool cgroup_memory_noswap;
#endif

struct mem_cgroup *lock_page_memcg(struct page *page);
void __unlock_page_memcg(struct mem_cgroup *memcg);
void unlock_page_memcg(struct page *page);

/*
 * idx can be of type enum memcg_stat_item or node_stat_item.
 * Keep in sync with memcg_exact_page_state().
 */
static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
{
	long x = atomic_long_read(&memcg->vmstats[idx]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

/*
 * idx can be of type enum memcg_stat_item or node_stat_item.
 * Keep in sync with memcg_exact_page_state().
 */
static inline unsigned long memcg_page_state_local(struct mem_cgroup *memcg,
						   int idx)
{
	long x = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		x += per_cpu(memcg->vmstats_local->stat[idx], cpu);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val);

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void mod_memcg_state(struct mem_cgroup *memcg,
				   int idx, int val)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_memcg_state(memcg, idx, val);
	local_irq_restore(flags);
}
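
/*
 * Illustrative sketch: because the memcg stat index space extends the
 * node_stat_item space, both namespaces work with the same helpers.
 * "nr_pages" is a placeholder:
 *
 *	nr_sock = memcg_page_state(memcg, MEMCG_SOCK);
 *	nr_file = memcg_page_state(memcg, NR_FILE_PAGES);
 *	mod_memcg_state(memcg, MEMCG_SOCK, nr_pages);
 *
 * mod_memcg_state() is the irq-safe wrapper; callers that already run
 * with interrupts disabled can use __mod_memcg_state() directly.
 */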

/**
 * mod_memcg_page_state - update page state statistics
 * @page: the page
 * @idx: page state item to account
 * @val: number of pages (positive or negative)
 *
 * The @page must be locked or the caller must use lock_page_memcg()
 * to prevent double accounting when the page is concurrently being
 * moved to another memcg:
 *
 *   lock_page(page) or lock_page_memcg(page)
 *   if (TestClearPageState(page))
 *     mod_memcg_page_state(page, state, -1);
 *   unlock_page(page) or unlock_page_memcg(page)
 *
 * Kernel pages are an exception to this, since they'll never move.
 */
static inline void __mod_memcg_page_state(struct page *page,
					  int idx, int val)
{
	if (page->mem_cgroup)
		__mod_memcg_state(page->mem_cgroup, idx, val);
}

static inline void mod_memcg_page_state(struct page *page,
					int idx, int val)
{
	if (page->mem_cgroup)
		mod_memcg_state(page->mem_cgroup, idx, val);
}

static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
					      enum node_stat_item idx)
{
	struct mem_cgroup_per_node *pn;
	long x;

	if (mem_cgroup_disabled())
		return node_page_state(lruvec_pgdat(lruvec), idx);

	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	x = atomic_long_read(&pn->lruvec_stat[idx]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
						    enum node_stat_item idx)
{
	struct mem_cgroup_per_node *pn;
	long x = 0;
	int cpu;

	if (mem_cgroup_disabled())
		return node_page_state(lruvec_pgdat(lruvec), idx);

	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	for_each_possible_cpu(cpu)
		x += per_cpu(pn->lruvec_stat_local->count[idx], cpu);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
			      int val);
void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
			int val);
void __mod_lruvec_slab_state(void *p, enum node_stat_item idx, int val);
void mod_memcg_obj_state(void *p, int idx, int val);

static inline void mod_memcg_lruvec_state(struct lruvec *lruvec,
					  enum node_stat_item idx, int val)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_memcg_lruvec_state(lruvec, idx, val);
	local_irq_restore(flags);
}

static inline void mod_lruvec_state(struct lruvec *lruvec,
				    enum node_stat_item idx, int val)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_lruvec_state(lruvec, idx, val);
	local_irq_restore(flags);
}

static inline void __mod_lruvec_page_state(struct page *page,
					   enum node_stat_item idx, int val)
{
	struct page *head = compound_head(page); /* rmap on tail pages */
	pg_data_t *pgdat = page_pgdat(page);
	struct lruvec *lruvec;

	/* Untracked pages have no memcg, no lruvec. Update only the node */
	if (!head->mem_cgroup) {
		__mod_node_page_state(pgdat, idx, val);
		return;
	}

	lruvec = mem_cgroup_lruvec(head->mem_cgroup, pgdat);
	__mod_lruvec_state(lruvec, idx, val);
}

static inline void mod_lruvec_page_state(struct page *page,
					 enum node_stat_item idx, int val)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_lruvec_page_state(page, idx, val);
	local_irq_restore(flags);
}

unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
					    gfp_t gfp_mask,
					    unsigned long *total_scanned);

void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
			  unsigned long count);

static inline void count_memcg_events(struct mem_cgroup *memcg,
				      enum vm_event_item idx,
				      unsigned long count)
{
	unsigned long flags;

	local_irq_save(flags);
	__count_memcg_events(memcg, idx, count);
	local_irq_restore(flags);
}

static inline void count_memcg_page_event(struct page *page,
					  enum vm_event_item idx)
{
	if (page->mem_cgroup)
		count_memcg_events(page->mem_cgroup, idx, 1);
}

static inline void count_memcg_event_mm(struct mm_struct *mm,
					enum vm_event_item idx)
{
	struct mem_cgroup *memcg;

	if (mem_cgroup_disabled())
		return;

	rcu_read_lock();
	memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
	if (likely(memcg))
		count_memcg_events(memcg, idx, 1);
	rcu_read_unlock();
}

static inline void memcg_memory_event(struct mem_cgroup *memcg,
				      enum memcg_memory_event event)
{
	atomic_long_inc(&memcg->memory_events_local[event]);
	cgroup_file_notify(&memcg->events_local_file);

	do {
		atomic_long_inc(&memcg->memory_events[event]);
		cgroup_file_notify(&memcg->events_file);

		if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
			break;
		if (cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_LOCAL_EVENTS)
			break;
	} while ((memcg = parent_mem_cgroup(memcg)) &&
		 !mem_cgroup_is_root(memcg));
}

static inline void memcg_memory_event_mm(struct mm_struct *mm,
					 enum memcg_memory_event event)
{
	struct mem_cgroup *memcg;

	if (mem_cgroup_disabled())
		return;

	rcu_read_lock();
	memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
	if (likely(memcg))
		memcg_memory_event(memcg, event);
	rcu_read_unlock();
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void mem_cgroup_split_huge_fixup(struct page *head);
#endif

#else /* CONFIG_MEMCG */

#define MEM_CGROUP_ID_SHIFT	0
#define MEM_CGROUP_ID_MAX	0

struct mem_cgroup;

static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
{
	return true;
}

static inline bool mem_cgroup_disabled(void)
{
	return true;
}

static inline void memcg_memory_event(struct mem_cgroup *memcg,
				      enum memcg_memory_event event)
{
}

static inline void memcg_memory_event_mm(struct mm_struct *mm,
					 enum memcg_memory_event event)
{
}

static inline unsigned long mem_cgroup_protection(struct mem_cgroup *memcg,
						  bool in_low_reclaim)
{
	return 0;
}

static inline enum mem_cgroup_protection mem_cgroup_protected(
	struct mem_cgroup *root, struct mem_cgroup *memcg)
{
	return MEMCG_PROT_NONE;
}

static inline int mem_cgroup_charge(struct page *page, struct mm_struct *mm,
				    gfp_t gfp_mask)
{
	return 0;
}

static inline void mem_cgroup_uncharge(struct page *page)
{
}

static inline void mem_cgroup_uncharge_list(struct list_head *page_list)
{
}

static inline void mem_cgroup_migrate(struct page *old, struct page *new)
{
}

static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg,
					       struct pglist_data *pgdat)
{
	return &pgdat->__lruvec;
}

static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page,
						    struct pglist_data *pgdat)
{
	return &pgdat->__lruvec;
}

static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
{
	return NULL;
}

static inline bool mm_match_cgroup(struct mm_struct *mm,
				   struct mem_cgroup *memcg)
{
	return true;
}

static inline struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
{
	return NULL;
}

static inline struct mem_cgroup *get_mem_cgroup_from_page(struct page *page)
{
	return NULL;
}

static inline void mem_cgroup_put(struct mem_cgroup *memcg)
{
}

static inline struct mem_cgroup *
mem_cgroup_iter(struct mem_cgroup *root,
		struct mem_cgroup *prev,
		struct mem_cgroup_reclaim_cookie *reclaim)
{
	return NULL;
}

static inline void mem_cgroup_iter_break(struct mem_cgroup *root,
					 struct mem_cgroup *prev)
{
}

static inline int mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
					int (*fn)(struct task_struct *, void *),
					void *arg)
{
	return 0;
}

static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
{
	return 0;
}

static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
{
	WARN_ON_ONCE(id);
	/* XXX: This should always return root_mem_cgroup */
	return NULL;
}

static inline struct mem_cgroup *mem_cgroup_from_seq(struct seq_file *m)
{
	return NULL;
}

static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
{
	return NULL;
}

static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
{
	return true;
}

static inline
unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
					   enum lru_list lru, int zone_idx)
{
	return 0;
}

static inline unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
{
	return 0;
}

static inline unsigned long mem_cgroup_size(struct mem_cgroup *memcg)
{
	return 0;
}

static inline void
mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
{
}

static inline void
mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
{
}

static inline struct mem_cgroup *lock_page_memcg(struct page *page)
{
	return NULL;
}

static inline void __unlock_page_memcg(struct mem_cgroup *memcg)
{
}

static inline void unlock_page_memcg(struct page *page)
{
}

static inline void mem_cgroup_handle_over_high(void)
{
}

static inline void mem_cgroup_enter_user_fault(void)
{
}

static inline void mem_cgroup_exit_user_fault(void)
{
}

static inline bool task_in_memcg_oom(struct task_struct *p)
{
	return false;
}

static inline bool mem_cgroup_oom_synchronize(bool wait)
{
	return false;
}

static inline struct mem_cgroup *mem_cgroup_get_oom_group(
	struct task_struct *victim, struct mem_cgroup *oom_domain)
{
	return NULL;
}

static inline void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
{
}

static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
{
	return 0;
}

static inline unsigned long memcg_page_state_local(struct mem_cgroup *memcg,
						   int idx)
{
	return 0;
}

static inline void __mod_memcg_state(struct mem_cgroup *memcg,
				     int idx,
				     int nr)
{
}

static inline void mod_memcg_state(struct mem_cgroup *memcg,
				   int idx,
				   int nr)
{
}

static inline void __mod_memcg_page_state(struct page *page,
					  int idx,
					  int nr)
{
}

static inline void mod_memcg_page_state(struct page *page,
					int idx,
					int nr)
{
}

static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
					      enum node_stat_item idx)
{
	return node_page_state(lruvec_pgdat(lruvec), idx);
}

static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
						    enum node_stat_item idx)
{
	return node_page_state(lruvec_pgdat(lruvec), idx);
}

static inline void __mod_memcg_lruvec_state(struct lruvec *lruvec,
					    enum node_stat_item idx, int val)
{
}

static inline void __mod_lruvec_state(struct lruvec *lruvec,
				      enum node_stat_item idx, int val)
{
	__mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
}

static inline void mod_lruvec_state(struct lruvec *lruvec,
				    enum node_stat_item idx, int val)
{
	mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
}

static inline void __mod_lruvec_page_state(struct page *page,
					   enum node_stat_item idx, int val)
{
	__mod_node_page_state(page_pgdat(page), idx, val);
}

static inline void mod_lruvec_page_state(struct page *page,
					 enum node_stat_item idx, int val)
{
	mod_node_page_state(page_pgdat(page), idx, val);
}

static inline void __mod_lruvec_slab_state(void *p, enum node_stat_item idx,
					   int val)
{
	struct page *page = virt_to_head_page(p);

	__mod_node_page_state(page_pgdat(page), idx, val);
}

static inline void mod_memcg_obj_state(void *p, int idx, int val)
{
}

static inline
unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
					    gfp_t gfp_mask,
					    unsigned long *total_scanned)
{
	return 0;
}

static inline void mem_cgroup_split_huge_fixup(struct page *head)
{
}

static inline void count_memcg_events(struct mem_cgroup *memcg,
				      enum vm_event_item idx,
				      unsigned long count)
{
}

static inline void __count_memcg_events(struct mem_cgroup *memcg,
					enum vm_event_item idx,
					unsigned long count)
{
}

static inline void count_memcg_page_event(struct page *page,
					  int idx)
{
}

static inline
void count_memcg_event_mm(struct mm_struct *mm, enum vm_event_item idx)
{
}
#endif /* CONFIG_MEMCG */

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void __inc_memcg_state(struct mem_cgroup *memcg,
				     int idx)
{
	__mod_memcg_state(memcg, idx, 1);
}
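
/*
 * Illustrative note: these inc/dec helpers simply pass +1 or -1 to the
 * corresponding mod_*() function, so for example bumping the per-memcg
 * socket page counter by a single page can be written either way:
 *
 *	__inc_memcg_state(memcg, MEMCG_SOCK);
 *	__mod_memcg_state(memcg, MEMCG_SOCK, 1);
 */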

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void __dec_memcg_state(struct mem_cgroup *memcg,
				     int idx)
{
	__mod_memcg_state(memcg, idx, -1);
}

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void __inc_memcg_page_state(struct page *page,
					  int idx)
{
	__mod_memcg_page_state(page, idx, 1);
}

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void __dec_memcg_page_state(struct page *page,
					  int idx)
{
	__mod_memcg_page_state(page, idx, -1);
}

static inline void __inc_lruvec_state(struct lruvec *lruvec,
				      enum node_stat_item idx)
{
	__mod_lruvec_state(lruvec, idx, 1);
}

static inline void __dec_lruvec_state(struct lruvec *lruvec,
				      enum node_stat_item idx)
{
	__mod_lruvec_state(lruvec, idx, -1);
}

static inline void __inc_lruvec_page_state(struct page *page,
					   enum node_stat_item idx)
{
	__mod_lruvec_page_state(page, idx, 1);
}

static inline void __dec_lruvec_page_state(struct page *page,
					   enum node_stat_item idx)
{
	__mod_lruvec_page_state(page, idx, -1);
}

static inline void __inc_lruvec_slab_state(void *p, enum node_stat_item idx)
{
	__mod_lruvec_slab_state(p, idx, 1);
}

static inline void __dec_lruvec_slab_state(void *p, enum node_stat_item idx)
{
	__mod_lruvec_slab_state(p, idx, -1);
}

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void inc_memcg_state(struct mem_cgroup *memcg,
				   int idx)
{
	mod_memcg_state(memcg, idx, 1);
}

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void dec_memcg_state(struct mem_cgroup *memcg,
				   int idx)
{
	mod_memcg_state(memcg, idx, -1);
}

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void inc_memcg_page_state(struct page *page,
					int idx)
{
	mod_memcg_page_state(page, idx, 1);
}

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void dec_memcg_page_state(struct page *page,
					int idx)
{
	mod_memcg_page_state(page, idx, -1);
}

static inline void inc_lruvec_state(struct lruvec *lruvec,
				    enum node_stat_item idx)
{
	mod_lruvec_state(lruvec, idx, 1);
}

static inline void dec_lruvec_state(struct lruvec *lruvec,
				    enum node_stat_item idx)
{
	mod_lruvec_state(lruvec, idx, -1);
}

static inline void inc_lruvec_page_state(struct page *page,
					 enum node_stat_item idx)
{
	mod_lruvec_page_state(page, idx, 1);
}

static inline void dec_lruvec_page_state(struct page *page,
					 enum node_stat_item idx)
{
	mod_lruvec_page_state(page, idx, -1);
}

static inline struct lruvec *parent_lruvec(struct lruvec *lruvec)
{
	struct mem_cgroup *memcg;

	memcg = lruvec_memcg(lruvec);
	if (!memcg)
		return NULL;
	memcg = parent_mem_cgroup(memcg);
	if (!memcg)
		return NULL;
	return mem_cgroup_lruvec(memcg, lruvec_pgdat(lruvec));
}

#ifdef CONFIG_CGROUP_WRITEBACK

struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb);
void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
			 unsigned long *pheadroom, unsigned long *pdirty,
			 unsigned long *pwriteback);

void mem_cgroup_track_foreign_dirty_slowpath(struct page *page,
					     struct bdi_writeback *wb);

static inline void mem_cgroup_track_foreign_dirty(struct page *page,
						  struct bdi_writeback *wb)
{
	if (mem_cgroup_disabled())
		return;

	if (unlikely(&page->mem_cgroup->css != wb->memcg_css))
		mem_cgroup_track_foreign_dirty_slowpath(page, wb);
}

void mem_cgroup_flush_foreign(struct bdi_writeback *wb);

#else /* CONFIG_CGROUP_WRITEBACK */

static inline struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
{
	return NULL;
}

static inline void mem_cgroup_wb_stats(struct bdi_writeback *wb,
				       unsigned long *pfilepages,
				       unsigned long *pheadroom,
				       unsigned long *pdirty,
				       unsigned long *pwriteback)
{
}

static inline void mem_cgroup_track_foreign_dirty(struct page *page,
						  struct bdi_writeback *wb)
{
}

static inline void mem_cgroup_flush_foreign(struct bdi_writeback *wb)
{
}

#endif /* CONFIG_CGROUP_WRITEBACK */

struct sock;
bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
#ifdef CONFIG_MEMCG
extern struct static_key_false memcg_sockets_enabled_key;
#define mem_cgroup_sockets_enabled static_branch_unlikely(&memcg_sockets_enabled_key)
void mem_cgroup_sk_alloc(struct sock *sk);
void mem_cgroup_sk_free(struct sock *sk);
static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
{
	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_pressure)
		return true;
	do {
		if (time_before(jiffies, memcg->socket_pressure))
			return true;
	} while ((memcg = parent_mem_cgroup(memcg)));
	return false;
}

extern int memcg_expand_shrinker_maps(int new_id);

extern void memcg_set_shrinker_bit(struct mem_cgroup *memcg,
				   int nid, int shrinker_id);
#else
#define mem_cgroup_sockets_enabled 0
static inline void mem_cgroup_sk_alloc(struct sock *sk) { }
static inline void mem_cgroup_sk_free(struct sock *sk) { }
static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
{
	return false;
}

static inline void memcg_set_shrinker_bit(struct mem_cgroup *memcg,
					  int nid, int shrinker_id)
{
}
#endif

#ifdef CONFIG_MEMCG_KMEM
int __memcg_kmem_charge(struct mem_cgroup *memcg, gfp_t gfp,
			unsigned int nr_pages);
void __memcg_kmem_uncharge(struct mem_cgroup *memcg, unsigned int nr_pages);
int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order);
void __memcg_kmem_uncharge_page(struct page *page, int order);

struct obj_cgroup *get_obj_cgroup_from_current(void);

int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size);
void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size);

extern struct static_key_false memcg_kmem_enabled_key;

extern int memcg_nr_cache_ids;
void memcg_get_cache_ids(void);
void memcg_put_cache_ids(void);
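
/*
 * Illustrative sketch (not from an in-tree caller) of byte-sized charging
 * with the objcg API above, roughly the pattern a slab-style allocator
 * would follow. "size" is a placeholder; obj_cgroup_charge() returns 0 on
 * success, and get_obj_cgroup_from_current() returns a referenced objcg
 * or NULL:
 *
 *	struct obj_cgroup *objcg = get_obj_cgroup_from_current();
 *
 *	if (objcg) {
 *		if (!obj_cgroup_charge(objcg, GFP_KERNEL, size)) {
 *			... use the charged memory ...
 *			obj_cgroup_uncharge(objcg, size);
 *		}
 *		obj_cgroup_put(objcg);
 *	}
 */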

/*
 * Helper macro to loop through all memcg-specific caches. Callers must still
 * check if the cache is valid (it is either valid or NULL).
 * The slab_mutex must be held when looping through those caches.
 */
#define for_each_memcg_cache_index(_idx)	\
	for ((_idx) = 0; (_idx) < memcg_nr_cache_ids; (_idx)++)

static inline bool memcg_kmem_enabled(void)
{
	return static_branch_unlikely(&memcg_kmem_enabled_key);
}

static inline bool memcg_kmem_bypass(void)
{
	if (in_interrupt())
		return true;

	/* Allow remote memcg charging in kthread contexts. */
	if ((!current->mm || (current->flags & PF_KTHREAD)) &&
	    !current->active_memcg)
		return true;
	return false;
}

static inline int memcg_kmem_charge_page(struct page *page, gfp_t gfp,
					 int order)
{
	if (memcg_kmem_enabled())
		return __memcg_kmem_charge_page(page, gfp, order);
	return 0;
}

static inline void memcg_kmem_uncharge_page(struct page *page, int order)
{
	if (memcg_kmem_enabled())
		__memcg_kmem_uncharge_page(page, order);
}

static inline int memcg_kmem_charge(struct mem_cgroup *memcg, gfp_t gfp,
				    unsigned int nr_pages)
{
	if (memcg_kmem_enabled())
		return __memcg_kmem_charge(memcg, gfp, nr_pages);
	return 0;
}

static inline void memcg_kmem_uncharge(struct mem_cgroup *memcg,
				       unsigned int nr_pages)
{
	if (memcg_kmem_enabled())
		__memcg_kmem_uncharge(memcg, nr_pages);
}

/*
 * Helper for accessing a memcg's index. It will be used as an index in the
 * child cache array in kmem_cache, and also to derive its name. This function
 * will return -1 when this is not a kmem-limited memcg.
 */
static inline int memcg_cache_id(struct mem_cgroup *memcg)
{
	return memcg ? memcg->kmemcg_id : -1;
}

struct mem_cgroup *mem_cgroup_from_obj(void *p);

#else

static inline int memcg_kmem_charge_page(struct page *page, gfp_t gfp,
					 int order)
{
	return 0;
}

static inline void memcg_kmem_uncharge_page(struct page *page, int order)
{
}

static inline int __memcg_kmem_charge_page(struct page *page, gfp_t gfp,
					   int order)
{
	return 0;
}

static inline void __memcg_kmem_uncharge_page(struct page *page, int order)
{
}

#define for_each_memcg_cache_index(_idx)	\
	for (; NULL; )

static inline bool memcg_kmem_enabled(void)
{
	return false;
}

static inline int memcg_cache_id(struct mem_cgroup *memcg)
{
	return -1;
}

static inline void memcg_get_cache_ids(void)
{
}

static inline void memcg_put_cache_ids(void)
{
}

static inline struct mem_cgroup *mem_cgroup_from_obj(void *p)
{
	return NULL;
}

#endif /* CONFIG_MEMCG_KMEM */

#endif /* _LINUX_MEMCONTROL_H */