/* SPDX-License-Identifier: GPL-2.0-or-later */
/* memcontrol.h - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <[email protected]>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <[email protected]>
 */

#ifndef _LINUX_MEMCONTROL_H
#define _LINUX_MEMCONTROL_H
#include <linux/cgroup.h>
#include <linux/vm_event_item.h>
#include <linux/hardirq.h>
#include <linux/jump_label.h>
#include <linux/page_counter.h>
#include <linux/vmpressure.h>
#include <linux/eventfd.h>
#include <linux/mm.h>
#include <linux/vmstat.h>
#include <linux/writeback.h>
#include <linux/page-flags.h>

struct mem_cgroup;
struct obj_cgroup;
struct page;
struct mm_struct;
struct kmem_cache;

/* Cgroup-specific page state, on top of universal node page state */
enum memcg_stat_item {
	MEMCG_SWAP = NR_VM_NODE_STAT_ITEMS,
	MEMCG_SOCK,
	MEMCG_PERCPU_B,
	MEMCG_VMALLOC,
	MEMCG_KMEM,
	MEMCG_ZSWAP_B,
	MEMCG_ZSWAPPED,
	MEMCG_NR_STAT,
};

enum memcg_memory_event {
	MEMCG_LOW,
	MEMCG_HIGH,
	MEMCG_MAX,
	MEMCG_OOM,
	MEMCG_OOM_KILL,
	MEMCG_OOM_GROUP_KILL,
	MEMCG_SWAP_HIGH,
	MEMCG_SWAP_MAX,
	MEMCG_SWAP_FAIL,
	MEMCG_NR_MEMORY_EVENTS,
};

struct mem_cgroup_reclaim_cookie {
	pg_data_t *pgdat;
	unsigned int generation;
};

#ifdef CONFIG_MEMCG

#define MEM_CGROUP_ID_SHIFT	16
#define MEM_CGROUP_ID_MAX	USHRT_MAX

struct mem_cgroup_id {
	int id;
	refcount_t ref;
};

/*
 * The per-memcg event counter is incremented on every pagein/pageout. With
 * THP, it is incremented by the number of pages. This counter is used to
 * trigger some periodic events. This is straightforward and better than
 * using jiffies etc. to handle periodic memcg events.
 */
enum mem_cgroup_events_target {
	MEM_CGROUP_TARGET_THRESH,
	MEM_CGROUP_TARGET_SOFTLIMIT,
	MEM_CGROUP_NTARGETS,
};

struct memcg_vmstats_percpu;
struct memcg_vmstats;

struct mem_cgroup_reclaim_iter {
	struct mem_cgroup *position;
	/* scan generation, increased every round-trip */
	unsigned int generation;
};

/*
 * Bitmap and deferred work of shrinker::id corresponding to memcg-aware
 * shrinkers, which have elements charged to this memcg.
 */
struct shrinker_info {
	struct rcu_head rcu;
	atomic_long_t *nr_deferred;
	unsigned long *map;
	int map_nr_max;
};

struct lruvec_stats_percpu {
	/* Local (CPU and cgroup) state */
	long state[NR_VM_NODE_STAT_ITEMS];

	/* Delta calculation for lockless upward propagation */
	long state_prev[NR_VM_NODE_STAT_ITEMS];
};

struct lruvec_stats {
	/* Aggregated (CPU and subtree) state */
	long state[NR_VM_NODE_STAT_ITEMS];

	/* Pending child counts during tree propagation */
	long state_pending[NR_VM_NODE_STAT_ITEMS];
};
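
/*
 * Illustrative sketch only (the real flushing code lives in mm/memcontrol.c
 * and is more involved): the state[]/state_prev[] pair above lets a flusher
 * compute per-CPU deltas without any locking on the update side.  Updaters
 * only touch their own CPU's state[]; a flusher later does, per CPU and per
 * item i, roughly:
 *
 *	delta = pcpu->state[i] - pcpu->state_prev[i];
 *	pcpu->state_prev[i] = pcpu->state[i];
 *	lruvec_stats->state[i] += delta;	// then propagate to ancestors
 *
 * so readers of the aggregated state[] may see slightly stale values, which
 * is the intended trade-off.
 */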

/*
 * per-node information in memory controller.
 */
struct mem_cgroup_per_node {
	struct lruvec		lruvec;

	struct lruvec_stats_percpu __percpu	*lruvec_stats_percpu;
	struct lruvec_stats			lruvec_stats;

	unsigned long		lru_zone_size[MAX_NR_ZONES][NR_LRU_LISTS];

	struct mem_cgroup_reclaim_iter	iter;

	struct shrinker_info __rcu	*shrinker_info;

	struct rb_node		tree_node;	/* RB tree node */
	unsigned long		usage_in_excess;/* Set to the value by which */
						/* the soft limit is exceeded */
	bool			on_tree;
	struct mem_cgroup	*memcg;		/* Back pointer, we cannot */
						/* use container_of	   */
};

struct mem_cgroup_threshold {
	struct eventfd_ctx *eventfd;
	unsigned long threshold;
};

/* For threshold */
struct mem_cgroup_threshold_ary {
	/* An array index points to threshold just below or equal to usage. */
	int current_threshold;
	/* Size of entries[] */
	unsigned int size;
	/* Array of thresholds */
	struct mem_cgroup_threshold entries[];
};

struct mem_cgroup_thresholds {
	/* Primary thresholds array */
	struct mem_cgroup_threshold_ary *primary;
	/*
	 * Spare threshold array.
	 * This is needed to make mem_cgroup_unregister_event() "never fail".
	 * It must be able to store at least primary->size - 1 entries.
	 */
	struct mem_cgroup_threshold_ary *spare;
};

/*
 * Remember four most recent foreign writebacks with dirty pages in this
 * cgroup.  Inode sharing is expected to be uncommon and, even if we miss
 * one in a given round, we're likely to catch it later if it keeps
 * foreign-dirtying, so a fairly low count should be enough.
 *
 * See mem_cgroup_track_foreign_dirty_slowpath() for details.
 */
#define MEMCG_CGWB_FRN_CNT	4

struct memcg_cgwb_frn {
	u64 bdi_id;			/* bdi->id of the foreign inode */
	int memcg_id;			/* memcg->css.id of foreign inode */
	u64 at;				/* jiffies_64 at the time of dirtying */
	struct wb_completion done;	/* tracks in-flight foreign writebacks */
};

/*
 * Bucket for arbitrarily byte-sized objects charged to a memory
 * cgroup. The bucket can be reparented in one piece when the cgroup
 * is destroyed, without having to round up the individual references
 * of all live memory objects in the wild.
 */
struct obj_cgroup {
	struct percpu_ref refcnt;
	struct mem_cgroup *memcg;
	atomic_t nr_charged_bytes;
	union {
		struct list_head list; /* protected by objcg_lock */
		struct rcu_head rcu;
	};
};
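
/*
 * Illustrative pattern (not kernel code): a long-lived object pins an
 * obj_cgroup instead of a mem_cgroup, so the originating cgroup can be
 * deleted, and the objcg reparented, while the object is still alive.
 * Assuming "obj" is some caller-defined structure and using helpers declared
 * later in this file:
 *
 *	obj->objcg = get_obj_cgroup_from_current();
 *	...
 *	rcu_read_lock();
 *	memcg = obj_cgroup_memcg(obj->objcg);	// may already be an ancestor
 *	... account against memcg ...
 *	rcu_read_unlock();
 *	...
 *	obj_cgroup_put(obj->objcg);		// on object destruction
 *
 * obj_cgroup_memcg() must only be dereferenced under RCU (or an equivalent
 * guarantee), because the objcg can be re-pointed at the parent memcg at any
 * time.
 */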

/*
 * The memory controller data structure. The memory controller controls both
 * page cache and RSS per cgroup. We would eventually like to provide
 * statistics based on the statistics developed by Rik Van Riel for clock-pro,
 * to help the administrator determine what knobs to tune.
 */
struct mem_cgroup {
	struct cgroup_subsys_state css;

	/* Private memcg ID. Used to ID objects that outlive the cgroup */
	struct mem_cgroup_id id;

	/* Accounted resources */
	struct page_counter memory;		/* Both v1 & v2 */

	union {
		struct page_counter swap;	/* v2 only */
		struct page_counter memsw;	/* v1 only */
	};

	/* Legacy consumer-oriented counters */
	struct page_counter kmem;		/* v1 only */
	struct page_counter tcpmem;		/* v1 only */

	/* Range enforcement for interrupt charges */
	struct work_struct high_work;

#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
	unsigned long zswap_max;
#endif

	unsigned long soft_limit;

	/* vmpressure notifications */
	struct vmpressure vmpressure;

	/*
	 * Should the OOM killer kill all tasks belonging to this cgroup,
	 * if it has to kill one?
	 */
	bool oom_group;

	/* protected by memcg_oom_lock */
	bool oom_lock;
	int under_oom;

	int swappiness;
	/* OOM killer disabled? */
	int oom_kill_disable;

	/* memory.events and memory.events.local */
	struct cgroup_file events_file;
	struct cgroup_file events_local_file;

	/* handle for "memory.swap.events" */
	struct cgroup_file swap_events_file;

	/* protect arrays of thresholds */
	struct mutex thresholds_lock;

	/* thresholds for memory usage. RCU-protected */
	struct mem_cgroup_thresholds thresholds;

	/* thresholds for mem+swap usage. RCU-protected */
	struct mem_cgroup_thresholds memsw_thresholds;

	/* For oom notifier event fd */
	struct list_head oom_notify;

	/*
	 * Should we move charges of a task when the task is moved into this
	 * mem_cgroup? And what type of charges should we move?
	 */
	unsigned long move_charge_at_immigrate;
	/* taken only while moving_account > 0 */
	spinlock_t move_lock;
	unsigned long move_lock_flags;

	CACHELINE_PADDING(_pad1_);

	/* memory.stat */
	struct memcg_vmstats *vmstats;

	/* memory.events */
	atomic_long_t memory_events[MEMCG_NR_MEMORY_EVENTS];
	atomic_long_t memory_events_local[MEMCG_NR_MEMORY_EVENTS];

	unsigned long socket_pressure;

	/* Legacy tcp memory accounting */
	bool tcpmem_active;
	int tcpmem_pressure;

#ifdef CONFIG_MEMCG_KMEM
	int kmemcg_id;
	struct obj_cgroup __rcu *objcg;
	/* list of inherited objcgs, protected by objcg_lock */
	struct list_head objcg_list;
#endif

	CACHELINE_PADDING(_pad2_);

	/*
	 * set > 0 if pages under this cgroup are moving to another cgroup.
	 */
	atomic_t moving_account;
	struct task_struct *move_lock_task;

	struct memcg_vmstats_percpu __percpu *vmstats_percpu;

#ifdef CONFIG_CGROUP_WRITEBACK
	struct list_head cgwb_list;
	struct wb_domain cgwb_domain;
	struct memcg_cgwb_frn cgwb_frn[MEMCG_CGWB_FRN_CNT];
#endif

	/* List of events which userspace wants to receive */
	struct list_head event_list;
	spinlock_t event_list_lock;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	struct deferred_split deferred_split_queue;
#endif

#ifdef CONFIG_LRU_GEN
	/* per-memcg mm_struct list */
	struct lru_gen_mm_list mm_list;
#endif

	struct mem_cgroup_per_node *nodeinfo[];
};

/*
 * Size of the first charge trial.
 * TODO: it may be necessary to use larger values on big machines, or to size
 * this dynamically based on the workload.
 */
#define MEMCG_CHARGE_BATCH	64U

extern struct mem_cgroup *root_mem_cgroup;

enum page_memcg_data_flags {
	/* page->memcg_data is a pointer to an objcgs vector */
	MEMCG_DATA_OBJCGS = (1UL << 0),
	/* page has been accounted as a non-slab kernel page */
	MEMCG_DATA_KMEM = (1UL << 1),
	/* the next bit after the last actual flag */
	__NR_MEMCG_DATA_FLAGS = (1UL << 2),
};

#define MEMCG_DATA_FLAGS_MASK (__NR_MEMCG_DATA_FLAGS - 1)

static inline bool folio_memcg_kmem(struct folio *folio);

/*
 * After initialization, objcg->memcg always points at a valid memcg, but it
 * can be atomically swapped to the parent memcg.
 *
 * The caller must ensure that the returned memcg won't be released:
 * e.g. acquire the rcu_read_lock or css_set_lock.
 */
static inline struct mem_cgroup *obj_cgroup_memcg(struct obj_cgroup *objcg)
{
	return READ_ONCE(objcg->memcg);
}

/*
 * __folio_memcg - Get the memory cgroup associated with a non-kmem folio
 * @folio: Pointer to the folio.
 *
 * Returns a pointer to the memory cgroup associated with the folio,
 * or NULL. This function assumes that the folio is known to have a
 * proper memory cgroup pointer. It's not safe to call this function
 * against some types of folios, e.g. slab folios or ex-slab folios or
 * kmem folios.
 */
static inline struct mem_cgroup *__folio_memcg(struct folio *folio)
{
	unsigned long memcg_data = folio->memcg_data;

	VM_BUG_ON_FOLIO(folio_test_slab(folio), folio);
	VM_BUG_ON_FOLIO(memcg_data & MEMCG_DATA_OBJCGS, folio);
	VM_BUG_ON_FOLIO(memcg_data & MEMCG_DATA_KMEM, folio);

	return (struct mem_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
}

/*
 * __folio_objcg - get the object cgroup associated with a kmem folio.
 * @folio: Pointer to the folio.
 *
 * Returns a pointer to the object cgroup associated with the folio,
 * or NULL. This function assumes that the folio is known to have a
 * proper object cgroup pointer. It's not safe to call this function
 * against some types of folios, e.g. slab folios or ex-slab folios or
 * LRU folios.
 */
static inline struct obj_cgroup *__folio_objcg(struct folio *folio)
{
	unsigned long memcg_data = folio->memcg_data;

	VM_BUG_ON_FOLIO(folio_test_slab(folio), folio);
	VM_BUG_ON_FOLIO(memcg_data & MEMCG_DATA_OBJCGS, folio);
	VM_BUG_ON_FOLIO(!(memcg_data & MEMCG_DATA_KMEM), folio);

	return (struct obj_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
}
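
/*
 * Worked example of the memcg_data encoding (illustrative only): memcg and
 * objcg pointers are sufficiently aligned that the low bits of
 * folio->memcg_data are free to carry the MEMCG_DATA_* flags, so decoding
 * looks like:
 *
 *	unsigned long memcg_data = READ_ONCE(folio->memcg_data);
 *
 *	if (memcg_data & MEMCG_DATA_OBJCGS)
 *		// points to a vector of obj_cgroup *, one per slab object
 *	else if (memcg_data & MEMCG_DATA_KMEM)
 *		objcg = (struct obj_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
 *	else
 *		memcg = (struct mem_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
 *
 * which is what the accessors above and below do, with debug assertions on
 * the combinations each of them accepts.
 */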

/*
 * folio_memcg - Get the memory cgroup associated with a folio.
 * @folio: Pointer to the folio.
 *
 * Returns a pointer to the memory cgroup associated with the folio,
 * or NULL. This function assumes that the folio is known to have a
 * proper memory cgroup pointer. It's not safe to call this function
 * against some types of folios, e.g. slab folios or ex-slab folios.
 *
 * For a non-kmem folio any of the following ensures folio and memcg binding
 * stability:
 *
 * - the folio lock
 * - LRU isolation
 * - folio_memcg_lock()
 * - exclusive reference
 * - mem_cgroup_trylock_pages()
 *
 * For a kmem folio a caller should hold an rcu read lock to protect memcg
 * associated with a kmem folio from being released.
 */
static inline struct mem_cgroup *folio_memcg(struct folio *folio)
{
	if (folio_memcg_kmem(folio))
		return obj_cgroup_memcg(__folio_objcg(folio));
	return __folio_memcg(folio);
}

static inline struct mem_cgroup *page_memcg(struct page *page)
{
	return folio_memcg(page_folio(page));
}

/**
 * folio_memcg_rcu - Locklessly get the memory cgroup associated with a folio.
 * @folio: Pointer to the folio.
 *
 * This function assumes that the folio is known to have a
 * proper memory cgroup pointer. It's not safe to call this function
 * against some types of folios, e.g. slab folios or ex-slab folios.
 *
 * Return: A pointer to the memory cgroup associated with the folio,
 * or NULL.
 */
static inline struct mem_cgroup *folio_memcg_rcu(struct folio *folio)
{
	unsigned long memcg_data = READ_ONCE(folio->memcg_data);

	VM_BUG_ON_FOLIO(folio_test_slab(folio), folio);
	WARN_ON_ONCE(!rcu_read_lock_held());

	if (memcg_data & MEMCG_DATA_KMEM) {
		struct obj_cgroup *objcg;

		objcg = (void *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
		return obj_cgroup_memcg(objcg);
	}

	return (struct mem_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
}

/*
 * folio_memcg_check - Get the memory cgroup associated with a folio.
 * @folio: Pointer to the folio.
 *
 * Returns a pointer to the memory cgroup associated with the folio,
 * or NULL. Unlike folio_memcg(), this function can take any folio
 * as an argument. It has to be used in cases when it's not known if a folio
 * has an associated memory cgroup pointer or an object cgroups vector or
 * an object cgroup.
 *
 * For a non-kmem folio any of the following ensures folio and memcg binding
 * stability:
 *
 * - the folio lock
 * - LRU isolation
 * - folio_memcg_lock()
 * - exclusive reference
 * - mem_cgroup_trylock_pages()
 *
 * For a kmem folio a caller should hold an rcu read lock to protect memcg
 * associated with a kmem folio from being released.
 */
static inline struct mem_cgroup *folio_memcg_check(struct folio *folio)
{
	/*
	 * Because folio->memcg_data might be changed asynchronously
	 * for slabs, READ_ONCE() should be used here.
	 */
	unsigned long memcg_data = READ_ONCE(folio->memcg_data);

	if (memcg_data & MEMCG_DATA_OBJCGS)
		return NULL;

	if (memcg_data & MEMCG_DATA_KMEM) {
		struct obj_cgroup *objcg;

		objcg = (void *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
		return obj_cgroup_memcg(objcg);
	}

	return (struct mem_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
}

static inline struct mem_cgroup *page_memcg_check(struct page *page)
{
	if (PageTail(page))
		return NULL;
	return folio_memcg_check((struct folio *)page);
}

static inline struct mem_cgroup *get_mem_cgroup_from_objcg(struct obj_cgroup *objcg)
{
	struct mem_cgroup *memcg;

	rcu_read_lock();
retry:
	memcg = obj_cgroup_memcg(objcg);
	if (unlikely(!css_tryget(&memcg->css)))
		goto retry;
	rcu_read_unlock();

	return memcg;
}
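
/*
 * Usage sketch (illustrative only): folio_memcg() returns an unreferenced
 * pointer, so a caller that wants to keep using the memcg after dropping the
 * stabilizing condition has to take a css reference while the binding is
 * still stable, e.g. under the folio lock:
 *
 *	// folio is locked
 *	memcg = folio_memcg(folio);
 *	if (memcg)
 *		css_get(&memcg->css);
 *	folio_unlock(folio);
 *	...
 *	mem_cgroup_put(memcg);
 *
 * get_mem_cgroup_from_objcg() above applies the same idea to an obj_cgroup,
 * retrying because objcg->memcg may be re-pointed at an ancestor while the
 * old memcg's css reference count is already on its way to zero.
 */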

#ifdef CONFIG_MEMCG_KMEM
/*
 * folio_memcg_kmem - Check if the folio has the memcg_kmem flag set.
 * @folio: Pointer to the folio.
 *
 * Checks if the folio has the MemcgKmem flag set. The caller must ensure
 * that the folio has an associated memory cgroup. It's not safe to call
 * this function against some types of folios, e.g. slab folios.
 */
static inline bool folio_memcg_kmem(struct folio *folio)
{
	VM_BUG_ON_PGFLAGS(PageTail(&folio->page), &folio->page);
	VM_BUG_ON_FOLIO(folio->memcg_data & MEMCG_DATA_OBJCGS, folio);
	return folio->memcg_data & MEMCG_DATA_KMEM;
}

#else
static inline bool folio_memcg_kmem(struct folio *folio)
{
	return false;
}

#endif

static inline bool PageMemcgKmem(struct page *page)
{
	return folio_memcg_kmem(page_folio(page));
}

static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
{
	return (memcg == root_mem_cgroup);
}

static inline bool mem_cgroup_disabled(void)
{
	return !cgroup_subsys_enabled(memory_cgrp_subsys);
}

static inline void mem_cgroup_protection(struct mem_cgroup *root,
					 struct mem_cgroup *memcg,
					 unsigned long *min,
					 unsigned long *low)
{
	*min = *low = 0;

	if (mem_cgroup_disabled())
		return;

	/*
	 * There is no reclaim protection applied to a targeted reclaim.
	 * We are special casing this specific case here because
	 * mem_cgroup_protected calculation is not robust enough to keep
	 * the protection invariant for calculated effective values for
	 * parallel reclaimers with different reclaim targets. This is
	 * especially a problem for tail memcgs (as they have pages on LRU)
	 * which would want to have effective values 0 for targeted reclaim
	 * but a different value for external reclaim.
	 *
	 * Example
	 * Let's have global and A's reclaim in parallel:
	 *  |
	 *  A (low=2G, usage = 3G, max = 3G, children_low_usage = 1.5G)
	 *  |\
	 *  | C (low = 1G, usage = 2.5G)
	 *  B (low = 1G, usage = 0.5G)
	 *
	 * For the global reclaim
	 * A.elow = A.low
	 * B.elow = min(B.usage, B.low) because children_low_usage <= A.elow
	 * C.elow = min(C.usage, C.low)
	 *
	 * With the effective values resetting we have A reclaim
	 * A.elow = 0
	 * B.elow = B.low
	 * C.elow = C.low
	 *
	 * If the global reclaim races with A's reclaim then
	 * B.elow = C.elow = 0 because children_low_usage > A.elow
	 * is possible and reclaiming B would be violating the protection.
	 */
	if (root == memcg)
		return;

	*min = READ_ONCE(memcg->memory.emin);
	*low = READ_ONCE(memcg->memory.elow);
}

void mem_cgroup_calculate_protection(struct mem_cgroup *root,
				     struct mem_cgroup *memcg);

static inline bool mem_cgroup_unprotected(struct mem_cgroup *target,
					  struct mem_cgroup *memcg)
{
	/*
	 * The root memcg doesn't account charges, and doesn't support
	 * protection. The target memcg's protection is ignored, see
	 * mem_cgroup_calculate_protection() and mem_cgroup_protection().
	 */
	return mem_cgroup_disabled() || mem_cgroup_is_root(memcg) ||
		memcg == target;
}

static inline bool mem_cgroup_below_low(struct mem_cgroup *target,
					struct mem_cgroup *memcg)
{
	if (mem_cgroup_unprotected(target, memcg))
		return false;

	return READ_ONCE(memcg->memory.elow) >=
		page_counter_read(&memcg->memory);
}

static inline bool mem_cgroup_below_min(struct mem_cgroup *target,
					struct mem_cgroup *memcg)
{
	if (mem_cgroup_unprotected(target, memcg))
		return false;

	return READ_ONCE(memcg->memory.emin) >=
		page_counter_read(&memcg->memory);
}
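
/*
 * Simplified sketch of how a reclaimer is expected to use the protection
 * helpers above (the real logic lives in mm/vmscan.c and also handles
 * memory.events accounting and over-protection scanning):
 *
 *	mem_cgroup_calculate_protection(target_memcg, memcg);
 *
 *	if (mem_cgroup_below_min(target_memcg, memcg)) {
 *		// hard protection: skip this memcg entirely
 *	} else if (mem_cgroup_below_low(target_memcg, memcg)) {
 *		// best-effort protection: normally skip and record MEMCG_LOW
 *	} else {
 *		// reclaim from this memcg
 *	}
 *
 * where target_memcg is the cgroup that triggered reclaim (NULL for global
 * reclaim) and memcg is the cgroup currently being scanned.
 */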

int __mem_cgroup_charge(struct folio *folio, struct mm_struct *mm, gfp_t gfp);

/**
 * mem_cgroup_charge - Charge a newly allocated folio to a cgroup.
 * @folio: Folio to charge.
 * @mm: mm context of the allocating task.
 * @gfp: Reclaim mode.
 *
 * Try to charge @folio to the memcg that @mm belongs to, reclaiming
 * pages according to @gfp if necessary. If @mm is NULL, try to
 * charge to the active memcg.
 *
 * Do not use this for folios allocated for swapin.
 *
 * Return: 0 on success. Otherwise, an error code is returned.
 */
static inline int mem_cgroup_charge(struct folio *folio, struct mm_struct *mm,
				    gfp_t gfp)
{
	if (mem_cgroup_disabled())
		return 0;
	return __mem_cgroup_charge(folio, mm, gfp);
}

int mem_cgroup_swapin_charge_folio(struct folio *folio, struct mm_struct *mm,
				   gfp_t gfp, swp_entry_t entry);
void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry);

void __mem_cgroup_uncharge(struct folio *folio);

/**
 * mem_cgroup_uncharge - Uncharge a folio.
 * @folio: Folio to uncharge.
 *
 * Uncharge a folio previously charged with mem_cgroup_charge().
 */
static inline void mem_cgroup_uncharge(struct folio *folio)
{
	if (mem_cgroup_disabled())
		return;
	__mem_cgroup_uncharge(folio);
}

void __mem_cgroup_uncharge_list(struct list_head *page_list);
static inline void mem_cgroup_uncharge_list(struct list_head *page_list)
{
	if (mem_cgroup_disabled())
		return;
	__mem_cgroup_uncharge_list(page_list);
}

void mem_cgroup_migrate(struct folio *old, struct folio *new);
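
/*
 * Usage sketch (illustrative only; vma, gfp and order are assumed caller
 * context): charging a freshly allocated folio for a fault.  Folios read in
 * from swap must use mem_cgroup_swapin_charge_folio() instead, so the charge
 * is attributed to the memcg recorded in the swap entry.
 *
 *	folio = folio_alloc(gfp, order);
 *	if (!folio)
 *		return -ENOMEM;
 *	if (mem_cgroup_charge(folio, vma->vm_mm, gfp)) {
 *		folio_put(folio);
 *		return -ENOMEM;
 *	}
 *	// ... map or insert the folio; the charge is dropped again via
 *	// mem_cgroup_uncharge() when the folio is eventually freed.
 */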

/**
 * mem_cgroup_lruvec - get the lru list vector for a memcg & node
 * @memcg: memcg of the wanted lruvec
 * @pgdat: pglist_data
 *
 * Returns the lru list vector holding pages for a given @memcg &
 * @pgdat combination. This can be the node lruvec, if the memory
 * controller is disabled.
 */
static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg,
					       struct pglist_data *pgdat)
{
	struct mem_cgroup_per_node *mz;
	struct lruvec *lruvec;

	if (mem_cgroup_disabled()) {
		lruvec = &pgdat->__lruvec;
		goto out;
	}

	if (!memcg)
		memcg = root_mem_cgroup;

	mz = memcg->nodeinfo[pgdat->node_id];
	lruvec = &mz->lruvec;
out:
	/*
	 * Since a node can be onlined after the mem_cgroup was created,
	 * we have to be prepared to initialize lruvec->pgdat here;
	 * and if offlined then reonlined, we need to reinitialize it.
	 */
	if (unlikely(lruvec->pgdat != pgdat))
		lruvec->pgdat = pgdat;
	return lruvec;
}

/**
 * folio_lruvec - return lruvec for isolating/putting an LRU folio
 * @folio: Pointer to the folio.
 *
 * This function relies on folio->mem_cgroup being stable.
 */
static inline struct lruvec *folio_lruvec(struct folio *folio)
{
	struct mem_cgroup *memcg = folio_memcg(folio);

	VM_WARN_ON_ONCE_FOLIO(!memcg && !mem_cgroup_disabled(), folio);
	return mem_cgroup_lruvec(memcg, folio_pgdat(folio));
}

struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);

struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm);

struct lruvec *folio_lruvec_lock(struct folio *folio);
struct lruvec *folio_lruvec_lock_irq(struct folio *folio);
struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio,
					 unsigned long *flags);

#ifdef CONFIG_DEBUG_VM
void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio);
#else
static inline
void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio)
{
}
#endif

static inline
struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css)
{
	return css ? container_of(css, struct mem_cgroup, css) : NULL;
}

static inline bool obj_cgroup_tryget(struct obj_cgroup *objcg)
{
	return percpu_ref_tryget(&objcg->refcnt);
}

static inline void obj_cgroup_get(struct obj_cgroup *objcg)
{
	percpu_ref_get(&objcg->refcnt);
}

static inline void obj_cgroup_get_many(struct obj_cgroup *objcg,
				       unsigned long nr)
{
	percpu_ref_get_many(&objcg->refcnt, nr);
}

static inline void obj_cgroup_put(struct obj_cgroup *objcg)
{
	percpu_ref_put(&objcg->refcnt);
}

static inline bool mem_cgroup_tryget(struct mem_cgroup *memcg)
{
	return !memcg || css_tryget(&memcg->css);
}

static inline void mem_cgroup_put(struct mem_cgroup *memcg)
{
	if (memcg)
		css_put(&memcg->css);
}

#define mem_cgroup_from_counter(counter, member)	\
	container_of(counter, struct mem_cgroup, member)

struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
				   struct mem_cgroup *,
				   struct mem_cgroup_reclaim_cookie *);
void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
void mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
			   int (*)(struct task_struct *, void *), void *arg);

static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
{
	if (mem_cgroup_disabled())
		return 0;

	return memcg->id.id;
}
struct mem_cgroup *mem_cgroup_from_id(unsigned short id);
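
/*
 * Usage sketch (illustrative only; do_something()/STOP are placeholders):
 * walking the hierarchy under @root with mem_cgroup_iter().  Passing a
 * mem_cgroup_reclaim_cookie lets concurrent reclaimers share an iterator
 * position; passing NULL gives the caller a private walk.
 *
 *	struct mem_cgroup *memcg = mem_cgroup_iter(root, NULL, NULL);
 *
 *	do {
 *		if (do_something(memcg) == STOP) {
 *			mem_cgroup_iter_break(root, memcg);
 *			break;
 *		}
 *	} while ((memcg = mem_cgroup_iter(root, memcg, NULL)));
 *
 * mem_cgroup_iter() returns css-referenced memcgs and drops the reference on
 * @prev itself, so only an early exit needs mem_cgroup_iter_break() to
 * release the last reference.
 */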

#ifdef CONFIG_SHRINKER_DEBUG
static inline unsigned long mem_cgroup_ino(struct mem_cgroup *memcg)
{
	return memcg ? cgroup_ino(memcg->css.cgroup) : 0;
}

struct mem_cgroup *mem_cgroup_get_from_ino(unsigned long ino);
#endif

static inline struct mem_cgroup *mem_cgroup_from_seq(struct seq_file *m)
{
	return mem_cgroup_from_css(seq_css(m));
}

static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
{
	struct mem_cgroup_per_node *mz;

	if (mem_cgroup_disabled())
		return NULL;

	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	return mz->memcg;
}

/**
 * parent_mem_cgroup - find the accounting parent of a memcg
 * @memcg: memcg whose parent to find
 *
 * Returns the parent memcg, or NULL if this is the root or the memory
 * controller is in legacy no-hierarchy mode.
 */
static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
{
	return mem_cgroup_from_css(memcg->css.parent);
}

static inline bool mem_cgroup_is_descendant(struct mem_cgroup *memcg,
					    struct mem_cgroup *root)
{
	if (root == memcg)
		return true;
	return cgroup_is_descendant(memcg->css.cgroup, root->css.cgroup);
}

static inline bool mm_match_cgroup(struct mm_struct *mm,
				   struct mem_cgroup *memcg)
{
	struct mem_cgroup *task_memcg;
	bool match = false;

	rcu_read_lock();
	task_memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
	if (task_memcg)
		match = mem_cgroup_is_descendant(task_memcg, memcg);
	rcu_read_unlock();
	return match;
}

struct cgroup_subsys_state *mem_cgroup_css_from_folio(struct folio *folio);
ino_t page_cgroup_ino(struct page *page);

static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
{
	if (mem_cgroup_disabled())
		return true;
	return !!(memcg->css.flags & CSS_ONLINE);
}

void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
				int zid, int nr_pages);

static inline
unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
					   enum lru_list lru, int zone_idx)
{
	struct mem_cgroup_per_node *mz;

	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	return READ_ONCE(mz->lru_zone_size[zone_idx][lru]);
}

void mem_cgroup_handle_over_high(void);

unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg);

unsigned long mem_cgroup_size(struct mem_cgroup *memcg);

void mem_cgroup_print_oom_context(struct mem_cgroup *memcg,
				  struct task_struct *p);

void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg);

static inline void mem_cgroup_enter_user_fault(void)
{
	WARN_ON(current->in_user_fault);
	current->in_user_fault = 1;
}

static inline void mem_cgroup_exit_user_fault(void)
{
	WARN_ON(!current->in_user_fault);
	current->in_user_fault = 0;
}

static inline bool task_in_memcg_oom(struct task_struct *p)
{
	return p->memcg_in_oom;
}

bool mem_cgroup_oom_synchronize(bool wait);
struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim,
					    struct mem_cgroup *oom_domain);
void mem_cgroup_print_oom_group(struct mem_cgroup *memcg);

void folio_memcg_lock(struct folio *folio);
void folio_memcg_unlock(struct folio *folio);

void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val);

/* try to stabilize folio_memcg() for all the pages in a memcg */
static inline bool mem_cgroup_trylock_pages(struct mem_cgroup *memcg)
{
	rcu_read_lock();

	if (mem_cgroup_disabled() || !atomic_read(&memcg->moving_account))
		return true;

	rcu_read_unlock();
	return false;
}

static inline void mem_cgroup_unlock_pages(void)
{
	rcu_read_unlock();
}
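
/*
 * Usage sketch (illustrative only): mem_cgroup_trylock_pages() returns with
 * the RCU read lock held on success, so a caller pairs it like this:
 *
 *	if (!mem_cgroup_trylock_pages(memcg))
 *		return;			// charges are being moved, back off
 *
 *	// folio_memcg() is stable for folios charged to @memcg here
 *	...
 *	mem_cgroup_unlock_pages();	// drops the RCU read lock
 *
 * On failure nothing is held and mem_cgroup_unlock_pages() must not be
 * called.
 */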

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void mod_memcg_state(struct mem_cgroup *memcg,
				   int idx, int val)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_memcg_state(memcg, idx, val);
	local_irq_restore(flags);
}

static inline void mod_memcg_page_state(struct page *page,
					int idx, int val)
{
	struct mem_cgroup *memcg;

	if (mem_cgroup_disabled())
		return;

	rcu_read_lock();
	memcg = page_memcg(page);
	if (memcg)
		mod_memcg_state(memcg, idx, val);
	rcu_read_unlock();
}

unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx);

static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
					      enum node_stat_item idx)
{
	struct mem_cgroup_per_node *pn;
	long x;

	if (mem_cgroup_disabled())
		return node_page_state(lruvec_pgdat(lruvec), idx);

	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	x = READ_ONCE(pn->lruvec_stats.state[idx]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
						    enum node_stat_item idx)
{
	struct mem_cgroup_per_node *pn;
	long x = 0;
	int cpu;

	if (mem_cgroup_disabled())
		return node_page_state(lruvec_pgdat(lruvec), idx);

	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	for_each_possible_cpu(cpu)
		x += per_cpu(pn->lruvec_stats_percpu->state[idx], cpu);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

void mem_cgroup_flush_stats(void);
void mem_cgroup_flush_stats_ratelimited(void);

void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
			      int val);
void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val);

static inline void mod_lruvec_kmem_state(void *p, enum node_stat_item idx,
					 int val)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_lruvec_kmem_state(p, idx, val);
	local_irq_restore(flags);
}

static inline void mod_memcg_lruvec_state(struct lruvec *lruvec,
					  enum node_stat_item idx, int val)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_memcg_lruvec_state(lruvec, idx, val);
	local_irq_restore(flags);
}

void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
			  unsigned long count);

static inline void count_memcg_events(struct mem_cgroup *memcg,
				      enum vm_event_item idx,
				      unsigned long count)
{
	unsigned long flags;

	local_irq_save(flags);
	__count_memcg_events(memcg, idx, count);
	local_irq_restore(flags);
}

static inline void count_memcg_page_event(struct page *page,
					  enum vm_event_item idx)
{
	struct mem_cgroup *memcg = page_memcg(page);

	if (memcg)
		count_memcg_events(memcg, idx, 1);
}
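
/*
 * Note on the convention used by the statistics helpers above and below:
 * the double-underscore variants (__mod_memcg_state(), __count_memcg_events(),
 * ...) require the caller to have IRQs disabled or otherwise serialised,
 * while the plain variants wrap them in local_irq_save()/restore().  From
 * ordinary process context, for example:
 *
 *	mod_memcg_state(memcg, MEMCG_SOCK, nr_pages);
 *
 * whereas a path that already runs with IRQs off can call
 * __mod_memcg_state() directly and skip the extra save/restore.
 */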

static inline void count_memcg_folio_events(struct folio *folio,
		enum vm_event_item idx, unsigned long nr)
{
	struct mem_cgroup *memcg = folio_memcg(folio);

	if (memcg)
		count_memcg_events(memcg, idx, nr);
}

static inline void count_memcg_event_mm(struct mm_struct *mm,
					enum vm_event_item idx)
{
	struct mem_cgroup *memcg;

	if (mem_cgroup_disabled())
		return;

	rcu_read_lock();
	memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
	if (likely(memcg))
		count_memcg_events(memcg, idx, 1);
	rcu_read_unlock();
}

static inline void memcg_memory_event(struct mem_cgroup *memcg,
				      enum memcg_memory_event event)
{
	bool swap_event = event == MEMCG_SWAP_HIGH || event == MEMCG_SWAP_MAX ||
			  event == MEMCG_SWAP_FAIL;

	atomic_long_inc(&memcg->memory_events_local[event]);
	if (!swap_event)
		cgroup_file_notify(&memcg->events_local_file);

	do {
		atomic_long_inc(&memcg->memory_events[event]);
		if (swap_event)
			cgroup_file_notify(&memcg->swap_events_file);
		else
			cgroup_file_notify(&memcg->events_file);

		if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
			break;
		if (cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_LOCAL_EVENTS)
			break;
	} while ((memcg = parent_mem_cgroup(memcg)) &&
		 !mem_cgroup_is_root(memcg));
}

static inline void memcg_memory_event_mm(struct mm_struct *mm,
					 enum memcg_memory_event event)
{
	struct mem_cgroup *memcg;

	if (mem_cgroup_disabled())
		return;

	rcu_read_lock();
	memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
	if (likely(memcg))
		memcg_memory_event(memcg, event);
	rcu_read_unlock();
}
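
/*
 * Usage sketch (illustrative only): memcg_memory_event() is how the charge
 * and reclaim paths record events such as hitting memory.max:
 *
 *	memcg_memory_event(memcg, MEMCG_MAX);
 *
 * The local counter (memory.events.local) is only bumped on @memcg itself,
 * while the hierarchical counter and file notification are, on cgroup2
 * without the memory_localevents mount option, also applied to every
 * ancestor up to but not including the root.
 */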

void split_page_memcg(struct page *head, unsigned int nr);

unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
					    gfp_t gfp_mask,
					    unsigned long *total_scanned);

#else /* CONFIG_MEMCG */

#define MEM_CGROUP_ID_SHIFT	0
#define MEM_CGROUP_ID_MAX	0

static inline struct mem_cgroup *folio_memcg(struct folio *folio)
{
	return NULL;
}

static inline struct mem_cgroup *page_memcg(struct page *page)
{
	return NULL;
}

static inline struct mem_cgroup *folio_memcg_rcu(struct folio *folio)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return NULL;
}

static inline struct mem_cgroup *folio_memcg_check(struct folio *folio)
{
	return NULL;
}

static inline struct mem_cgroup *page_memcg_check(struct page *page)
{
	return NULL;
}

static inline bool folio_memcg_kmem(struct folio *folio)
{
	return false;
}

static inline bool PageMemcgKmem(struct page *page)
{
	return false;
}

static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
{
	return true;
}

static inline bool mem_cgroup_disabled(void)
{
	return true;
}

static inline void memcg_memory_event(struct mem_cgroup *memcg,
				      enum memcg_memory_event event)
{
}

static inline void memcg_memory_event_mm(struct mm_struct *mm,
					 enum memcg_memory_event event)
{
}

static inline void mem_cgroup_protection(struct mem_cgroup *root,
					 struct mem_cgroup *memcg,
					 unsigned long *min,
					 unsigned long *low)
{
	*min = *low = 0;
}

static inline void mem_cgroup_calculate_protection(struct mem_cgroup *root,
						   struct mem_cgroup *memcg)
{
}

static inline bool mem_cgroup_unprotected(struct mem_cgroup *target,
					  struct mem_cgroup *memcg)
{
	return true;
}

static inline bool mem_cgroup_below_low(struct mem_cgroup *target,
					struct mem_cgroup *memcg)
{
	return false;
}

static inline bool mem_cgroup_below_min(struct mem_cgroup *target,
					struct mem_cgroup *memcg)
{
	return false;
}

static inline int mem_cgroup_charge(struct folio *folio,
		struct mm_struct *mm, gfp_t gfp)
{
	return 0;
}

static inline int mem_cgroup_swapin_charge_folio(struct folio *folio,
		struct mm_struct *mm, gfp_t gfp, swp_entry_t entry)
{
	return 0;
}

static inline void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry)
{
}

static inline void mem_cgroup_uncharge(struct folio *folio)
{
}

static inline void mem_cgroup_uncharge_list(struct list_head *page_list)
{
}

static inline void mem_cgroup_migrate(struct folio *old, struct folio *new)
{
}

static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg,
					       struct pglist_data *pgdat)
{
	return &pgdat->__lruvec;
}

static inline struct lruvec *folio_lruvec(struct folio *folio)
{
	struct pglist_data *pgdat = folio_pgdat(folio);
	return &pgdat->__lruvec;
}

static inline
void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio)
{
}

static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
{
	return NULL;
}

static inline bool mm_match_cgroup(struct mm_struct *mm,
				   struct mem_cgroup *memcg)
{
	return true;
}

static inline struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
{
	return NULL;
}

static inline
struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css)
{
	return NULL;
}

static inline void obj_cgroup_put(struct obj_cgroup *objcg)
{
}

static inline bool mem_cgroup_tryget(struct mem_cgroup *memcg)
{
	return true;
}

static inline void mem_cgroup_put(struct mem_cgroup *memcg)
{
}

static inline struct lruvec *folio_lruvec_lock(struct folio *folio)
{
	struct pglist_data *pgdat = folio_pgdat(folio);

	spin_lock(&pgdat->__lruvec.lru_lock);
	return &pgdat->__lruvec;
}

static inline struct lruvec *folio_lruvec_lock_irq(struct folio *folio)
{
	struct pglist_data *pgdat = folio_pgdat(folio);

	spin_lock_irq(&pgdat->__lruvec.lru_lock);
	return &pgdat->__lruvec;
}

static inline struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio,
		unsigned long *flagsp)
{
	struct pglist_data *pgdat = folio_pgdat(folio);

	spin_lock_irqsave(&pgdat->__lruvec.lru_lock, *flagsp);
	return &pgdat->__lruvec;
}

static inline struct mem_cgroup *
mem_cgroup_iter(struct mem_cgroup *root,
		struct mem_cgroup *prev,
		struct mem_cgroup_reclaim_cookie *reclaim)
{
	return NULL;
}

static inline void mem_cgroup_iter_break(struct mem_cgroup *root,
					 struct mem_cgroup *prev)
{
}

static inline void mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
		int (*fn)(struct task_struct *, void *), void *arg)
{
}

static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
{
	return 0;
}

static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
{
	WARN_ON_ONCE(id);
	/* XXX: This should always return root_mem_cgroup */
	return NULL;
}

#ifdef CONFIG_SHRINKER_DEBUG
static inline unsigned long mem_cgroup_ino(struct mem_cgroup *memcg)
{
	return 0;
}

static inline struct mem_cgroup *mem_cgroup_get_from_ino(unsigned long ino)
{
	return NULL;
}
#endif

static inline struct mem_cgroup *mem_cgroup_from_seq(struct seq_file *m)
{
	return NULL;
}

static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
{
	return NULL;
}

static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
{
	return true;
}

static inline
unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
					   enum lru_list lru, int zone_idx)
{
	return 0;
}

static inline unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
{
	return 0;
}

static inline unsigned long mem_cgroup_size(struct mem_cgroup *memcg)
{
	return 0;
}

static inline void
mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
{
}

static inline void
mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
{
}

static inline void folio_memcg_lock(struct folio *folio)
{
}

static inline void folio_memcg_unlock(struct folio *folio)
{
}

static inline bool mem_cgroup_trylock_pages(struct mem_cgroup *memcg)
{
	/* to match folio_memcg_rcu() */
	rcu_read_lock();
	return true;
}

static inline void mem_cgroup_unlock_pages(void)
{
	rcu_read_unlock();
}

static inline void mem_cgroup_handle_over_high(void)
{
}

static inline void mem_cgroup_enter_user_fault(void)
{
}

static inline void mem_cgroup_exit_user_fault(void)
{
}

static inline bool task_in_memcg_oom(struct task_struct *p)
{
	return false;
}

static inline bool mem_cgroup_oom_synchronize(bool wait)
{
	return false;
}

static inline struct mem_cgroup *mem_cgroup_get_oom_group(
	struct task_struct *victim, struct mem_cgroup *oom_domain)
{
	return NULL;
}

static inline void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
{
}

static inline void __mod_memcg_state(struct mem_cgroup *memcg,
				     int idx,
				     int nr)
{
}

static inline void mod_memcg_state(struct mem_cgroup *memcg,
				   int idx,
				   int nr)
{
}

static inline void mod_memcg_page_state(struct page *page,
					int idx, int val)
{
}

static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
{
	return 0;
}

static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
					      enum node_stat_item idx)
{
	return node_page_state(lruvec_pgdat(lruvec), idx);
}

static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
						    enum node_stat_item idx)
{
	return node_page_state(lruvec_pgdat(lruvec), idx);
}

static inline void mem_cgroup_flush_stats(void)
{
}

static inline void mem_cgroup_flush_stats_ratelimited(void)
{
}

static inline void __mod_memcg_lruvec_state(struct lruvec *lruvec,
					    enum node_stat_item idx, int val)
{
}

static inline void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx,
					   int val)
{
	struct page *page = virt_to_head_page(p);

	__mod_node_page_state(page_pgdat(page), idx, val);
}

static inline void mod_lruvec_kmem_state(void *p, enum node_stat_item idx,
					 int val)
{
	struct page *page = virt_to_head_page(p);

	mod_node_page_state(page_pgdat(page), idx, val);
}

static inline void count_memcg_events(struct mem_cgroup *memcg,
				      enum vm_event_item idx,
				      unsigned long count)
{
}

static inline void __count_memcg_events(struct mem_cgroup *memcg,
					enum vm_event_item idx,
					unsigned long count)
{
}

static inline void count_memcg_page_event(struct page *page,
					  int idx)
{
}

static inline void count_memcg_folio_events(struct folio *folio,
		enum vm_event_item idx, unsigned long nr)
{
}

static inline
void count_memcg_event_mm(struct mm_struct *mm, enum vm_event_item idx)
{
}

static inline void split_page_memcg(struct page *head, unsigned int nr)
{
}

static inline
unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
					    gfp_t gfp_mask,
					    unsigned long *total_scanned)
{
	return 0;
}
#endif /* CONFIG_MEMCG */

static inline void __inc_lruvec_kmem_state(void *p, enum node_stat_item idx)
{
	__mod_lruvec_kmem_state(p, idx, 1);
}

static inline void __dec_lruvec_kmem_state(void *p, enum node_stat_item idx)
{
	__mod_lruvec_kmem_state(p, idx, -1);
}

static inline struct lruvec *parent_lruvec(struct lruvec *lruvec)
{
	struct mem_cgroup *memcg;

	memcg = lruvec_memcg(lruvec);
	if (!memcg)
		return NULL;
	memcg = parent_mem_cgroup(memcg);
	if (!memcg)
		return NULL;
	return mem_cgroup_lruvec(memcg, lruvec_pgdat(lruvec));
}

static inline void unlock_page_lruvec(struct lruvec *lruvec)
{
	spin_unlock(&lruvec->lru_lock);
}

static inline void unlock_page_lruvec_irq(struct lruvec *lruvec)
{
	spin_unlock_irq(&lruvec->lru_lock);
}

static inline void unlock_page_lruvec_irqrestore(struct lruvec *lruvec,
		unsigned long flags)
{
	spin_unlock_irqrestore(&lruvec->lru_lock, flags);
}

/* Test requires a stable page->memcg binding, see page_memcg() */
static inline bool folio_matches_lruvec(struct folio *folio,
					struct lruvec *lruvec)
{
	return lruvec_pgdat(lruvec) == folio_pgdat(folio) &&
	       lruvec_memcg(lruvec) == folio_memcg(folio);
}

/* Don't lock again iff page's lruvec locked */
static inline struct lruvec *folio_lruvec_relock_irq(struct folio *folio,
		struct lruvec *locked_lruvec)
{
	if (locked_lruvec) {
		if (folio_matches_lruvec(folio, locked_lruvec))
			return locked_lruvec;

		unlock_page_lruvec_irq(locked_lruvec);
	}

	return folio_lruvec_lock_irq(folio);
}

/* Don't lock again iff page's lruvec locked */
static inline struct lruvec *folio_lruvec_relock_irqsave(struct folio *folio,
		struct lruvec *locked_lruvec, unsigned long *flags)
{
	if (locked_lruvec) {
		if (folio_matches_lruvec(folio, locked_lruvec))
			return locked_lruvec;

		unlock_page_lruvec_irqrestore(locked_lruvec, *flags);
	}

	return folio_lruvec_lock_irqsave(folio, flags);
}
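
/*
 * Usage sketch (illustrative only; folio/folios are assumed caller context):
 * the relock helpers above are meant for loops over a mixed batch of folios
 * that only want to switch lru_lock when the next folio belongs to a
 * different lruvec:
 *
 *	struct lruvec *lruvec = NULL;
 *	unsigned long flags;
 *
 *	list_for_each_entry(folio, &folios, lru) {
 *		lruvec = folio_lruvec_relock_irqsave(folio, lruvec, &flags);
 *		// ... operate on folio under lruvec->lru_lock ...
 *	}
 *	if (lruvec)
 *		unlock_page_lruvec_irqrestore(lruvec, flags);
 */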

#ifdef CONFIG_CGROUP_WRITEBACK

struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb);
void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
			 unsigned long *pheadroom, unsigned long *pdirty,
			 unsigned long *pwriteback);

void mem_cgroup_track_foreign_dirty_slowpath(struct folio *folio,
					     struct bdi_writeback *wb);

static inline void mem_cgroup_track_foreign_dirty(struct folio *folio,
						  struct bdi_writeback *wb)
{
	struct mem_cgroup *memcg;

	if (mem_cgroup_disabled())
		return;

	memcg = folio_memcg(folio);
	if (unlikely(memcg && &memcg->css != wb->memcg_css))
		mem_cgroup_track_foreign_dirty_slowpath(folio, wb);
}

void mem_cgroup_flush_foreign(struct bdi_writeback *wb);

#else /* CONFIG_CGROUP_WRITEBACK */

static inline struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
{
	return NULL;
}

static inline void mem_cgroup_wb_stats(struct bdi_writeback *wb,
				       unsigned long *pfilepages,
				       unsigned long *pheadroom,
				       unsigned long *pdirty,
				       unsigned long *pwriteback)
{
}

static inline void mem_cgroup_track_foreign_dirty(struct folio *folio,
						  struct bdi_writeback *wb)
{
}

static inline void mem_cgroup_flush_foreign(struct bdi_writeback *wb)
{
}

#endif	/* CONFIG_CGROUP_WRITEBACK */

struct sock;
bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages,
			     gfp_t gfp_mask);
void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
#ifdef CONFIG_MEMCG
extern struct static_key_false memcg_sockets_enabled_key;
#define mem_cgroup_sockets_enabled static_branch_unlikely(&memcg_sockets_enabled_key)
void mem_cgroup_sk_alloc(struct sock *sk);
void mem_cgroup_sk_free(struct sock *sk);
static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
{
	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_pressure)
		return true;
	do {
		if (time_before(jiffies, READ_ONCE(memcg->socket_pressure)))
			return true;
	} while ((memcg = parent_mem_cgroup(memcg)));
	return false;
}

int alloc_shrinker_info(struct mem_cgroup *memcg);
void free_shrinker_info(struct mem_cgroup *memcg);
void set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id);
void reparent_shrinker_deferred(struct mem_cgroup *memcg);
#else
#define mem_cgroup_sockets_enabled 0
static inline void mem_cgroup_sk_alloc(struct sock *sk) { };
static inline void mem_cgroup_sk_free(struct sock *sk) { };
static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
{
	return false;
}

static inline void set_shrinker_bit(struct mem_cgroup *memcg,
				    int nid, int shrinker_id)
{
}
#endif

#ifdef CONFIG_MEMCG_KMEM
bool mem_cgroup_kmem_disabled(void);
int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order);
void __memcg_kmem_uncharge_page(struct page *page, int order);

struct obj_cgroup *get_obj_cgroup_from_current(void);
struct obj_cgroup *get_obj_cgroup_from_page(struct page *page);

int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size);
void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size);
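
/*
 * Usage sketch (illustrative only): byte-granular accounting of a kernel
 * allocation against the current task's cgroup via an obj_cgroup.  Sizes
 * need not be page-aligned; sub-page remainders are tracked in
 * obj_cgroup::nr_charged_bytes.
 *
 *	objcg = get_obj_cgroup_from_current();	// NULL if not accountable
 *	if (objcg) {
 *		if (obj_cgroup_charge(objcg, GFP_KERNEL, size)) {
 *			obj_cgroup_put(objcg);
 *			return -ENOMEM;	// over limit and reclaim failed
 *		}
 *		// ... keep objcg referenced while the charge is held ...
 *		obj_cgroup_uncharge(objcg, size);
 *		obj_cgroup_put(objcg);
 *	}
 *
 * This is the mechanism behind accounted slab and percpu allocations; most
 * callers should simply pass __GFP_ACCOUNT to the allocator rather than
 * open-coding it.
 */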

extern struct static_key_false memcg_bpf_enabled_key;
static inline bool memcg_bpf_enabled(void)
{
	return static_branch_likely(&memcg_bpf_enabled_key);
}

extern struct static_key_false memcg_kmem_online_key;

static inline bool memcg_kmem_online(void)
{
	return static_branch_likely(&memcg_kmem_online_key);
}

static inline int memcg_kmem_charge_page(struct page *page, gfp_t gfp,
					 int order)
{
	if (memcg_kmem_online())
		return __memcg_kmem_charge_page(page, gfp, order);
	return 0;
}

static inline void memcg_kmem_uncharge_page(struct page *page, int order)
{
	if (memcg_kmem_online())
		__memcg_kmem_uncharge_page(page, order);
}

/*
 * A helper for accessing memcg's kmem_id, used for getting
 * corresponding LRU lists.
 */
static inline int memcg_kmem_id(struct mem_cgroup *memcg)
{
	return memcg ? memcg->kmemcg_id : -1;
}

struct mem_cgroup *mem_cgroup_from_obj(void *p);
struct mem_cgroup *mem_cgroup_from_slab_obj(void *p);

static inline void count_objcg_event(struct obj_cgroup *objcg,
				     enum vm_event_item idx)
{
	struct mem_cgroup *memcg;

	if (!memcg_kmem_online())
		return;

	rcu_read_lock();
	memcg = obj_cgroup_memcg(objcg);
	count_memcg_events(memcg, idx, 1);
	rcu_read_unlock();
}

#else
static inline bool mem_cgroup_kmem_disabled(void)
{
	return true;
}

static inline int memcg_kmem_charge_page(struct page *page, gfp_t gfp,
					 int order)
{
	return 0;
}

static inline void memcg_kmem_uncharge_page(struct page *page, int order)
{
}

static inline int __memcg_kmem_charge_page(struct page *page, gfp_t gfp,
					   int order)
{
	return 0;
}

static inline void __memcg_kmem_uncharge_page(struct page *page, int order)
{
}

static inline struct obj_cgroup *get_obj_cgroup_from_page(struct page *page)
{
	return NULL;
}

static inline bool memcg_bpf_enabled(void)
{
	return false;
}

static inline bool memcg_kmem_online(void)
{
	return false;
}

static inline int memcg_kmem_id(struct mem_cgroup *memcg)
{
	return -1;
}

static inline struct mem_cgroup *mem_cgroup_from_obj(void *p)
{
	return NULL;
}

static inline struct mem_cgroup *mem_cgroup_from_slab_obj(void *p)
{
	return NULL;
}

static inline void count_objcg_event(struct obj_cgroup *objcg,
				     enum vm_event_item idx)
{
}

#endif /* CONFIG_MEMCG_KMEM */

#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
bool obj_cgroup_may_zswap(struct obj_cgroup *objcg);
void obj_cgroup_charge_zswap(struct obj_cgroup *objcg, size_t size);
void obj_cgroup_uncharge_zswap(struct obj_cgroup *objcg, size_t size);
#else
static inline bool obj_cgroup_may_zswap(struct obj_cgroup *objcg)
{
	return true;
}
static inline void obj_cgroup_charge_zswap(struct obj_cgroup *objcg,
					   size_t size)
{
}
static inline void obj_cgroup_uncharge_zswap(struct obj_cgroup *objcg,
					     size_t size)
{
}
#endif

#endif /* _LINUX_MEMCONTROL_H */