/* SPDX-License-Identifier: GPL-2.0-or-later */
/* memcontrol.h - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <[email protected]>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <[email protected]>
 */

#ifndef _LINUX_MEMCONTROL_H
#define _LINUX_MEMCONTROL_H
#include <linux/cgroup.h>
#include <linux/vm_event_item.h>
#include <linux/hardirq.h>
#include <linux/jump_label.h>
#include <linux/page_counter.h>
#include <linux/vmpressure.h>
#include <linux/eventfd.h>
#include <linux/mm.h>
#include <linux/vmstat.h>
#include <linux/writeback.h>
#include <linux/page-flags.h>

struct mem_cgroup;
struct obj_cgroup;
struct page;
struct mm_struct;
struct kmem_cache;

/* Cgroup-specific page state, on top of universal node page state */
enum memcg_stat_item {
	MEMCG_SWAP = NR_VM_NODE_STAT_ITEMS,
	MEMCG_SOCK,
	MEMCG_PERCPU_B,
	MEMCG_VMALLOC,
	MEMCG_KMEM,
	MEMCG_ZSWAP_B,
	MEMCG_ZSWAPPED,
	MEMCG_NR_STAT,
};

enum memcg_memory_event {
	MEMCG_LOW,
	MEMCG_HIGH,
	MEMCG_MAX,
	MEMCG_OOM,
	MEMCG_OOM_KILL,
	MEMCG_OOM_GROUP_KILL,
	MEMCG_SWAP_HIGH,
	MEMCG_SWAP_MAX,
	MEMCG_SWAP_FAIL,
	MEMCG_NR_MEMORY_EVENTS,
};

struct mem_cgroup_reclaim_cookie {
	pg_data_t *pgdat;
	unsigned int generation;
};

#ifdef CONFIG_MEMCG

#define MEM_CGROUP_ID_SHIFT	16

struct mem_cgroup_id {
	int id;
	refcount_t ref;
};

/*
 * Per memcg event counter is incremented at every pagein/pageout. With THP,
 * it will be incremented by the number of pages. This counter is used
 * to trigger some periodic events. This is straightforward and better
 * than using jiffies etc. to handle periodic memcg event.
 */
enum mem_cgroup_events_target {
	MEM_CGROUP_TARGET_THRESH,
	MEM_CGROUP_TARGET_SOFTLIMIT,
	MEM_CGROUP_NTARGETS,
};

struct memcg_vmstats_percpu;
struct memcg_vmstats;

struct mem_cgroup_reclaim_iter {
	struct mem_cgroup *position;
	/* scan generation, increased every round-trip */
	unsigned int generation;
};

/*
 * Bitmap and deferred work of shrinker::id corresponding to memcg-aware
 * shrinkers, which have elements charged to this memcg.
 */
struct shrinker_info {
	struct rcu_head rcu;
	atomic_long_t *nr_deferred;
	unsigned long *map;
	int map_nr_max;
};

struct lruvec_stats_percpu {
	/* Local (CPU and cgroup) state */
	long state[NR_VM_NODE_STAT_ITEMS];

	/* Delta calculation for lockless upward propagation */
	long state_prev[NR_VM_NODE_STAT_ITEMS];
};

struct lruvec_stats {
	/* Aggregated (CPU and subtree) state */
	long state[NR_VM_NODE_STAT_ITEMS];

	/* Pending child counts during tree propagation */
	long state_pending[NR_VM_NODE_STAT_ITEMS];
};
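/*
 * Illustrative sketch (not the actual mm/memcontrol.c flush code): during a
 * stats flush the percpu ->state counters above are read, the difference
 * against ->state_prev is folded into the aggregated lruvec_stats and into
 * the parent's ->state_pending, and ->state_prev is advanced.  Roughly:
 *
 *	delta = READ_ONCE(percpu->state[i]) - percpu->state_prev[i];
 *	percpu->state_prev[i] += delta;
 *	stats->state[i] += delta;
 *	parent_stats->state_pending[i] += delta;
 */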
/*
 * per-node information in memory controller.
 */
struct mem_cgroup_per_node {
	struct lruvec		lruvec;

	struct lruvec_stats_percpu __percpu	*lruvec_stats_percpu;
	struct lruvec_stats			lruvec_stats;

	unsigned long		lru_zone_size[MAX_NR_ZONES][NR_LRU_LISTS];

	struct mem_cgroup_reclaim_iter	iter;

	struct shrinker_info __rcu	*shrinker_info;

	struct rb_node		tree_node;	/* RB tree node */
	unsigned long		usage_in_excess;/* Set to the value by which */
						/* the soft limit is exceeded */
	bool			on_tree;
	struct mem_cgroup	*memcg;		/* Back pointer, we cannot */
						/* use container_of	   */
};

struct mem_cgroup_threshold {
	struct eventfd_ctx *eventfd;
	unsigned long threshold;
};

/* For threshold */
struct mem_cgroup_threshold_ary {
	/* An array index points to threshold just below or equal to usage. */
	int current_threshold;
	/* Size of entries[] */
	unsigned int size;
	/* Array of thresholds */
	struct mem_cgroup_threshold entries[];
};

struct mem_cgroup_thresholds {
	/* Primary thresholds array */
	struct mem_cgroup_threshold_ary *primary;
	/*
	 * Spare threshold array.
	 * This is needed to make mem_cgroup_unregister_event() "never fail".
	 * It must be able to store at least primary->size - 1 entries.
	 */
	struct mem_cgroup_threshold_ary *spare;
};

/*
 * Remember four most recent foreign writebacks with dirty pages in this
 * cgroup.  Inode sharing is expected to be uncommon and, even if we miss
 * one in a given round, we're likely to catch it later if it keeps
 * foreign-dirtying, so a fairly low count should be enough.
 *
 * See mem_cgroup_track_foreign_dirty_slowpath() for details.
 */
#define MEMCG_CGWB_FRN_CNT	4

struct memcg_cgwb_frn {
	u64 bdi_id;			/* bdi->id of the foreign inode */
	int memcg_id;			/* memcg->css.id of foreign inode */
	u64 at;				/* jiffies_64 at the time of dirtying */
	struct wb_completion done;	/* tracks in-flight foreign writebacks */
};

/*
 * Bucket for arbitrarily byte-sized objects charged to a memory
 * cgroup. The bucket can be reparented in one piece when the cgroup
 * is destroyed, without having to round up the individual references
 * of all live memory objects in the wild.
 */
struct obj_cgroup {
	struct percpu_ref refcnt;
	struct mem_cgroup *memcg;
	atomic_t nr_charged_bytes;
	union {
		struct list_head list; /* protected by objcg_lock */
		struct rcu_head rcu;
	};
};
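/*
 * Illustrative sketch of byte-level charging through an obj_cgroup; this is
 * not the slab accounting code itself, just the intended usage pattern of
 * the get_obj_cgroup_from_current()/obj_cgroup_charge()/obj_cgroup_uncharge()
 * API declared later in this header:
 *
 *	objcg = get_obj_cgroup_from_current();
 *	if (objcg) {
 *		if (!obj_cgroup_charge(objcg, GFP_KERNEL, size))
 *			... remember objcg as the owner of the object ...
 *		obj_cgroup_put(objcg);
 *	}
 *
 * On free, obj_cgroup_uncharge(objcg, size) returns the bytes to the bucket.
 */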
/*
 * The memory controller data structure. The memory controller controls both
 * page cache and RSS per cgroup. We would eventually like to provide
 * statistics based on the statistics developed by Rik van Riel for clock-pro,
 * to help the administrator determine what knobs to tune.
 */
struct mem_cgroup {
	struct cgroup_subsys_state css;

	/* Private memcg ID. Used to ID objects that outlive the cgroup */
	struct mem_cgroup_id id;

	/* Accounted resources */
	struct page_counter memory;		/* Both v1 & v2 */

	union {
		struct page_counter swap;	/* v2 only */
		struct page_counter memsw;	/* v1 only */
	};

	/* Legacy consumer-oriented counters */
	struct page_counter kmem;		/* v1 only */
	struct page_counter tcpmem;		/* v1 only */

	/* Range enforcement for interrupt charges */
	struct work_struct high_work;

#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
	unsigned long zswap_max;
#endif

	unsigned long soft_limit;

	/* vmpressure notifications */
	struct vmpressure vmpressure;

	/*
	 * Should the OOM killer kill all tasks belonging to this cgroup
	 * if it has to kill one?
	 */
	bool oom_group;

	/* protected by memcg_oom_lock */
	bool oom_lock;
	int under_oom;

	int swappiness;
	/* OOM-Killer disable */
	int oom_kill_disable;

	/* memory.events and memory.events.local */
	struct cgroup_file events_file;
	struct cgroup_file events_local_file;

	/* handle for "memory.swap.events" */
	struct cgroup_file swap_events_file;

	/* protect arrays of thresholds */
	struct mutex thresholds_lock;

	/* thresholds for memory usage. RCU-protected */
	struct mem_cgroup_thresholds thresholds;

	/* thresholds for mem+swap usage. RCU-protected */
	struct mem_cgroup_thresholds memsw_thresholds;

	/* For oom notifier event fd */
	struct list_head oom_notify;

	/*
	 * Should we move charges of a task when a task is moved into this
	 * mem_cgroup? And what type of charges should we move?
	 */
	unsigned long move_charge_at_immigrate;
	/* taken only while moving_account > 0 */
	spinlock_t move_lock;
	unsigned long move_lock_flags;

	CACHELINE_PADDING(_pad1_);

	/* memory.stat */
	struct memcg_vmstats *vmstats;

	/* memory.events */
	atomic_long_t memory_events[MEMCG_NR_MEMORY_EVENTS];
	atomic_long_t memory_events_local[MEMCG_NR_MEMORY_EVENTS];

	unsigned long socket_pressure;

	/* Legacy tcp memory accounting */
	bool tcpmem_active;
	int tcpmem_pressure;

#ifdef CONFIG_MEMCG_KMEM
	int kmemcg_id;
	struct obj_cgroup __rcu *objcg;
	/* list of inherited objcgs, protected by objcg_lock */
	struct list_head objcg_list;
#endif

	CACHELINE_PADDING(_pad2_);

	/*
	 * set > 0 if pages under this cgroup are moving to other cgroup.
	 */
	atomic_t moving_account;
	struct task_struct *move_lock_task;

	struct memcg_vmstats_percpu __percpu *vmstats_percpu;

#ifdef CONFIG_CGROUP_WRITEBACK
	struct list_head cgwb_list;
	struct wb_domain cgwb_domain;
	struct memcg_cgwb_frn cgwb_frn[MEMCG_CGWB_FRN_CNT];
#endif

	/* List of events which userspace want to receive */
	struct list_head event_list;
	spinlock_t event_list_lock;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	struct deferred_split deferred_split_queue;
#endif

#ifdef CONFIG_LRU_GEN
	/* per-memcg mm_struct list */
	struct lru_gen_mm_list mm_list;
#endif

	struct mem_cgroup_per_node *nodeinfo[];
};
/*
 * size of first charge trial.
 * TODO: it may be necessary to use bigger numbers on big irons, or to make
 * the batch size dynamic based on the workload.
 */
#define MEMCG_CHARGE_BATCH 64U

extern struct mem_cgroup *root_mem_cgroup;

enum page_memcg_data_flags {
	/* page->memcg_data is a pointer to an objcgs vector */
	MEMCG_DATA_OBJCGS = (1UL << 0),
	/* page has been accounted as a non-slab kernel page */
	MEMCG_DATA_KMEM = (1UL << 1),
	/* the next bit after the last actual flag */
	__NR_MEMCG_DATA_FLAGS = (1UL << 2),
};

#define MEMCG_DATA_FLAGS_MASK (__NR_MEMCG_DATA_FLAGS - 1)

static inline bool folio_memcg_kmem(struct folio *folio);

/*
 * After initialization, objcg->memcg always points at a valid memcg,
 * but it can be atomically swapped to the parent memcg.
 *
 * The caller must ensure that the returned memcg won't be released:
 * e.g. acquire the rcu_read_lock or css_set_lock.
 */
static inline struct mem_cgroup *obj_cgroup_memcg(struct obj_cgroup *objcg)
{
	return READ_ONCE(objcg->memcg);
}

/*
 * __folio_memcg - Get the memory cgroup associated with a non-kmem folio
 * @folio: Pointer to the folio.
 *
 * Returns a pointer to the memory cgroup associated with the folio,
 * or NULL. This function assumes that the folio is known to have a
 * proper memory cgroup pointer. It's not safe to call this function
 * against some types of folios, e.g. slab folios, ex-slab folios or
 * kmem folios.
 */
static inline struct mem_cgroup *__folio_memcg(struct folio *folio)
{
	unsigned long memcg_data = folio->memcg_data;

	VM_BUG_ON_FOLIO(folio_test_slab(folio), folio);
	VM_BUG_ON_FOLIO(memcg_data & MEMCG_DATA_OBJCGS, folio);
	VM_BUG_ON_FOLIO(memcg_data & MEMCG_DATA_KMEM, folio);

	return (struct mem_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
}

/*
 * __folio_objcg - get the object cgroup associated with a kmem folio.
 * @folio: Pointer to the folio.
 *
 * Returns a pointer to the object cgroup associated with the folio,
 * or NULL. This function assumes that the folio is known to have a
 * proper object cgroup pointer. It's not safe to call this function
 * against some types of folios, e.g. slab folios, ex-slab folios or
 * LRU folios.
 */
static inline struct obj_cgroup *__folio_objcg(struct folio *folio)
{
	unsigned long memcg_data = folio->memcg_data;

	VM_BUG_ON_FOLIO(folio_test_slab(folio), folio);
	VM_BUG_ON_FOLIO(memcg_data & MEMCG_DATA_OBJCGS, folio);
	VM_BUG_ON_FOLIO(!(memcg_data & MEMCG_DATA_KMEM), folio);

	return (struct obj_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
}
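/*
 * Illustrative note (a simplified restatement, not additional kernel code):
 * folio->memcg_data packs a pointer and the flags above into a single word.
 * The low bits hold MEMCG_DATA_OBJCGS/MEMCG_DATA_KMEM, and masking them off
 * with ~MEMCG_DATA_FLAGS_MASK recovers the mem_cgroup, obj_cgroup or
 * objcgs-vector pointer, e.g.:
 *
 *	val = (unsigned long)objcg | MEMCG_DATA_KMEM;		(encode)
 *	objcg = (void *)(val & ~MEMCG_DATA_FLAGS_MASK);		(decode)
 *
 * This relies on the pointed-to objects being sufficiently aligned that the
 * low bits are free to carry the flags.
 */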
/*
 * folio_memcg - Get the memory cgroup associated with a folio.
 * @folio: Pointer to the folio.
 *
 * Returns a pointer to the memory cgroup associated with the folio,
 * or NULL. This function assumes that the folio is known to have a
 * proper memory cgroup pointer. It's not safe to call this function
 * against some types of folios, e.g. slab folios or ex-slab folios.
 *
 * For a non-kmem folio any of the following ensures folio and memcg binding
 * stability:
 *
 * - the folio lock
 * - LRU isolation
 * - folio_memcg_lock()
 * - exclusive reference
 * - mem_cgroup_trylock_pages()
 *
 * For a kmem folio a caller should hold an rcu read lock to protect memcg
 * associated with a kmem folio from being released.
 */
static inline struct mem_cgroup *folio_memcg(struct folio *folio)
{
	if (folio_memcg_kmem(folio))
		return obj_cgroup_memcg(__folio_objcg(folio));
	return __folio_memcg(folio);
}

static inline struct mem_cgroup *page_memcg(struct page *page)
{
	return folio_memcg(page_folio(page));
}

/**
 * folio_memcg_rcu - Locklessly get the memory cgroup associated with a folio.
 * @folio: Pointer to the folio.
 *
 * This function assumes that the folio is known to have a
 * proper memory cgroup pointer. It's not safe to call this function
 * against some types of folios, e.g. slab folios or ex-slab folios.
 *
 * Return: A pointer to the memory cgroup associated with the folio,
 * or NULL.
 */
static inline struct mem_cgroup *folio_memcg_rcu(struct folio *folio)
{
	unsigned long memcg_data = READ_ONCE(folio->memcg_data);

	VM_BUG_ON_FOLIO(folio_test_slab(folio), folio);
	WARN_ON_ONCE(!rcu_read_lock_held());

	if (memcg_data & MEMCG_DATA_KMEM) {
		struct obj_cgroup *objcg;

		objcg = (void *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
		return obj_cgroup_memcg(objcg);
	}

	return (struct mem_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
}

/*
 * folio_memcg_check - Get the memory cgroup associated with a folio.
 * @folio: Pointer to the folio.
 *
 * Returns a pointer to the memory cgroup associated with the folio,
 * or NULL. Unlike folio_memcg(), this function can take any folio
 * as an argument. It has to be used in cases when it's not known if a folio
 * has an associated memory cgroup pointer or an object cgroups vector or
 * an object cgroup.
 *
 * For a non-kmem folio any of the following ensures folio and memcg binding
 * stability:
 *
 * - the folio lock
 * - LRU isolation
 * - folio_memcg_lock()
 * - exclusive reference
 * - mem_cgroup_trylock_pages()
 *
 * For a kmem folio a caller should hold an rcu read lock to protect memcg
 * associated with a kmem folio from being released.
 */
static inline struct mem_cgroup *folio_memcg_check(struct folio *folio)
{
	/*
	 * Because folio->memcg_data might be changed asynchronously
	 * for slabs, READ_ONCE() should be used here.
	 */
	unsigned long memcg_data = READ_ONCE(folio->memcg_data);

	if (memcg_data & MEMCG_DATA_OBJCGS)
		return NULL;

	if (memcg_data & MEMCG_DATA_KMEM) {
		struct obj_cgroup *objcg;

		objcg = (void *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
		return obj_cgroup_memcg(objcg);
	}

	return (struct mem_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
}

static inline struct mem_cgroup *page_memcg_check(struct page *page)
{
	if (PageTail(page))
		return NULL;
	return folio_memcg_check((struct folio *)page);
}

static inline struct mem_cgroup *get_mem_cgroup_from_objcg(struct obj_cgroup *objcg)
{
	struct mem_cgroup *memcg;

	rcu_read_lock();
retry:
	memcg = obj_cgroup_memcg(objcg);
	if (unlikely(!css_tryget(&memcg->css)))
		goto retry;
	rcu_read_unlock();

	return memcg;
}
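/*
 * Illustrative sketch of the lookup rules spelled out above; hypothetical
 * caller, not code from this header.  For an LRU folio the binding is read
 * under one of the listed stabilizers (here the folio lock), while a kmem
 * folio only needs an RCU read-side section because objcg->memcg is
 * RCU-protected:
 *
 *	folio_lock(folio);
 *	memcg = folio_memcg(folio);	(stable while the lock is held)
 *	folio_unlock(folio);
 *
 *	rcu_read_lock();
 *	memcg = folio_memcg_rcu(folio);	(valid only inside this section)
 *	rcu_read_unlock();
 */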
#ifdef CONFIG_MEMCG_KMEM
/*
 * folio_memcg_kmem - Check if the folio has the memcg_kmem flag set.
 * @folio: Pointer to the folio.
 *
 * Checks if the folio has the MemcgKmem flag set. The caller must ensure
 * that the folio has an associated memory cgroup. It's not safe to call
 * this function against some types of folios, e.g. slab folios.
 */
static inline bool folio_memcg_kmem(struct folio *folio)
{
	VM_BUG_ON_PGFLAGS(PageTail(&folio->page), &folio->page);
	VM_BUG_ON_FOLIO(folio->memcg_data & MEMCG_DATA_OBJCGS, folio);
	return folio->memcg_data & MEMCG_DATA_KMEM;
}

#else
static inline bool folio_memcg_kmem(struct folio *folio)
{
	return false;
}

#endif

static inline bool PageMemcgKmem(struct page *page)
{
	return folio_memcg_kmem(page_folio(page));
}

static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
{
	return (memcg == root_mem_cgroup);
}

static inline bool mem_cgroup_disabled(void)
{
	return !cgroup_subsys_enabled(memory_cgrp_subsys);
}

static inline void mem_cgroup_protection(struct mem_cgroup *root,
					 struct mem_cgroup *memcg,
					 unsigned long *min,
					 unsigned long *low)
{
	*min = *low = 0;

	if (mem_cgroup_disabled())
		return;

	/*
	 * There is no reclaim protection applied to a targeted reclaim.
	 * We are special casing this specific case here because
	 * mem_cgroup_calculate_protection is not robust enough to keep
	 * the protection invariant for calculated effective values for
	 * parallel reclaimers with different reclaim target. This is
	 * especially a problem for tail memcgs (as they have pages on LRU)
	 * which would want to have effective values 0 for targeted reclaim
	 * but a different value for external reclaim.
	 *
	 * Example
	 * Let's have global and A's reclaim in parallel:
	 *  |
	 *  A (low=2G, usage = 3G, max = 3G, children_low_usage = 1.5G)
	 *  |\
	 *  | C (low = 1G, usage = 2.5G)
	 *  B (low = 1G, usage = 0.5G)
	 *
	 * For the global reclaim
	 *  A.elow = A.low
	 *  B.elow = min(B.usage, B.low) because children_low_usage <= A.elow
	 *  C.elow = min(C.usage, C.low)
	 *
	 * With the effective values resetting we have A reclaim
	 *  A.elow = 0
	 *  B.elow = B.low
	 *  C.elow = C.low
	 *
	 * If the global reclaim races with A's reclaim then
	 *  B.elow = C.elow = 0 because children_low_usage > A.elow
	 * is possible and reclaiming B would be violating the protection.
	 */
	if (root == memcg)
		return;

	*min = READ_ONCE(memcg->memory.emin);
	*low = READ_ONCE(memcg->memory.elow);
}

void mem_cgroup_calculate_protection(struct mem_cgroup *root,
				     struct mem_cgroup *memcg);

static inline bool mem_cgroup_unprotected(struct mem_cgroup *target,
					  struct mem_cgroup *memcg)
{
	/*
	 * The root memcg doesn't account charges, and doesn't support
	 * protection. The target memcg's protection is ignored, see
	 * mem_cgroup_calculate_protection() and mem_cgroup_protection().
	 */
	return mem_cgroup_disabled() || mem_cgroup_is_root(memcg) ||
		memcg == target;
}

static inline bool mem_cgroup_below_low(struct mem_cgroup *target,
					struct mem_cgroup *memcg)
{
	if (mem_cgroup_unprotected(target, memcg))
		return false;

	return READ_ONCE(memcg->memory.elow) >=
		page_counter_read(&memcg->memory);
}

static inline bool mem_cgroup_below_min(struct mem_cgroup *target,
					struct mem_cgroup *memcg)
{
	if (mem_cgroup_unprotected(target, memcg))
		return false;

	return READ_ONCE(memcg->memory.emin) >=
		page_counter_read(&memcg->memory);
}
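/*
 * Illustrative sketch of how a reclaimer is expected to consume the
 * protection helpers above (modeled on the shrink_node_memcgs() logic in
 * mm/vmscan.c, simplified and not a verbatim copy):
 *
 *	if (mem_cgroup_below_min(target_memcg, memcg)) {
 *		hard protection: skip this memcg entirely;
 *	} else if (mem_cgroup_below_low(target_memcg, memcg)) {
 *		soft protection: skip unless reclaim is desperate, and
 *		record memcg_memory_event(memcg, MEMCG_LOW) whenever the
 *		protection is overridden;
 *	}
 */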
int __mem_cgroup_charge(struct folio *folio, struct mm_struct *mm, gfp_t gfp);

/**
 * mem_cgroup_charge - Charge a newly allocated folio to a cgroup.
 * @folio: Folio to charge.
 * @mm: mm context of the allocating task.
 * @gfp: Reclaim mode.
 *
 * Try to charge @folio to the memcg that @mm belongs to, reclaiming
 * pages according to @gfp if necessary. If @mm is NULL, try to
 * charge to the active memcg.
 *
 * Do not use this for folios allocated for swapin.
 *
 * Return: 0 on success. Otherwise, an error code is returned.
 */
static inline int mem_cgroup_charge(struct folio *folio, struct mm_struct *mm,
				    gfp_t gfp)
{
	if (mem_cgroup_disabled())
		return 0;
	return __mem_cgroup_charge(folio, mm, gfp);
}

int mem_cgroup_swapin_charge_folio(struct folio *folio, struct mm_struct *mm,
				   gfp_t gfp, swp_entry_t entry);
void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry);

void __mem_cgroup_uncharge(struct folio *folio);

/**
 * mem_cgroup_uncharge - Uncharge a folio.
 * @folio: Folio to uncharge.
 *
 * Uncharge a folio previously charged with mem_cgroup_charge().
 */
static inline void mem_cgroup_uncharge(struct folio *folio)
{
	if (mem_cgroup_disabled())
		return;
	__mem_cgroup_uncharge(folio);
}

void __mem_cgroup_uncharge_list(struct list_head *page_list);
static inline void mem_cgroup_uncharge_list(struct list_head *page_list)
{
	if (mem_cgroup_disabled())
		return;
	__mem_cgroup_uncharge_list(page_list);
}

void mem_cgroup_migrate(struct folio *old, struct folio *new);
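/*
 * Illustrative sketch of the charge/uncharge contract documented above;
 * hypothetical caller, not code from this header.  A freshly allocated
 * folio is charged before it becomes visible, and released on the error
 * path:
 *
 *	folio = folio_alloc(gfp, 0);
 *	if (!folio)
 *		return -ENOMEM;
 *	if (mem_cgroup_charge(folio, mm, gfp)) {
 *		folio_put(folio);
 *		return -ENOMEM;
 *	}
 *	... insert the folio into the page cache / map it ...
 */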
/**
 * mem_cgroup_lruvec - get the lru list vector for a memcg & node
 * @memcg: memcg of the wanted lruvec
 * @pgdat: pglist_data
 *
 * Returns the lru list vector holding pages for a given @memcg &
 * @pgdat combination. This can be the node lruvec, if the memory
 * controller is disabled.
 */
static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg,
					       struct pglist_data *pgdat)
{
	struct mem_cgroup_per_node *mz;
	struct lruvec *lruvec;

	if (mem_cgroup_disabled()) {
		lruvec = &pgdat->__lruvec;
		goto out;
	}

	if (!memcg)
		memcg = root_mem_cgroup;

	mz = memcg->nodeinfo[pgdat->node_id];
	lruvec = &mz->lruvec;
out:
	/*
	 * Since a node can be onlined after the mem_cgroup was created,
	 * we have to be prepared to initialize lruvec->pgdat here;
	 * and if offlined then reonlined, we need to reinitialize it.
	 */
	if (unlikely(lruvec->pgdat != pgdat))
		lruvec->pgdat = pgdat;
	return lruvec;
}

/**
 * folio_lruvec - return lruvec for isolating/putting an LRU folio
 * @folio: Pointer to the folio.
 *
 * This function relies on folio->mem_cgroup being stable.
 */
static inline struct lruvec *folio_lruvec(struct folio *folio)
{
	struct mem_cgroup *memcg = folio_memcg(folio);

	VM_WARN_ON_ONCE_FOLIO(!memcg && !mem_cgroup_disabled(), folio);
	return mem_cgroup_lruvec(memcg, folio_pgdat(folio));
}

struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);

struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm);

struct lruvec *folio_lruvec_lock(struct folio *folio);
struct lruvec *folio_lruvec_lock_irq(struct folio *folio);
struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio,
					 unsigned long *flags);

#ifdef CONFIG_DEBUG_VM
void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio);
#else
static inline
void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio)
{
}
#endif

static inline
struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css)
{
	return css ? container_of(css, struct mem_cgroup, css) : NULL;
}

static inline bool obj_cgroup_tryget(struct obj_cgroup *objcg)
{
	return percpu_ref_tryget(&objcg->refcnt);
}

static inline void obj_cgroup_get(struct obj_cgroup *objcg)
{
	percpu_ref_get(&objcg->refcnt);
}

static inline void obj_cgroup_get_many(struct obj_cgroup *objcg,
				       unsigned long nr)
{
	percpu_ref_get_many(&objcg->refcnt, nr);
}

static inline void obj_cgroup_put(struct obj_cgroup *objcg)
{
	percpu_ref_put(&objcg->refcnt);
}

static inline bool mem_cgroup_tryget(struct mem_cgroup *memcg)
{
	return !memcg || css_tryget(&memcg->css);
}

static inline void mem_cgroup_put(struct mem_cgroup *memcg)
{
	if (memcg)
		css_put(&memcg->css);
}

#define mem_cgroup_from_counter(counter, member)	\
	container_of(counter, struct mem_cgroup, member)

struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
				   struct mem_cgroup *,
				   struct mem_cgroup_reclaim_cookie *);
void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
void mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
			   int (*)(struct task_struct *, void *), void *arg);

static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
{
	if (mem_cgroup_disabled())
		return 0;

	return memcg->id.id;
}
struct mem_cgroup *mem_cgroup_from_id(unsigned short id);
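/*
 * Illustrative sketch of a hierarchy walk with mem_cgroup_iter() declared
 * above; this is the usual calling convention (see the users in
 * mm/vmscan.c), not new kernel code.  Passing a NULL prev starts the walk
 * at @root, and bailing out early must go through mem_cgroup_iter_break()
 * so the iterator's css reference is dropped:
 *
 *	memcg = mem_cgroup_iter(root, NULL, NULL);
 *	do {
 *		if (stop_condition) {
 *			mem_cgroup_iter_break(root, memcg);
 *			break;
 *		}
 *		... work on memcg ...
 *	} while ((memcg = mem_cgroup_iter(root, memcg, NULL)));
 */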
#ifdef CONFIG_SHRINKER_DEBUG
static inline unsigned long mem_cgroup_ino(struct mem_cgroup *memcg)
{
	return memcg ? cgroup_ino(memcg->css.cgroup) : 0;
}

struct mem_cgroup *mem_cgroup_get_from_ino(unsigned long ino);
#endif

static inline struct mem_cgroup *mem_cgroup_from_seq(struct seq_file *m)
{
	return mem_cgroup_from_css(seq_css(m));
}

static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
{
	struct mem_cgroup_per_node *mz;

	if (mem_cgroup_disabled())
		return NULL;

	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	return mz->memcg;
}

/**
 * parent_mem_cgroup - find the accounting parent of a memcg
 * @memcg: memcg whose parent to find
 *
 * Returns the parent memcg, or NULL if this is the root.
 */
static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
{
	return mem_cgroup_from_css(memcg->css.parent);
}

static inline bool mem_cgroup_is_descendant(struct mem_cgroup *memcg,
					    struct mem_cgroup *root)
{
	if (root == memcg)
		return true;
	return cgroup_is_descendant(memcg->css.cgroup, root->css.cgroup);
}

static inline bool mm_match_cgroup(struct mm_struct *mm,
				   struct mem_cgroup *memcg)
{
	struct mem_cgroup *task_memcg;
	bool match = false;

	rcu_read_lock();
	task_memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
	if (task_memcg)
		match = mem_cgroup_is_descendant(task_memcg, memcg);
	rcu_read_unlock();
	return match;
}

struct cgroup_subsys_state *mem_cgroup_css_from_folio(struct folio *folio);
ino_t page_cgroup_ino(struct page *page);

static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
{
	if (mem_cgroup_disabled())
		return true;
	return !!(memcg->css.flags & CSS_ONLINE);
}

void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
				int zid, int nr_pages);

static inline
unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
					   enum lru_list lru, int zone_idx)
{
	struct mem_cgroup_per_node *mz;

	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	return READ_ONCE(mz->lru_zone_size[zone_idx][lru]);
}

void mem_cgroup_handle_over_high(void);

unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg);

unsigned long mem_cgroup_size(struct mem_cgroup *memcg);

void mem_cgroup_print_oom_context(struct mem_cgroup *memcg,
				  struct task_struct *p);

void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg);

static inline void mem_cgroup_enter_user_fault(void)
{
	WARN_ON(current->in_user_fault);
	current->in_user_fault = 1;
}

static inline void mem_cgroup_exit_user_fault(void)
{
	WARN_ON(!current->in_user_fault);
	current->in_user_fault = 0;
}

static inline bool task_in_memcg_oom(struct task_struct *p)
{
	return p->memcg_in_oom;
}

bool mem_cgroup_oom_synchronize(bool wait);
struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim,
					    struct mem_cgroup *oom_domain);
void mem_cgroup_print_oom_group(struct mem_cgroup *memcg);

void folio_memcg_lock(struct folio *folio);
void folio_memcg_unlock(struct folio *folio);
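/*
 * Illustrative sketch: folio_memcg_lock()/folio_memcg_unlock() bracket
 * updates that must see a stable folio->memcg_data binding (one of the
 * stabilizers listed in the folio_memcg() comment earlier in this file).
 * Hypothetical caller, not code from this header:
 *
 *	folio_memcg_lock(folio);
 *	... update folio state that is accounted against its memcg ...
 *	folio_memcg_unlock(folio);
 */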
void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val);

/* try to stabilize folio_memcg() for all the pages in a memcg */
static inline bool mem_cgroup_trylock_pages(struct mem_cgroup *memcg)
{
	rcu_read_lock();

	if (mem_cgroup_disabled() || !atomic_read(&memcg->moving_account))
		return true;

	rcu_read_unlock();
	return false;
}

static inline void mem_cgroup_unlock_pages(void)
{
	rcu_read_unlock();
}

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void mod_memcg_state(struct mem_cgroup *memcg,
				   int idx, int val)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_memcg_state(memcg, idx, val);
	local_irq_restore(flags);
}

static inline void mod_memcg_page_state(struct page *page,
					int idx, int val)
{
	struct mem_cgroup *memcg;

	if (mem_cgroup_disabled())
		return;

	rcu_read_lock();
	memcg = page_memcg(page);
	if (memcg)
		mod_memcg_state(memcg, idx, val);
	rcu_read_unlock();
}

unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx);

static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
					      enum node_stat_item idx)
{
	struct mem_cgroup_per_node *pn;
	long x;

	if (mem_cgroup_disabled())
		return node_page_state(lruvec_pgdat(lruvec), idx);

	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	x = READ_ONCE(pn->lruvec_stats.state[idx]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
						    enum node_stat_item idx)
{
	struct mem_cgroup_per_node *pn;
	long x = 0;
	int cpu;

	if (mem_cgroup_disabled())
		return node_page_state(lruvec_pgdat(lruvec), idx);

	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	for_each_possible_cpu(cpu)
		x += per_cpu(pn->lruvec_stats_percpu->state[idx], cpu);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

void mem_cgroup_flush_stats(void);
void mem_cgroup_flush_stats_ratelimited(void);

void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
			      int val);
void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val);

static inline void mod_lruvec_kmem_state(void *p, enum node_stat_item idx,
					 int val)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_lruvec_kmem_state(p, idx, val);
	local_irq_restore(flags);
}

static inline void mod_memcg_lruvec_state(struct lruvec *lruvec,
					  enum node_stat_item idx, int val)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_memcg_lruvec_state(lruvec, idx, val);
	local_irq_restore(flags);
}

void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
			  unsigned long count);

static inline void count_memcg_events(struct mem_cgroup *memcg,
				      enum vm_event_item idx,
				      unsigned long count)
{
	unsigned long flags;

	local_irq_save(flags);
	__count_memcg_events(memcg, idx, count);
	local_irq_restore(flags);
}

static inline void count_memcg_page_event(struct page *page,
					  enum vm_event_item idx)
{
	struct mem_cgroup *memcg = page_memcg(page);

	if (memcg)
		count_memcg_events(memcg, idx, 1);
}

static inline void count_memcg_folio_events(struct folio *folio,
		enum vm_event_item idx, unsigned long nr)
{
	struct mem_cgroup *memcg = folio_memcg(folio);

	if (memcg)
		count_memcg_events(memcg, idx, nr);
}
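/*
 * Illustrative note on the naming convention used by the stat and event
 * helpers above (a restatement, not additional kernel API): the
 * double-underscore variants assume the caller already runs with
 * interrupts disabled, while the plain variants wrap them in
 * local_irq_save()/local_irq_restore().  E.g. from process context:
 *
 *	mod_memcg_state(memcg, MEMCG_SOCK, nr_pages);
 *
 * and from a path that already runs with IRQs off:
 *
 *	__mod_memcg_state(memcg, MEMCG_SOCK, nr_pages);
 */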
static inline void count_memcg_event_mm(struct mm_struct *mm,
					enum vm_event_item idx)
{
	struct mem_cgroup *memcg;

	if (mem_cgroup_disabled())
		return;

	rcu_read_lock();
	memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
	if (likely(memcg))
		count_memcg_events(memcg, idx, 1);
	rcu_read_unlock();
}

static inline void memcg_memory_event(struct mem_cgroup *memcg,
				      enum memcg_memory_event event)
{
	bool swap_event = event == MEMCG_SWAP_HIGH || event == MEMCG_SWAP_MAX ||
			  event == MEMCG_SWAP_FAIL;

	atomic_long_inc(&memcg->memory_events_local[event]);
	if (!swap_event)
		cgroup_file_notify(&memcg->events_local_file);

	do {
		atomic_long_inc(&memcg->memory_events[event]);
		if (swap_event)
			cgroup_file_notify(&memcg->swap_events_file);
		else
			cgroup_file_notify(&memcg->events_file);

		if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
			break;
		if (cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_LOCAL_EVENTS)
			break;
	} while ((memcg = parent_mem_cgroup(memcg)) &&
		 !mem_cgroup_is_root(memcg));
}

static inline void memcg_memory_event_mm(struct mm_struct *mm,
					 enum memcg_memory_event event)
{
	struct mem_cgroup *memcg;

	if (mem_cgroup_disabled())
		return;

	rcu_read_lock();
	memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
	if (likely(memcg))
		memcg_memory_event(memcg, event);
	rcu_read_unlock();
}

void split_page_memcg(struct page *head, unsigned int nr);

unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
					    gfp_t gfp_mask,
					    unsigned long *total_scanned);

#else /* CONFIG_MEMCG */

#define MEM_CGROUP_ID_SHIFT	0

static inline struct mem_cgroup *folio_memcg(struct folio *folio)
{
	return NULL;
}

static inline struct mem_cgroup *page_memcg(struct page *page)
{
	return NULL;
}

static inline struct mem_cgroup *folio_memcg_rcu(struct folio *folio)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return NULL;
}

static inline struct mem_cgroup *folio_memcg_check(struct folio *folio)
{
	return NULL;
}

static inline struct mem_cgroup *page_memcg_check(struct page *page)
{
	return NULL;
}

static inline bool folio_memcg_kmem(struct folio *folio)
{
	return false;
}

static inline bool PageMemcgKmem(struct page *page)
{
	return false;
}

static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
{
	return true;
}

static inline bool mem_cgroup_disabled(void)
{
	return true;
}

static inline void memcg_memory_event(struct mem_cgroup *memcg,
				      enum memcg_memory_event event)
{
}

static inline void memcg_memory_event_mm(struct mm_struct *mm,
					 enum memcg_memory_event event)
{
}

static inline void mem_cgroup_protection(struct mem_cgroup *root,
					 struct mem_cgroup *memcg,
					 unsigned long *min,
					 unsigned long *low)
{
	*min = *low = 0;
}

static inline void mem_cgroup_calculate_protection(struct mem_cgroup *root,
						   struct mem_cgroup *memcg)
{
}

static inline bool mem_cgroup_unprotected(struct mem_cgroup *target,
					  struct mem_cgroup *memcg)
{
	return true;
}

static inline bool mem_cgroup_below_low(struct mem_cgroup *target,
					struct mem_cgroup *memcg)
{
	return false;
}

static inline bool mem_cgroup_below_min(struct mem_cgroup *target,
					struct mem_cgroup *memcg)
{
	return false;
}

static inline int mem_cgroup_charge(struct folio *folio,
		struct mm_struct *mm, gfp_t gfp)
{
	return 0;
}

static inline int mem_cgroup_swapin_charge_folio(struct folio *folio,
			struct mm_struct *mm, gfp_t gfp, swp_entry_t entry)
{
	return 0;
}

static inline void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry)
{
}

static inline void mem_cgroup_uncharge(struct folio *folio)
{
}

static inline void mem_cgroup_uncharge_list(struct list_head *page_list)
{
}

static inline void mem_cgroup_migrate(struct folio *old, struct folio *new)
{
}

static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg,
					       struct pglist_data *pgdat)
{
	return &pgdat->__lruvec;
}

static inline struct lruvec *folio_lruvec(struct folio *folio)
{
	struct pglist_data *pgdat = folio_pgdat(folio);
	return &pgdat->__lruvec;
}

static inline
void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio)
{
}

static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
{
	return NULL;
}

static inline bool mm_match_cgroup(struct mm_struct *mm,
		struct mem_cgroup *memcg)
{
	return true;
}

static inline struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
{
	return NULL;
}

static inline
struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css)
{
	return NULL;
}

static inline void obj_cgroup_put(struct obj_cgroup *objcg)
{
}

static inline bool mem_cgroup_tryget(struct mem_cgroup *memcg)
{
	return true;
}

static inline void mem_cgroup_put(struct mem_cgroup *memcg)
{
}

static inline struct lruvec *folio_lruvec_lock(struct folio *folio)
{
	struct pglist_data *pgdat = folio_pgdat(folio);

	spin_lock(&pgdat->__lruvec.lru_lock);
	return &pgdat->__lruvec;
}

static inline struct lruvec *folio_lruvec_lock_irq(struct folio *folio)
{
	struct pglist_data *pgdat = folio_pgdat(folio);

	spin_lock_irq(&pgdat->__lruvec.lru_lock);
	return &pgdat->__lruvec;
}

static inline struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio,
		unsigned long *flagsp)
{
	struct pglist_data *pgdat = folio_pgdat(folio);

	spin_lock_irqsave(&pgdat->__lruvec.lru_lock, *flagsp);
	return &pgdat->__lruvec;
}

static inline struct mem_cgroup *
mem_cgroup_iter(struct mem_cgroup *root,
		struct mem_cgroup *prev,
		struct mem_cgroup_reclaim_cookie *reclaim)
{
	return NULL;
}

static inline void mem_cgroup_iter_break(struct mem_cgroup *root,
					 struct mem_cgroup *prev)
{
}

static inline void mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
		int (*fn)(struct task_struct *, void *), void *arg)
{
}

static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
{
	return 0;
}

static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
{
	WARN_ON_ONCE(id);
	/* XXX: This should always return root_mem_cgroup */
	return NULL;
}

#ifdef CONFIG_SHRINKER_DEBUG
static inline unsigned long mem_cgroup_ino(struct mem_cgroup *memcg)
{
	return 0;
}

static inline struct mem_cgroup *mem_cgroup_get_from_ino(unsigned long ino)
{
	return NULL;
}
#endif

static inline struct mem_cgroup *mem_cgroup_from_seq(struct seq_file *m)
{
	return NULL;
}

static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
{
	return NULL;
}

static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
{
	return true;
}

static inline
unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
		enum lru_list lru, int zone_idx)
{
	return 0;
}

static inline unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
{
	return 0;
}

static inline unsigned long mem_cgroup_size(struct mem_cgroup *memcg)
{
	return 0;
}

static inline void
mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
{
}

static inline void
mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
{
}

static inline void folio_memcg_lock(struct folio *folio)
{
}

static inline void folio_memcg_unlock(struct folio *folio)
{
}

static inline bool mem_cgroup_trylock_pages(struct mem_cgroup *memcg)
{
	/* to match folio_memcg_rcu() */
	rcu_read_lock();
	return true;
}

static inline void mem_cgroup_unlock_pages(void)
{
	rcu_read_unlock();
}

static inline void mem_cgroup_handle_over_high(void)
{
}

static inline void mem_cgroup_enter_user_fault(void)
{
}

static inline void mem_cgroup_exit_user_fault(void)
{
}

static inline bool task_in_memcg_oom(struct task_struct *p)
{
	return false;
}

static inline bool mem_cgroup_oom_synchronize(bool wait)
{
	return false;
}

static inline struct mem_cgroup *mem_cgroup_get_oom_group(
	struct task_struct *victim, struct mem_cgroup *oom_domain)
{
	return NULL;
}

static inline void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
{
}

static inline void __mod_memcg_state(struct mem_cgroup *memcg,
				     int idx,
				     int nr)
{
}

static inline void mod_memcg_state(struct mem_cgroup *memcg,
				   int idx,
				   int nr)
{
}

static inline void mod_memcg_page_state(struct page *page,
					int idx, int val)
{
}

static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
{
	return 0;
}

static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
					      enum node_stat_item idx)
{
	return node_page_state(lruvec_pgdat(lruvec), idx);
}

static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
						    enum node_stat_item idx)
{
	return node_page_state(lruvec_pgdat(lruvec), idx);
}

static inline void mem_cgroup_flush_stats(void)
{
}

static inline void mem_cgroup_flush_stats_ratelimited(void)
{
}

static inline void __mod_memcg_lruvec_state(struct lruvec *lruvec,
					    enum node_stat_item idx, int val)
{
}

static inline void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx,
					   int val)
{
	struct page *page = virt_to_head_page(p);

	__mod_node_page_state(page_pgdat(page), idx, val);
}

static inline void mod_lruvec_kmem_state(void *p, enum node_stat_item idx,
					 int val)
{
	struct page *page = virt_to_head_page(p);

	mod_node_page_state(page_pgdat(page), idx, val);
}

static inline void count_memcg_events(struct mem_cgroup *memcg,
				      enum vm_event_item idx,
				      unsigned long count)
{
}

static inline void __count_memcg_events(struct mem_cgroup *memcg,
					enum vm_event_item idx,
					unsigned long count)
{
}

static inline void count_memcg_page_event(struct page *page,
					  int idx)
{
}

static inline void count_memcg_folio_events(struct folio *folio,
		enum vm_event_item idx, unsigned long nr)
{
}

static inline
void count_memcg_event_mm(struct mm_struct *mm, enum vm_event_item idx)
{
}

static inline void split_page_memcg(struct page *head, unsigned int nr)
{
}

static inline
unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
					    gfp_t gfp_mask,
					    unsigned long *total_scanned)
{
	return 0;
}
#endif /* CONFIG_MEMCG */

static inline void __inc_lruvec_kmem_state(void *p, enum node_stat_item idx)
{
	__mod_lruvec_kmem_state(p, idx, 1);
}

static inline void __dec_lruvec_kmem_state(void *p, enum node_stat_item idx)
{
	__mod_lruvec_kmem_state(p, idx, -1);
}

static inline struct lruvec *parent_lruvec(struct lruvec *lruvec)
{
	struct mem_cgroup *memcg;

	memcg = lruvec_memcg(lruvec);
	if (!memcg)
		return NULL;
	memcg = parent_mem_cgroup(memcg);
	if (!memcg)
		return NULL;
	return mem_cgroup_lruvec(memcg, lruvec_pgdat(lruvec));
}

static inline void unlock_page_lruvec(struct lruvec *lruvec)
{
	spin_unlock(&lruvec->lru_lock);
}

static inline void unlock_page_lruvec_irq(struct lruvec *lruvec)
{
	spin_unlock_irq(&lruvec->lru_lock);
}

static inline void unlock_page_lruvec_irqrestore(struct lruvec *lruvec,
		unsigned long flags)
{
	spin_unlock_irqrestore(&lruvec->lru_lock, flags);
}

/* Test requires a stable page->memcg binding, see page_memcg() */
static inline bool folio_matches_lruvec(struct folio *folio,
		struct lruvec *lruvec)
{
	return lruvec_pgdat(lruvec) == folio_pgdat(folio) &&
	       lruvec_memcg(lruvec) == folio_memcg(folio);
}

/* Don't lock again iff page's lruvec locked */
static inline struct lruvec *folio_lruvec_relock_irq(struct folio *folio,
		struct lruvec *locked_lruvec)
{
	if (locked_lruvec) {
		if (folio_matches_lruvec(folio, locked_lruvec))
			return locked_lruvec;

		unlock_page_lruvec_irq(locked_lruvec);
	}

	return folio_lruvec_lock_irq(folio);
}

/* Don't lock again iff page's lruvec locked */
static inline struct lruvec *folio_lruvec_relock_irqsave(struct folio *folio,
		struct lruvec *locked_lruvec, unsigned long *flags)
{
	if (locked_lruvec) {
		if (folio_matches_lruvec(folio, locked_lruvec))
			return locked_lruvec;

		unlock_page_lruvec_irqrestore(locked_lruvec, *flags);
	}

	return folio_lruvec_lock_irqsave(folio, flags);
}
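/*
 * Illustrative sketch of the relock pattern the two helpers above are meant
 * for (the shape of the batching loops in mm/swap.c and mm/vmscan.c, not a
 * verbatim copy): when walking a batch of folios that may belong to
 * different lruvecs, the lru_lock is only dropped and retaken when the next
 * folio maps to a different lruvec:
 *
 *	struct lruvec *lruvec = NULL;
 *	unsigned long flags;
 *
 *	list_for_each_entry(folio, &folios, lru) {
 *		lruvec = folio_lruvec_relock_irqsave(folio, lruvec, &flags);
 *		... operate on folio under lruvec->lru_lock ...
 *	}
 *	if (lruvec)
 *		unlock_page_lruvec_irqrestore(lruvec, flags);
 */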
#ifdef CONFIG_CGROUP_WRITEBACK

struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb);
void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
			 unsigned long *pheadroom, unsigned long *pdirty,
			 unsigned long *pwriteback);

void mem_cgroup_track_foreign_dirty_slowpath(struct folio *folio,
					     struct bdi_writeback *wb);

static inline void mem_cgroup_track_foreign_dirty(struct folio *folio,
						  struct bdi_writeback *wb)
{
	struct mem_cgroup *memcg;

	if (mem_cgroup_disabled())
		return;

	memcg = folio_memcg(folio);
	if (unlikely(memcg && &memcg->css != wb->memcg_css))
		mem_cgroup_track_foreign_dirty_slowpath(folio, wb);
}

void mem_cgroup_flush_foreign(struct bdi_writeback *wb);

#else /* CONFIG_CGROUP_WRITEBACK */

static inline struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
{
	return NULL;
}

static inline void mem_cgroup_wb_stats(struct bdi_writeback *wb,
				       unsigned long *pfilepages,
				       unsigned long *pheadroom,
				       unsigned long *pdirty,
				       unsigned long *pwriteback)
{
}

static inline void mem_cgroup_track_foreign_dirty(struct folio *folio,
						  struct bdi_writeback *wb)
{
}

static inline void mem_cgroup_flush_foreign(struct bdi_writeback *wb)
{
}

#endif	/* CONFIG_CGROUP_WRITEBACK */

struct sock;
bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages,
			     gfp_t gfp_mask);
void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
#ifdef CONFIG_MEMCG
extern struct static_key_false memcg_sockets_enabled_key;
#define mem_cgroup_sockets_enabled static_branch_unlikely(&memcg_sockets_enabled_key)
void mem_cgroup_sk_alloc(struct sock *sk);
void mem_cgroup_sk_free(struct sock *sk);
static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
{
	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_pressure)
		return true;
	do {
		if (time_before(jiffies, READ_ONCE(memcg->socket_pressure)))
			return true;
	} while ((memcg = parent_mem_cgroup(memcg)));
	return false;
}

int alloc_shrinker_info(struct mem_cgroup *memcg);
void free_shrinker_info(struct mem_cgroup *memcg);
void set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id);
void reparent_shrinker_deferred(struct mem_cgroup *memcg);
#else
#define mem_cgroup_sockets_enabled 0
static inline void mem_cgroup_sk_alloc(struct sock *sk) { };
static inline void mem_cgroup_sk_free(struct sock *sk) { };
static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
{
	return false;
}

static inline void set_shrinker_bit(struct mem_cgroup *memcg,
				    int nid, int shrinker_id)
{
}
#endif
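/*
 * Illustrative sketch of how the networking side is expected to combine the
 * socket-memory hooks above (a simplified view of the sk memory accounting
 * paths, not a verbatim copy of net/core/sock.c):
 *
 *	if (mem_cgroup_sockets_enabled && sk->sk_memcg) {
 *		if (!mem_cgroup_charge_skmem(sk->sk_memcg, nr_pages,
 *					     GFP_NOWAIT))
 *			... charge failed, treat as memory pressure ...
 *		if (mem_cgroup_under_socket_pressure(sk->sk_memcg))
 *			... throttle send/receive buffer growth ...
 *	}
 */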
#ifdef CONFIG_MEMCG_KMEM
bool mem_cgroup_kmem_disabled(void);
int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order);
void __memcg_kmem_uncharge_page(struct page *page, int order);

struct obj_cgroup *get_obj_cgroup_from_current(void);
struct obj_cgroup *get_obj_cgroup_from_folio(struct folio *folio);

int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size);
void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size);

extern struct static_key_false memcg_bpf_enabled_key;
static inline bool memcg_bpf_enabled(void)
{
	return static_branch_likely(&memcg_bpf_enabled_key);
}

extern struct static_key_false memcg_kmem_online_key;

static inline bool memcg_kmem_online(void)
{
	return static_branch_likely(&memcg_kmem_online_key);
}

static inline int memcg_kmem_charge_page(struct page *page, gfp_t gfp,
					 int order)
{
	if (memcg_kmem_online())
		return __memcg_kmem_charge_page(page, gfp, order);
	return 0;
}

static inline void memcg_kmem_uncharge_page(struct page *page, int order)
{
	if (memcg_kmem_online())
		__memcg_kmem_uncharge_page(page, order);
}

/*
 * A helper for accessing memcg's kmem_id, used for getting
 * corresponding LRU lists.
 */
static inline int memcg_kmem_id(struct mem_cgroup *memcg)
{
	return memcg ? memcg->kmemcg_id : -1;
}

struct mem_cgroup *mem_cgroup_from_obj(void *p);
struct mem_cgroup *mem_cgroup_from_slab_obj(void *p);

static inline void count_objcg_event(struct obj_cgroup *objcg,
				     enum vm_event_item idx)
{
	struct mem_cgroup *memcg;

	if (!memcg_kmem_online())
		return;

	rcu_read_lock();
	memcg = obj_cgroup_memcg(objcg);
	count_memcg_events(memcg, idx, 1);
	rcu_read_unlock();
}

#else
static inline bool mem_cgroup_kmem_disabled(void)
{
	return true;
}

static inline int memcg_kmem_charge_page(struct page *page, gfp_t gfp,
					 int order)
{
	return 0;
}

static inline void memcg_kmem_uncharge_page(struct page *page, int order)
{
}

static inline int __memcg_kmem_charge_page(struct page *page, gfp_t gfp,
					   int order)
{
	return 0;
}

static inline void __memcg_kmem_uncharge_page(struct page *page, int order)
{
}

static inline struct obj_cgroup *get_obj_cgroup_from_folio(struct folio *folio)
{
	return NULL;
}

static inline bool memcg_bpf_enabled(void)
{
	return false;
}

static inline bool memcg_kmem_online(void)
{
	return false;
}

static inline int memcg_kmem_id(struct mem_cgroup *memcg)
{
	return -1;
}

static inline struct mem_cgroup *mem_cgroup_from_obj(void *p)
{
	return NULL;
}

static inline struct mem_cgroup *mem_cgroup_from_slab_obj(void *p)
{
	return NULL;
}

static inline void count_objcg_event(struct obj_cgroup *objcg,
				     enum vm_event_item idx)
{
}

#endif /* CONFIG_MEMCG_KMEM */

#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
bool obj_cgroup_may_zswap(struct obj_cgroup *objcg);
void obj_cgroup_charge_zswap(struct obj_cgroup *objcg, size_t size);
void obj_cgroup_uncharge_zswap(struct obj_cgroup *objcg, size_t size);
#else
static inline bool obj_cgroup_may_zswap(struct obj_cgroup *objcg)
{
	return true;
}
static inline void obj_cgroup_charge_zswap(struct obj_cgroup *objcg,
					   size_t size)
{
}
static inline void obj_cgroup_uncharge_zswap(struct obj_cgroup *objcg,
					     size_t size)
{
}
#endif

#endif /* _LINUX_MEMCONTROL_H */