/* SPDX-License-Identifier: GPL-2.0-or-later */
/* memcontrol.h - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <[email protected]>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <[email protected]>
 */

#ifndef _LINUX_MEMCONTROL_H
#define _LINUX_MEMCONTROL_H
#include <linux/cgroup.h>
#include <linux/vm_event_item.h>
#include <linux/hardirq.h>
#include <linux/jump_label.h>
#include <linux/page_counter.h>
#include <linux/vmpressure.h>
#include <linux/eventfd.h>
#include <linux/mm.h>
#include <linux/vmstat.h>
#include <linux/writeback.h>
#include <linux/page-flags.h>

struct mem_cgroup;
struct obj_cgroup;
struct page;
struct mm_struct;
struct kmem_cache;

/* Cgroup-specific page state, on top of universal node page state */
enum memcg_stat_item {
	MEMCG_SWAP = NR_VM_NODE_STAT_ITEMS,
	MEMCG_SOCK,
	MEMCG_PERCPU_B,
	MEMCG_VMALLOC,
	MEMCG_KMEM,
	MEMCG_ZSWAP_B,
	MEMCG_ZSWAPPED,
	MEMCG_NR_STAT,
};

enum memcg_memory_event {
	MEMCG_LOW,
	MEMCG_HIGH,
	MEMCG_MAX,
	MEMCG_OOM,
	MEMCG_OOM_KILL,
	MEMCG_OOM_GROUP_KILL,
	MEMCG_SWAP_HIGH,
	MEMCG_SWAP_MAX,
	MEMCG_SWAP_FAIL,
	MEMCG_NR_MEMORY_EVENTS,
};

struct mem_cgroup_reclaim_cookie {
	pg_data_t *pgdat;
	unsigned int generation;
};

#ifdef CONFIG_MEMCG

#define MEM_CGROUP_ID_SHIFT	16
#define MEM_CGROUP_ID_MAX	USHRT_MAX

struct mem_cgroup_id {
	int id;
	refcount_t ref;
};

/*
 * The per-memcg event counter is incremented on every pagein/pageout. With
 * THP, it is incremented by the number of pages. This counter is used
 * to trigger some periodic events. This is straightforward and better
 * than using jiffies etc. to handle periodic memcg events.
 */
enum mem_cgroup_events_target {
	MEM_CGROUP_TARGET_THRESH,
	MEM_CGROUP_TARGET_SOFTLIMIT,
	MEM_CGROUP_NTARGETS,
};

struct memcg_vmstats_percpu;
struct memcg_vmstats;

struct mem_cgroup_reclaim_iter {
	struct mem_cgroup *position;
	/* scan generation, increased every round-trip */
	unsigned int generation;
};

/*
 * Bitmap and deferred work of shrinker::id corresponding to memcg-aware
 * shrinkers, which have elements charged to this memcg.
 */
struct shrinker_info {
	struct rcu_head rcu;
	atomic_long_t *nr_deferred;
	unsigned long *map;
	int map_nr_max;
};

struct lruvec_stats_percpu {
	/* Local (CPU and cgroup) state */
	long state[NR_VM_NODE_STAT_ITEMS];

	/* Delta calculation for lockless upward propagation */
	long state_prev[NR_VM_NODE_STAT_ITEMS];
};

struct lruvec_stats {
	/* Aggregated (CPU and subtree) state */
	long state[NR_VM_NODE_STAT_ITEMS];

	/* Pending child counts during tree propagation */
	long state_pending[NR_VM_NODE_STAT_ITEMS];
};
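/*
 * Illustrative note (a sketch, not new API): because memcg_stat_item starts
 * at NR_VM_NODE_STAT_ITEMS, node stat indices and memcg-only stat indices
 * share a single index space.  The helpers documented further below as
 * taking an "idx" of type enum memcg_stat_item or node_stat_item rely on
 * this, e.g.:
 *
 *	mod_memcg_state(memcg, NR_FILE_MAPPED, 1);	a node_stat_item
 *	mod_memcg_state(memcg, MEMCG_SOCK, 1);		a memcg_stat_item
 *
 * Both calls index the same per-memcg state; MEMCG_NR_STAT is the number
 * of entries in the combined space.
 */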
/*
 * per-node information in memory controller.
 */
struct mem_cgroup_per_node {
	struct lruvec lruvec;

	struct lruvec_stats_percpu __percpu *lruvec_stats_percpu;
	struct lruvec_stats lruvec_stats;

	unsigned long lru_zone_size[MAX_NR_ZONES][NR_LRU_LISTS];

	struct mem_cgroup_reclaim_iter iter;

	struct shrinker_info __rcu *shrinker_info;

	struct rb_node tree_node;	/* RB tree node */
	unsigned long usage_in_excess;	/* amount by which the soft limit
					 * is exceeded */
	bool on_tree;
	struct mem_cgroup *memcg;	/* back pointer, we cannot
					 * use container_of */
};

struct mem_cgroup_threshold {
	struct eventfd_ctx *eventfd;
	unsigned long threshold;
};

/* For threshold */
struct mem_cgroup_threshold_ary {
	/* Index of the threshold just below or equal to usage. */
	int current_threshold;
	/* Size of entries[] */
	unsigned int size;
	/* Array of thresholds */
	struct mem_cgroup_threshold entries[];
};

struct mem_cgroup_thresholds {
	/* Primary thresholds array */
	struct mem_cgroup_threshold_ary *primary;
	/*
	 * Spare threshold array.
	 * This is needed to make mem_cgroup_unregister_event() "never fail".
	 * It must be able to store at least primary->size - 1 entries.
	 */
	struct mem_cgroup_threshold_ary *spare;
};

/*
 * Remember the four most recent foreign writebacks with dirty pages in this
 * cgroup.  Inode sharing is expected to be uncommon and, even if we miss
 * one in a given round, we're likely to catch it later if it keeps
 * foreign-dirtying, so a fairly low count should be enough.
 *
 * See mem_cgroup_track_foreign_dirty_slowpath() for details.
 */
#define MEMCG_CGWB_FRN_CNT	4

struct memcg_cgwb_frn {
	u64 bdi_id;			/* bdi->id of the foreign inode */
	int memcg_id;			/* memcg->css.id of foreign inode */
	u64 at;				/* jiffies_64 at the time of dirtying */
	struct wb_completion done;	/* tracks in-flight foreign writebacks */
};

/*
 * Bucket for arbitrarily byte-sized objects charged to a memory
 * cgroup. The bucket can be reparented in one piece when the cgroup
 * is destroyed, without having to round up the individual references
 * of all live memory objects in the wild.
 */
struct obj_cgroup {
	struct percpu_ref refcnt;
	struct mem_cgroup *memcg;
	atomic_t nr_charged_bytes;
	union {
		struct list_head list;	/* protected by objcg_lock */
		struct rcu_head rcu;
	};
};

/*
 * The memory controller data structure. The memory controller controls both
 * page cache and RSS per cgroup. We would eventually like to provide
 * statistics based on the statistics developed by Rik van Riel for clock-pro,
 * to help the administrator determine what knobs to tune.
 */
struct mem_cgroup {
	struct cgroup_subsys_state css;

	/* Private memcg ID. Used to ID objects that outlive the cgroup */
	struct mem_cgroup_id id;

	/* Accounted resources */
	struct page_counter memory;		/* Both v1 & v2 */

	union {
		struct page_counter swap;	/* v2 only */
		struct page_counter memsw;	/* v1 only */
	};

	/* Legacy consumer-oriented counters */
	struct page_counter kmem;		/* v1 only */
	struct page_counter tcpmem;		/* v1 only */

	/* Range enforcement for interrupt charges */
	struct work_struct high_work;

#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
	unsigned long zswap_max;
#endif

	unsigned long soft_limit;

	/* vmpressure notifications */
	struct vmpressure vmpressure;

	/*
	 * Should the OOM killer kill all tasks belonging to this cgroup,
	 * instead of just the one that triggered the OOM?
	 */
	bool oom_group;

	/* protected by memcg_oom_lock */
	bool oom_lock;
	int under_oom;

	int swappiness;
	/* OOM-Killer disable */
	int oom_kill_disable;

	/* memory.events and memory.events.local */
	struct cgroup_file events_file;
	struct cgroup_file events_local_file;

	/* handle for "memory.swap.events" */
	struct cgroup_file swap_events_file;

	/* protect arrays of thresholds */
	struct mutex thresholds_lock;

	/* thresholds for memory usage. RCU-protected */
	struct mem_cgroup_thresholds thresholds;

	/* thresholds for mem+swap usage. RCU-protected */
	struct mem_cgroup_thresholds memsw_thresholds;

	/* For oom notifier event fd */
	struct list_head oom_notify;

	/*
	 * Should we move charges of a task when a task is moved into this
	 * mem_cgroup? And what type of charges should we move?
	 */
	unsigned long move_charge_at_immigrate;
	/* taken only while moving_account > 0 */
	spinlock_t move_lock;
	unsigned long move_lock_flags;

	CACHELINE_PADDING(_pad1_);

	/* memory.stat */
	struct memcg_vmstats *vmstats;

	/* memory.events */
	atomic_long_t memory_events[MEMCG_NR_MEMORY_EVENTS];
	atomic_long_t memory_events_local[MEMCG_NR_MEMORY_EVENTS];

	/*
	 * Hint of reclaim pressure for socket memory management. Note
	 * that this indicator should NOT be used in legacy cgroup mode
	 * where socket memory is accounted/charged separately.
	 */
	unsigned long socket_pressure;

	/* Legacy tcp memory accounting */
	bool tcpmem_active;
	int tcpmem_pressure;

#ifdef CONFIG_MEMCG_KMEM
	int kmemcg_id;
	struct obj_cgroup __rcu *objcg;
	/* list of inherited objcgs, protected by objcg_lock */
	struct list_head objcg_list;
#endif

	CACHELINE_PADDING(_pad2_);

	/*
	 * set > 0 if pages under this cgroup are moving to other cgroup.
	 */
	atomic_t moving_account;
	struct task_struct *move_lock_task;

	struct memcg_vmstats_percpu __percpu *vmstats_percpu;

#ifdef CONFIG_CGROUP_WRITEBACK
	struct list_head cgwb_list;
	struct wb_domain cgwb_domain;
	struct memcg_cgwb_frn cgwb_frn[MEMCG_CGWB_FRN_CNT];
#endif

	/* List of events which userspace wants to receive */
	struct list_head event_list;
	spinlock_t event_list_lock;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	struct deferred_split deferred_split_queue;
#endif

#ifdef CONFIG_LRU_GEN
	/* per-memcg mm_struct list */
	struct lru_gen_mm_list mm_list;
#endif

	struct mem_cgroup_per_node *nodeinfo[];
};
/*
 * The size of the first charge trial.
 * TODO: maybe bigger machines need a larger batch, or the batch size
 * could be made dynamic based on the workload.
 */
#define MEMCG_CHARGE_BATCH	64U

extern struct mem_cgroup *root_mem_cgroup;

enum page_memcg_data_flags {
	/* page->memcg_data is a pointer to an objcgs vector */
	MEMCG_DATA_OBJCGS = (1UL << 0),
	/* page has been accounted as a non-slab kernel page */
	MEMCG_DATA_KMEM = (1UL << 1),
	/* the next bit after the last actual flag */
	__NR_MEMCG_DATA_FLAGS = (1UL << 2),
};

#define MEMCG_DATA_FLAGS_MASK (__NR_MEMCG_DATA_FLAGS - 1)

static inline bool folio_memcg_kmem(struct folio *folio);

/*
 * After initialization, objcg->memcg always points at a valid memcg,
 * but it can be atomically swapped to the parent memcg.
 *
 * The caller must ensure that the returned memcg won't be released:
 * e.g. acquire the rcu_read_lock or css_set_lock.
 */
static inline struct mem_cgroup *obj_cgroup_memcg(struct obj_cgroup *objcg)
{
	return READ_ONCE(objcg->memcg);
}

/*
 * __folio_memcg - Get the memory cgroup associated with a non-kmem folio
 * @folio: Pointer to the folio.
 *
 * Returns a pointer to the memory cgroup associated with the folio,
 * or NULL. This function assumes that the folio is known to have a
 * proper memory cgroup pointer. It's not safe to call this function
 * against some types of folios, e.g. slab folios, ex-slab folios or
 * kmem folios.
 */
static inline struct mem_cgroup *__folio_memcg(struct folio *folio)
{
	unsigned long memcg_data = folio->memcg_data;

	VM_BUG_ON_FOLIO(folio_test_slab(folio), folio);
	VM_BUG_ON_FOLIO(memcg_data & MEMCG_DATA_OBJCGS, folio);
	VM_BUG_ON_FOLIO(memcg_data & MEMCG_DATA_KMEM, folio);

	return (struct mem_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
}

/*
 * __folio_objcg - get the object cgroup associated with a kmem folio.
 * @folio: Pointer to the folio.
 *
 * Returns a pointer to the object cgroup associated with the folio,
 * or NULL. This function assumes that the folio is known to have a
 * proper object cgroup pointer. It's not safe to call this function
 * against some types of folios, e.g. slab folios, ex-slab folios or
 * LRU folios.
 */
static inline struct obj_cgroup *__folio_objcg(struct folio *folio)
{
	unsigned long memcg_data = folio->memcg_data;

	VM_BUG_ON_FOLIO(folio_test_slab(folio), folio);
	VM_BUG_ON_FOLIO(memcg_data & MEMCG_DATA_OBJCGS, folio);
	VM_BUG_ON_FOLIO(!(memcg_data & MEMCG_DATA_KMEM), folio);

	return (struct obj_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
}
/*
 * folio_memcg - Get the memory cgroup associated with a folio.
 * @folio: Pointer to the folio.
 *
 * Returns a pointer to the memory cgroup associated with the folio,
 * or NULL. This function assumes that the folio is known to have a
 * proper memory cgroup pointer. It's not safe to call this function
 * against some types of folios, e.g. slab folios or ex-slab folios.
 *
 * For a non-kmem folio any of the following ensures folio and memcg binding
 * stability:
 *
 * - the folio lock
 * - LRU isolation
 * - folio_memcg_lock()
 * - exclusive reference
 * - mem_cgroup_trylock_pages()
 *
 * For a kmem folio a caller should hold an rcu read lock to protect memcg
 * associated with a kmem folio from being released.
 */
static inline struct mem_cgroup *folio_memcg(struct folio *folio)
{
	if (folio_memcg_kmem(folio))
		return obj_cgroup_memcg(__folio_objcg(folio));
	return __folio_memcg(folio);
}

static inline struct mem_cgroup *page_memcg(struct page *page)
{
	return folio_memcg(page_folio(page));
}

/**
 * folio_memcg_rcu - Locklessly get the memory cgroup associated with a folio.
 * @folio: Pointer to the folio.
 *
 * This function assumes that the folio is known to have a
 * proper memory cgroup pointer. It's not safe to call this function
 * against some types of folios, e.g. slab folios or ex-slab folios.
 *
 * Return: A pointer to the memory cgroup associated with the folio,
 * or NULL.
 */
static inline struct mem_cgroup *folio_memcg_rcu(struct folio *folio)
{
	unsigned long memcg_data = READ_ONCE(folio->memcg_data);

	VM_BUG_ON_FOLIO(folio_test_slab(folio), folio);
	WARN_ON_ONCE(!rcu_read_lock_held());

	if (memcg_data & MEMCG_DATA_KMEM) {
		struct obj_cgroup *objcg;

		objcg = (void *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
		return obj_cgroup_memcg(objcg);
	}

	return (struct mem_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
}

/*
 * folio_memcg_check - Get the memory cgroup associated with a folio.
 * @folio: Pointer to the folio.
 *
 * Returns a pointer to the memory cgroup associated with the folio,
 * or NULL. Unlike folio_memcg(), this function can take any folio
 * as an argument. It has to be used in cases when it's not known if a folio
 * has an associated memory cgroup pointer or an object cgroups vector or
 * an object cgroup.
 *
 * For a non-kmem folio any of the following ensures folio and memcg binding
 * stability:
 *
 * - the folio lock
 * - LRU isolation
 * - folio_memcg_lock()
 * - exclusive reference
 * - mem_cgroup_trylock_pages()
 *
 * For a kmem folio a caller should hold an rcu read lock to protect memcg
 * associated with a kmem folio from being released.
 */
static inline struct mem_cgroup *folio_memcg_check(struct folio *folio)
{
	/*
	 * Because folio->memcg_data might be changed asynchronously
	 * for slabs, READ_ONCE() should be used here.
	 */
	unsigned long memcg_data = READ_ONCE(folio->memcg_data);

	if (memcg_data & MEMCG_DATA_OBJCGS)
		return NULL;

	if (memcg_data & MEMCG_DATA_KMEM) {
		struct obj_cgroup *objcg;

		objcg = (void *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
		return obj_cgroup_memcg(objcg);
	}

	return (struct mem_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
}

static inline struct mem_cgroup *page_memcg_check(struct page *page)
{
	if (PageTail(page))
		return NULL;
	return folio_memcg_check((struct folio *)page);
}

static inline struct mem_cgroup *get_mem_cgroup_from_objcg(struct obj_cgroup *objcg)
{
	struct mem_cgroup *memcg;

	rcu_read_lock();
retry:
	memcg = obj_cgroup_memcg(objcg);
	if (unlikely(!css_tryget(&memcg->css)))
		goto retry;
	rcu_read_unlock();

	return memcg;
}
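/*
 * Illustrative usage sketch (not part of the header, just a restatement of
 * the locking rules documented above): a lockless reader pairs
 * folio_memcg_rcu() with the RCU read lock, while a reader that stabilizes
 * the binding some other way (e.g. the folio lock) may use folio_memcg()
 * directly:
 *
 *	rcu_read_lock();
 *	memcg = folio_memcg_rcu(folio);
 *	... use memcg without sleeping ...
 *	rcu_read_unlock();
 *
 * or:
 *
 *	folio_lock(folio);
 *	memcg = folio_memcg(folio);	stable while the folio stays locked
 *	folio_unlock(folio);
 */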
#ifdef CONFIG_MEMCG_KMEM
/*
 * folio_memcg_kmem - Check if the folio has the memcg_kmem flag set.
 * @folio: Pointer to the folio.
 *
 * Checks if the folio has the MemcgKmem flag set. The caller must ensure
 * that the folio has an associated memory cgroup. It's not safe to call
 * this function against some types of folios, e.g. slab folios.
 */
static inline bool folio_memcg_kmem(struct folio *folio)
{
	VM_BUG_ON_PGFLAGS(PageTail(&folio->page), &folio->page);
	VM_BUG_ON_FOLIO(folio->memcg_data & MEMCG_DATA_OBJCGS, folio);
	return folio->memcg_data & MEMCG_DATA_KMEM;
}

#else
static inline bool folio_memcg_kmem(struct folio *folio)
{
	return false;
}

#endif

static inline bool PageMemcgKmem(struct page *page)
{
	return folio_memcg_kmem(page_folio(page));
}

static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
{
	return (memcg == root_mem_cgroup);
}

static inline bool mem_cgroup_disabled(void)
{
	return !cgroup_subsys_enabled(memory_cgrp_subsys);
}

static inline void mem_cgroup_protection(struct mem_cgroup *root,
					 struct mem_cgroup *memcg,
					 unsigned long *min,
					 unsigned long *low)
{
	*min = *low = 0;

	if (mem_cgroup_disabled())
		return;

	/*
	 * There is no reclaim protection applied to a targeted reclaim.
	 * We are special-casing this here because the calculation in
	 * mem_cgroup_calculate_protection() is not robust enough to keep
	 * the protection invariant for calculated effective values for
	 * parallel reclaimers with different reclaim targets. This is
	 * especially a problem for tail memcgs (as they have pages on LRU)
	 * which would want to have effective values 0 for targeted reclaim
	 * but a different value for external reclaim.
	 *
	 * Example
	 * Let's have global and A's reclaim in parallel:
	 *  |
	 *  A (low=2G, usage = 3G, max = 3G, children_low_usage = 1.5G)
	 *  |\
	 *  | C (low = 1G, usage = 2.5G)
	 *  B (low = 1G, usage = 0.5G)
	 *
	 * For the global reclaim
	 *  A.elow = A.low
	 *  B.elow = min(B.usage, B.low) because children_low_usage <= A.elow
	 *  C.elow = min(C.usage, C.low)
	 *
	 * With the effective values resetting we have A reclaim
	 *  A.elow = 0
	 *  B.elow = B.low
	 *  C.elow = C.low
	 *
	 * If the global reclaim races with A's reclaim then
	 *  B.elow = C.elow = 0 (because children_low_usage > A.elow)
	 * is possible and reclaiming B would be violating the protection.
	 */
	if (root == memcg)
		return;

	*min = READ_ONCE(memcg->memory.emin);
	*low = READ_ONCE(memcg->memory.elow);
}

void mem_cgroup_calculate_protection(struct mem_cgroup *root,
				     struct mem_cgroup *memcg);
static inline bool mem_cgroup_unprotected(struct mem_cgroup *target,
					  struct mem_cgroup *memcg)
{
	/*
	 * The root memcg doesn't account charges, and doesn't support
	 * protection. The target memcg's protection is ignored, see
	 * mem_cgroup_calculate_protection() and mem_cgroup_protection().
	 */
	return mem_cgroup_disabled() || mem_cgroup_is_root(memcg) ||
		memcg == target;
}

static inline bool mem_cgroup_below_low(struct mem_cgroup *target,
					struct mem_cgroup *memcg)
{
	if (mem_cgroup_unprotected(target, memcg))
		return false;

	return READ_ONCE(memcg->memory.elow) >=
		page_counter_read(&memcg->memory);
}

static inline bool mem_cgroup_below_min(struct mem_cgroup *target,
					struct mem_cgroup *memcg)
{
	if (mem_cgroup_unprotected(target, memcg))
		return false;

	return READ_ONCE(memcg->memory.emin) >=
		page_counter_read(&memcg->memory);
}

int __mem_cgroup_charge(struct folio *folio, struct mm_struct *mm, gfp_t gfp);

/**
 * mem_cgroup_charge - Charge a newly allocated folio to a cgroup.
 * @folio: Folio to charge.
 * @mm: mm context of the allocating task.
 * @gfp: Reclaim mode.
 *
 * Try to charge @folio to the memcg that @mm belongs to, reclaiming
 * pages according to @gfp if necessary. If @mm is NULL, try to
 * charge to the active memcg.
 *
 * Do not use this for folios allocated for swapin.
 *
 * Return: 0 on success. Otherwise, an error code is returned.
 */
static inline int mem_cgroup_charge(struct folio *folio, struct mm_struct *mm,
				    gfp_t gfp)
{
	if (mem_cgroup_disabled())
		return 0;
	return __mem_cgroup_charge(folio, mm, gfp);
}

int mem_cgroup_swapin_charge_folio(struct folio *folio, struct mm_struct *mm,
				   gfp_t gfp, swp_entry_t entry);
void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry);

void __mem_cgroup_uncharge(struct folio *folio);

/**
 * mem_cgroup_uncharge - Uncharge a folio.
 * @folio: Folio to uncharge.
 *
 * Uncharge a folio previously charged with mem_cgroup_charge().
 */
static inline void mem_cgroup_uncharge(struct folio *folio)
{
	if (mem_cgroup_disabled())
		return;
	__mem_cgroup_uncharge(folio);
}

void __mem_cgroup_uncharge_list(struct list_head *page_list);
static inline void mem_cgroup_uncharge_list(struct list_head *page_list)
{
	if (mem_cgroup_disabled())
		return;
	__mem_cgroup_uncharge_list(page_list);
}

void mem_cgroup_migrate(struct folio *old, struct folio *new);
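/*
 * Illustrative usage sketch (an assumption about the caller, not a
 * definitive call site): the typical pairing of the charge helpers above
 * for a freshly allocated folio.  The surrounding allocation and error
 * handling are made up for the example:
 *
 *	folio = folio_alloc(gfp, order);
 *	if (!folio)
 *		return -ENOMEM;
 *	if (mem_cgroup_charge(folio, mm, gfp)) {
 *		folio_put(folio);
 *		return -ENOMEM;
 *	}
 *	... use the folio; the charge is dropped again via
 *	    mem_cgroup_uncharge() when the folio is freed ...
 *
 * Folios allocated for swapin must use mem_cgroup_swapin_charge_folio()
 * instead, as noted in the kernel-doc above.
 */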
/**
 * mem_cgroup_lruvec - get the lru list vector for a memcg & node
 * @memcg: memcg of the wanted lruvec
 * @pgdat: pglist_data
 *
 * Returns the lru list vector holding pages for a given @memcg &
 * @pgdat combination. This can be the node lruvec, if the memory
 * controller is disabled.
 */
static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg,
					       struct pglist_data *pgdat)
{
	struct mem_cgroup_per_node *mz;
	struct lruvec *lruvec;

	if (mem_cgroup_disabled()) {
		lruvec = &pgdat->__lruvec;
		goto out;
	}

	if (!memcg)
		memcg = root_mem_cgroup;

	mz = memcg->nodeinfo[pgdat->node_id];
	lruvec = &mz->lruvec;
out:
	/*
	 * Since a node can be onlined after the mem_cgroup was created,
	 * we have to be prepared to initialize lruvec->pgdat here;
	 * and if offlined then reonlined, we need to reinitialize it.
	 */
	if (unlikely(lruvec->pgdat != pgdat))
		lruvec->pgdat = pgdat;
	return lruvec;
}

/**
 * folio_lruvec - return lruvec for isolating/putting an LRU folio
 * @folio: Pointer to the folio.
 *
 * This function relies on folio->mem_cgroup being stable.
 */
static inline struct lruvec *folio_lruvec(struct folio *folio)
{
	struct mem_cgroup *memcg = folio_memcg(folio);

	VM_WARN_ON_ONCE_FOLIO(!memcg && !mem_cgroup_disabled(), folio);
	return mem_cgroup_lruvec(memcg, folio_pgdat(folio));
}

struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);

struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm);

struct lruvec *folio_lruvec_lock(struct folio *folio);
struct lruvec *folio_lruvec_lock_irq(struct folio *folio);
struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio,
					 unsigned long *flags);

#ifdef CONFIG_DEBUG_VM
void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio);
#else
static inline
void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio)
{
}
#endif

static inline
struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css)
{
	return css ? container_of(css, struct mem_cgroup, css) : NULL;
}

static inline bool obj_cgroup_tryget(struct obj_cgroup *objcg)
{
	return percpu_ref_tryget(&objcg->refcnt);
}

static inline void obj_cgroup_get(struct obj_cgroup *objcg)
{
	percpu_ref_get(&objcg->refcnt);
}

static inline void obj_cgroup_get_many(struct obj_cgroup *objcg,
				       unsigned long nr)
{
	percpu_ref_get_many(&objcg->refcnt, nr);
}

static inline void obj_cgroup_put(struct obj_cgroup *objcg)
{
	percpu_ref_put(&objcg->refcnt);
}

static inline bool mem_cgroup_tryget(struct mem_cgroup *memcg)
{
	return !memcg || css_tryget(&memcg->css);
}

static inline void mem_cgroup_put(struct mem_cgroup *memcg)
{
	if (memcg)
		css_put(&memcg->css);
}

#define mem_cgroup_from_counter(counter, member)	\
	container_of(counter, struct mem_cgroup, member)

struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
				   struct mem_cgroup *,
				   struct mem_cgroup_reclaim_cookie *);
void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
void mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
			   int (*)(struct task_struct *, void *), void *arg);
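/*
 * Illustrative usage sketch (mirrors the iteration pattern described for
 * mem_cgroup_iter() in mm/memcontrol.c): walk the subtree under @root,
 * passing the previous return value back in, and use
 * mem_cgroup_iter_break() if the walk is abandoned early.  The
 * should_stop() predicate is hypothetical:
 *
 *	struct mem_cgroup *memcg;
 *
 *	for (memcg = mem_cgroup_iter(root, NULL, NULL);
 *	     memcg;
 *	     memcg = mem_cgroup_iter(root, memcg, NULL)) {
 *		if (should_stop(memcg)) {
 *			mem_cgroup_iter_break(root, memcg);
 *			break;
 *		}
 *		...
 *	}
 */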
static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
{
	if (mem_cgroup_disabled())
		return 0;

	return memcg->id.id;
}
struct mem_cgroup *mem_cgroup_from_id(unsigned short id);

#ifdef CONFIG_SHRINKER_DEBUG
static inline unsigned long mem_cgroup_ino(struct mem_cgroup *memcg)
{
	return memcg ? cgroup_ino(memcg->css.cgroup) : 0;
}

struct mem_cgroup *mem_cgroup_get_from_ino(unsigned long ino);
#endif

static inline struct mem_cgroup *mem_cgroup_from_seq(struct seq_file *m)
{
	return mem_cgroup_from_css(seq_css(m));
}

static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
{
	struct mem_cgroup_per_node *mz;

	if (mem_cgroup_disabled())
		return NULL;

	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	return mz->memcg;
}

/**
 * parent_mem_cgroup - find the accounting parent of a memcg
 * @memcg: memcg whose parent to find
 *
 * Returns the parent memcg, or NULL if this is the root or the memory
 * controller is in legacy no-hierarchy mode.
 */
static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
{
	return mem_cgroup_from_css(memcg->css.parent);
}

static inline bool mem_cgroup_is_descendant(struct mem_cgroup *memcg,
					    struct mem_cgroup *root)
{
	if (root == memcg)
		return true;
	return cgroup_is_descendant(memcg->css.cgroup, root->css.cgroup);
}

static inline bool mm_match_cgroup(struct mm_struct *mm,
				   struct mem_cgroup *memcg)
{
	struct mem_cgroup *task_memcg;
	bool match = false;

	rcu_read_lock();
	task_memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
	if (task_memcg)
		match = mem_cgroup_is_descendant(task_memcg, memcg);
	rcu_read_unlock();
	return match;
}

struct cgroup_subsys_state *mem_cgroup_css_from_folio(struct folio *folio);
ino_t page_cgroup_ino(struct page *page);

static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
{
	if (mem_cgroup_disabled())
		return true;
	return !!(memcg->css.flags & CSS_ONLINE);
}

void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
				int zid, int nr_pages);

static inline
unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
					   enum lru_list lru, int zone_idx)
{
	struct mem_cgroup_per_node *mz;

	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	return READ_ONCE(mz->lru_zone_size[zone_idx][lru]);
}

void mem_cgroup_handle_over_high(void);

unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg);

unsigned long mem_cgroup_size(struct mem_cgroup *memcg);

void mem_cgroup_print_oom_context(struct mem_cgroup *memcg,
				  struct task_struct *p);

void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg);

static inline void mem_cgroup_enter_user_fault(void)
{
	WARN_ON(current->in_user_fault);
	current->in_user_fault = 1;
}

static inline void mem_cgroup_exit_user_fault(void)
{
	WARN_ON(!current->in_user_fault);
	current->in_user_fault = 0;
}

static inline bool task_in_memcg_oom(struct task_struct *p)
{
	return p->memcg_in_oom;
}

bool mem_cgroup_oom_synchronize(bool wait);
struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim,
					    struct mem_cgroup *oom_domain);
void mem_cgroup_print_oom_group(struct mem_cgroup *memcg);
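/*
 * Illustrative sketch (an assumption about the caller, loosely modeled on
 * the user page fault path): user faults are bracketed with the
 * enter/exit helpers above so that a failing memcg charge can mark the
 * task as being in a memcg OOM, which is then settled once the fault
 * unwinds.  __do_fault_work() is a hypothetical placeholder:
 *
 *	mem_cgroup_enter_user_fault();
 *	ret = __do_fault_work(vma, addr, flags);
 *	mem_cgroup_exit_user_fault();
 *
 *	if (task_in_memcg_oom(current) && !(ret & VM_FAULT_OOM))
 *		mem_cgroup_oom_synchronize(false);
 */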
void folio_memcg_lock(struct folio *folio);
void folio_memcg_unlock(struct folio *folio);

void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val);

/* try to stabilize folio_memcg() for all the pages in a memcg */
static inline bool mem_cgroup_trylock_pages(struct mem_cgroup *memcg)
{
	rcu_read_lock();

	if (mem_cgroup_disabled() || !atomic_read(&memcg->moving_account))
		return true;

	rcu_read_unlock();
	return false;
}

static inline void mem_cgroup_unlock_pages(void)
{
	rcu_read_unlock();
}

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void mod_memcg_state(struct mem_cgroup *memcg,
				   int idx, int val)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_memcg_state(memcg, idx, val);
	local_irq_restore(flags);
}

static inline void mod_memcg_page_state(struct page *page,
					int idx, int val)
{
	struct mem_cgroup *memcg;

	if (mem_cgroup_disabled())
		return;

	rcu_read_lock();
	memcg = page_memcg(page);
	if (memcg)
		mod_memcg_state(memcg, idx, val);
	rcu_read_unlock();
}

unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx);

static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
					      enum node_stat_item idx)
{
	struct mem_cgroup_per_node *pn;
	long x;

	if (mem_cgroup_disabled())
		return node_page_state(lruvec_pgdat(lruvec), idx);

	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	x = READ_ONCE(pn->lruvec_stats.state[idx]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
						    enum node_stat_item idx)
{
	struct mem_cgroup_per_node *pn;
	long x = 0;
	int cpu;

	if (mem_cgroup_disabled())
		return node_page_state(lruvec_pgdat(lruvec), idx);

	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	for_each_possible_cpu(cpu)
		x += per_cpu(pn->lruvec_stats_percpu->state[idx], cpu);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

void mem_cgroup_flush_stats(void);
void mem_cgroup_flush_stats_ratelimited(void);

void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
			      int val);
void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val);

static inline void mod_lruvec_kmem_state(void *p, enum node_stat_item idx,
					 int val)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_lruvec_kmem_state(p, idx, val);
	local_irq_restore(flags);
}

static inline void mod_memcg_lruvec_state(struct lruvec *lruvec,
					  enum node_stat_item idx, int val)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_memcg_lruvec_state(lruvec, idx, val);
	local_irq_restore(flags);
}

void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
			  unsigned long count);

static inline void count_memcg_events(struct mem_cgroup *memcg,
				      enum vm_event_item idx,
				      unsigned long count)
{
	unsigned long flags;

	local_irq_save(flags);
	__count_memcg_events(memcg, idx, count);
	local_irq_restore(flags);
}

static inline void count_memcg_page_event(struct page *page,
					  enum vm_event_item idx)
{
	struct mem_cgroup *memcg = page_memcg(page);

	if (memcg)
		count_memcg_events(memcg, idx, 1);
}

static inline void count_memcg_folio_events(struct folio *folio,
		enum vm_event_item idx, unsigned long nr)
{
	struct mem_cgroup *memcg = folio_memcg(folio);

	if (memcg)
		count_memcg_events(memcg, idx, nr);
}

static inline void count_memcg_event_mm(struct mm_struct *mm,
					enum vm_event_item idx)
{
	struct mem_cgroup *memcg;

	if (mem_cgroup_disabled())
		return;

	rcu_read_lock();
	memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
	if (likely(memcg))
		count_memcg_events(memcg, idx, 1);
	rcu_read_unlock();
}
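/*
 * Note on the naming convention above (added for clarity, not new API):
 * the double-underscore variants (__mod_memcg_state(),
 * __mod_lruvec_kmem_state(), __count_memcg_events(), ...) expect the
 * caller to have interrupts disabled already; the plain-named wrappers
 * only add the local_irq_save()/local_irq_restore() bracketing, as can be
 * seen in their definitions above.  A sketch of choosing between the two:
 *
 *	local_irq_save(flags);
 *	... other work that must run with IRQs off ...
 *	__count_memcg_events(memcg, PGACTIVATE, 1);
 *	local_irq_restore(flags);
 *
 *	count_memcg_events(memcg, PGACTIVATE, 1);	otherwise
 */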
static inline void memcg_memory_event(struct mem_cgroup *memcg,
				      enum memcg_memory_event event)
{
	bool swap_event = event == MEMCG_SWAP_HIGH || event == MEMCG_SWAP_MAX ||
			  event == MEMCG_SWAP_FAIL;

	atomic_long_inc(&memcg->memory_events_local[event]);
	if (!swap_event)
		cgroup_file_notify(&memcg->events_local_file);

	do {
		atomic_long_inc(&memcg->memory_events[event]);
		if (swap_event)
			cgroup_file_notify(&memcg->swap_events_file);
		else
			cgroup_file_notify(&memcg->events_file);

		if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
			break;
		if (cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_LOCAL_EVENTS)
			break;
	} while ((memcg = parent_mem_cgroup(memcg)) &&
		 !mem_cgroup_is_root(memcg));
}

static inline void memcg_memory_event_mm(struct mm_struct *mm,
					 enum memcg_memory_event event)
{
	struct mem_cgroup *memcg;

	if (mem_cgroup_disabled())
		return;

	rcu_read_lock();
	memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
	if (likely(memcg))
		memcg_memory_event(memcg, event);
	rcu_read_unlock();
}

void split_page_memcg(struct page *head, unsigned int nr);

unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
					    gfp_t gfp_mask,
					    unsigned long *total_scanned);

#else /* CONFIG_MEMCG */

#define MEM_CGROUP_ID_SHIFT	0
#define MEM_CGROUP_ID_MAX	0

static inline struct mem_cgroup *folio_memcg(struct folio *folio)
{
	return NULL;
}

static inline struct mem_cgroup *page_memcg(struct page *page)
{
	return NULL;
}

static inline struct mem_cgroup *folio_memcg_rcu(struct folio *folio)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return NULL;
}

static inline struct mem_cgroup *folio_memcg_check(struct folio *folio)
{
	return NULL;
}

static inline struct mem_cgroup *page_memcg_check(struct page *page)
{
	return NULL;
}

static inline bool folio_memcg_kmem(struct folio *folio)
{
	return false;
}

static inline bool PageMemcgKmem(struct page *page)
{
	return false;
}

static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
{
	return true;
}

static inline bool mem_cgroup_disabled(void)
{
	return true;
}

static inline void memcg_memory_event(struct mem_cgroup *memcg,
				      enum memcg_memory_event event)
{
}

static inline void memcg_memory_event_mm(struct mm_struct *mm,
					 enum memcg_memory_event event)
{
}

static inline void mem_cgroup_protection(struct mem_cgroup *root,
					 struct mem_cgroup *memcg,
					 unsigned long *min,
					 unsigned long *low)
{
	*min = *low = 0;
}

static inline void mem_cgroup_calculate_protection(struct mem_cgroup *root,
						   struct mem_cgroup *memcg)
{
}
static inline bool mem_cgroup_unprotected(struct mem_cgroup *target,
					  struct mem_cgroup *memcg)
{
	return true;
}
static inline bool mem_cgroup_below_low(struct mem_cgroup *target,
					struct mem_cgroup *memcg)
{
	return false;
}

static inline bool mem_cgroup_below_min(struct mem_cgroup *target,
					struct mem_cgroup *memcg)
{
	return false;
}

static inline int mem_cgroup_charge(struct folio *folio,
				    struct mm_struct *mm, gfp_t gfp)
{
	return 0;
}

static inline int mem_cgroup_swapin_charge_folio(struct folio *folio,
			struct mm_struct *mm, gfp_t gfp, swp_entry_t entry)
{
	return 0;
}

static inline void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry)
{
}

static inline void mem_cgroup_uncharge(struct folio *folio)
{
}

static inline void mem_cgroup_uncharge_list(struct list_head *page_list)
{
}

static inline void mem_cgroup_migrate(struct folio *old, struct folio *new)
{
}

static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg,
					       struct pglist_data *pgdat)
{
	return &pgdat->__lruvec;
}

static inline struct lruvec *folio_lruvec(struct folio *folio)
{
	struct pglist_data *pgdat = folio_pgdat(folio);
	return &pgdat->__lruvec;
}

static inline
void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio)
{
}

static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
{
	return NULL;
}

static inline bool mm_match_cgroup(struct mm_struct *mm,
				   struct mem_cgroup *memcg)
{
	return true;
}

static inline struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
{
	return NULL;
}

static inline
struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css)
{
	return NULL;
}

static inline void obj_cgroup_put(struct obj_cgroup *objcg)
{
}

static inline bool mem_cgroup_tryget(struct mem_cgroup *memcg)
{
	return true;
}

static inline void mem_cgroup_put(struct mem_cgroup *memcg)
{
}

static inline struct lruvec *folio_lruvec_lock(struct folio *folio)
{
	struct pglist_data *pgdat = folio_pgdat(folio);

	spin_lock(&pgdat->__lruvec.lru_lock);
	return &pgdat->__lruvec;
}

static inline struct lruvec *folio_lruvec_lock_irq(struct folio *folio)
{
	struct pglist_data *pgdat = folio_pgdat(folio);

	spin_lock_irq(&pgdat->__lruvec.lru_lock);
	return &pgdat->__lruvec;
}

static inline struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio,
							unsigned long *flagsp)
{
	struct pglist_data *pgdat = folio_pgdat(folio);

	spin_lock_irqsave(&pgdat->__lruvec.lru_lock, *flagsp);
	return &pgdat->__lruvec;
}

static inline struct mem_cgroup *
mem_cgroup_iter(struct mem_cgroup *root,
		struct mem_cgroup *prev,
		struct mem_cgroup_reclaim_cookie *reclaim)
{
	return NULL;
}

static inline void mem_cgroup_iter_break(struct mem_cgroup *root,
					 struct mem_cgroup *prev)
{
}

static inline void mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
		int (*fn)(struct task_struct *, void *), void *arg)
{
}

static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
{
	return 0;
}
static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
{
	WARN_ON_ONCE(id);
	/* XXX: This should always return root_mem_cgroup */
	return NULL;
}

#ifdef CONFIG_SHRINKER_DEBUG
static inline unsigned long mem_cgroup_ino(struct mem_cgroup *memcg)
{
	return 0;
}

static inline struct mem_cgroup *mem_cgroup_get_from_ino(unsigned long ino)
{
	return NULL;
}
#endif

static inline struct mem_cgroup *mem_cgroup_from_seq(struct seq_file *m)
{
	return NULL;
}

static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
{
	return NULL;
}

static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
{
	return true;
}

static inline
unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
					   enum lru_list lru, int zone_idx)
{
	return 0;
}

static inline unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
{
	return 0;
}

static inline unsigned long mem_cgroup_size(struct mem_cgroup *memcg)
{
	return 0;
}

static inline void
mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
{
}

static inline void
mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
{
}

static inline void folio_memcg_lock(struct folio *folio)
{
}

static inline void folio_memcg_unlock(struct folio *folio)
{
}

static inline bool mem_cgroup_trylock_pages(struct mem_cgroup *memcg)
{
	/* to match folio_memcg_rcu() */
	rcu_read_lock();
	return true;
}

static inline void mem_cgroup_unlock_pages(void)
{
	rcu_read_unlock();
}

static inline void mem_cgroup_handle_over_high(void)
{
}

static inline void mem_cgroup_enter_user_fault(void)
{
}

static inline void mem_cgroup_exit_user_fault(void)
{
}

static inline bool task_in_memcg_oom(struct task_struct *p)
{
	return false;
}

static inline bool mem_cgroup_oom_synchronize(bool wait)
{
	return false;
}

static inline struct mem_cgroup *mem_cgroup_get_oom_group(
	struct task_struct *victim, struct mem_cgroup *oom_domain)
{
	return NULL;
}

static inline void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
{
}

static inline void __mod_memcg_state(struct mem_cgroup *memcg,
				     int idx,
				     int nr)
{
}

static inline void mod_memcg_state(struct mem_cgroup *memcg,
				   int idx,
				   int nr)
{
}

static inline void mod_memcg_page_state(struct page *page,
					int idx, int val)
{
}

static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
{
	return 0;
}

static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
					      enum node_stat_item idx)
{
	return node_page_state(lruvec_pgdat(lruvec), idx);
}

static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
						    enum node_stat_item idx)
{
	return node_page_state(lruvec_pgdat(lruvec), idx);
}

static inline void mem_cgroup_flush_stats(void)
{
}

static inline void mem_cgroup_flush_stats_ratelimited(void)
{
}
static inline void __mod_memcg_lruvec_state(struct lruvec *lruvec,
					    enum node_stat_item idx, int val)
{
}

static inline void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx,
					   int val)
{
	struct page *page = virt_to_head_page(p);

	__mod_node_page_state(page_pgdat(page), idx, val);
}

static inline void mod_lruvec_kmem_state(void *p, enum node_stat_item idx,
					 int val)
{
	struct page *page = virt_to_head_page(p);

	mod_node_page_state(page_pgdat(page), idx, val);
}

static inline void count_memcg_events(struct mem_cgroup *memcg,
				      enum vm_event_item idx,
				      unsigned long count)
{
}

static inline void __count_memcg_events(struct mem_cgroup *memcg,
					enum vm_event_item idx,
					unsigned long count)
{
}

static inline void count_memcg_page_event(struct page *page,
					  int idx)
{
}

static inline void count_memcg_folio_events(struct folio *folio,
		enum vm_event_item idx, unsigned long nr)
{
}

static inline
void count_memcg_event_mm(struct mm_struct *mm, enum vm_event_item idx)
{
}

static inline void split_page_memcg(struct page *head, unsigned int nr)
{
}

static inline
unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
					    gfp_t gfp_mask,
					    unsigned long *total_scanned)
{
	return 0;
}
#endif /* CONFIG_MEMCG */

static inline void __inc_lruvec_kmem_state(void *p, enum node_stat_item idx)
{
	__mod_lruvec_kmem_state(p, idx, 1);
}

static inline void __dec_lruvec_kmem_state(void *p, enum node_stat_item idx)
{
	__mod_lruvec_kmem_state(p, idx, -1);
}

static inline struct lruvec *parent_lruvec(struct lruvec *lruvec)
{
	struct mem_cgroup *memcg;

	memcg = lruvec_memcg(lruvec);
	if (!memcg)
		return NULL;
	memcg = parent_mem_cgroup(memcg);
	if (!memcg)
		return NULL;
	return mem_cgroup_lruvec(memcg, lruvec_pgdat(lruvec));
}

static inline void unlock_page_lruvec(struct lruvec *lruvec)
{
	spin_unlock(&lruvec->lru_lock);
}

static inline void unlock_page_lruvec_irq(struct lruvec *lruvec)
{
	spin_unlock_irq(&lruvec->lru_lock);
}

static inline void unlock_page_lruvec_irqrestore(struct lruvec *lruvec,
						 unsigned long flags)
{
	spin_unlock_irqrestore(&lruvec->lru_lock, flags);
}

/* Test requires a stable page->memcg binding, see page_memcg() */
static inline bool folio_matches_lruvec(struct folio *folio,
					struct lruvec *lruvec)
{
	return lruvec_pgdat(lruvec) == folio_pgdat(folio) &&
	       lruvec_memcg(lruvec) == folio_memcg(folio);
}

/* Don't lock again iff the folio's lruvec is already locked */
static inline struct lruvec *folio_lruvec_relock_irq(struct folio *folio,
		struct lruvec *locked_lruvec)
{
	if (locked_lruvec) {
		if (folio_matches_lruvec(folio, locked_lruvec))
			return locked_lruvec;

		unlock_page_lruvec_irq(locked_lruvec);
	}

	return folio_lruvec_lock_irq(folio);
}
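/*
 * Illustrative usage sketch (an assumption about the caller, modeled on
 * batched LRU walks in mm/): when processing a list of folios, the relock
 * helpers keep the current lruvec lock held for consecutive folios that
 * share the same memcg and node, and only cycle the lock on a change.
 * The "folios" list head is made up for the example:
 *
 *	struct lruvec *lruvec = NULL;
 *	struct folio *folio;
 *
 *	list_for_each_entry(folio, &folios, lru) {
 *		lruvec = folio_lruvec_relock_irq(folio, lruvec);
 *		... operate on folio under lruvec->lru_lock ...
 *	}
 *	if (lruvec)
 *		unlock_page_lruvec_irq(lruvec);
 */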
/* Don't lock again iff the folio's lruvec is already locked */
static inline struct lruvec *folio_lruvec_relock_irqsave(struct folio *folio,
		struct lruvec *locked_lruvec, unsigned long *flags)
{
	if (locked_lruvec) {
		if (folio_matches_lruvec(folio, locked_lruvec))
			return locked_lruvec;

		unlock_page_lruvec_irqrestore(locked_lruvec, *flags);
	}

	return folio_lruvec_lock_irqsave(folio, flags);
}

#ifdef CONFIG_CGROUP_WRITEBACK

struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb);
void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
			 unsigned long *pheadroom, unsigned long *pdirty,
			 unsigned long *pwriteback);

void mem_cgroup_track_foreign_dirty_slowpath(struct folio *folio,
					     struct bdi_writeback *wb);

static inline void mem_cgroup_track_foreign_dirty(struct folio *folio,
						  struct bdi_writeback *wb)
{
	struct mem_cgroup *memcg;

	if (mem_cgroup_disabled())
		return;

	memcg = folio_memcg(folio);
	if (unlikely(memcg && &memcg->css != wb->memcg_css))
		mem_cgroup_track_foreign_dirty_slowpath(folio, wb);
}

void mem_cgroup_flush_foreign(struct bdi_writeback *wb);

#else /* CONFIG_CGROUP_WRITEBACK */

static inline struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
{
	return NULL;
}

static inline void mem_cgroup_wb_stats(struct bdi_writeback *wb,
				       unsigned long *pfilepages,
				       unsigned long *pheadroom,
				       unsigned long *pdirty,
				       unsigned long *pwriteback)
{
}

static inline void mem_cgroup_track_foreign_dirty(struct folio *folio,
						  struct bdi_writeback *wb)
{
}

static inline void mem_cgroup_flush_foreign(struct bdi_writeback *wb)
{
}

#endif	/* CONFIG_CGROUP_WRITEBACK */

struct sock;
bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages,
			     gfp_t gfp_mask);
void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
#ifdef CONFIG_MEMCG
extern struct static_key_false memcg_sockets_enabled_key;
#define mem_cgroup_sockets_enabled static_branch_unlikely(&memcg_sockets_enabled_key)
void mem_cgroup_sk_alloc(struct sock *sk);
void mem_cgroup_sk_free(struct sock *sk);
static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
{
	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
		return !!memcg->tcpmem_pressure;
	do {
		if (time_before(jiffies, READ_ONCE(memcg->socket_pressure)))
			return true;
	} while ((memcg = parent_mem_cgroup(memcg)));
	return false;
}

int alloc_shrinker_info(struct mem_cgroup *memcg);
void free_shrinker_info(struct mem_cgroup *memcg);
void set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id);
void reparent_shrinker_deferred(struct mem_cgroup *memcg);
#else
#define mem_cgroup_sockets_enabled 0
static inline void mem_cgroup_sk_alloc(struct sock *sk) { }
static inline void mem_cgroup_sk_free(struct sock *sk) { }
static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
{
	return false;
}

static inline void set_shrinker_bit(struct mem_cgroup *memcg,
				    int nid, int shrinker_id)
{
}
#endif
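/*
 * Illustrative usage sketch (an assumption about the caller, loosely based
 * on how networking code consumes these hooks; the sk->sk_memcg field is
 * assumed here rather than defined by this header): socket memory is only
 * charged when the static key above is enabled and the socket has an
 * associated memcg:
 *
 *	if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
 *	    !mem_cgroup_charge_skmem(sk->sk_memcg, nr_pages, gfp))
 *		... fail the allocation or enter memory pressure handling ...
 *
 *	if (mem_cgroup_sockets_enabled && sk->sk_memcg)
 *		mem_cgroup_uncharge_skmem(sk->sk_memcg, nr_pages);
 */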
#ifdef CONFIG_MEMCG_KMEM
bool mem_cgroup_kmem_disabled(void);
int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order);
void __memcg_kmem_uncharge_page(struct page *page, int order);

struct obj_cgroup *get_obj_cgroup_from_current(void);
struct obj_cgroup *get_obj_cgroup_from_page(struct page *page);

int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size);
void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size);

extern struct static_key_false memcg_bpf_enabled_key;
static inline bool memcg_bpf_enabled(void)
{
	return static_branch_likely(&memcg_bpf_enabled_key);
}

extern struct static_key_false memcg_kmem_online_key;

static inline bool memcg_kmem_online(void)
{
	return static_branch_likely(&memcg_kmem_online_key);
}

static inline int memcg_kmem_charge_page(struct page *page, gfp_t gfp,
					 int order)
{
	if (memcg_kmem_online())
		return __memcg_kmem_charge_page(page, gfp, order);
	return 0;
}

static inline void memcg_kmem_uncharge_page(struct page *page, int order)
{
	if (memcg_kmem_online())
		__memcg_kmem_uncharge_page(page, order);
}

/*
 * A helper for accessing memcg's kmem_id, used for getting
 * corresponding LRU lists.
 */
static inline int memcg_kmem_id(struct mem_cgroup *memcg)
{
	return memcg ? memcg->kmemcg_id : -1;
}

struct mem_cgroup *mem_cgroup_from_obj(void *p);
struct mem_cgroup *mem_cgroup_from_slab_obj(void *p);

static inline void count_objcg_event(struct obj_cgroup *objcg,
				     enum vm_event_item idx)
{
	struct mem_cgroup *memcg;

	if (!memcg_kmem_online())
		return;

	rcu_read_lock();
	memcg = obj_cgroup_memcg(objcg);
	count_memcg_events(memcg, idx, 1);
	rcu_read_unlock();
}

#else
static inline bool mem_cgroup_kmem_disabled(void)
{
	return true;
}

static inline int memcg_kmem_charge_page(struct page *page, gfp_t gfp,
					 int order)
{
	return 0;
}

static inline void memcg_kmem_uncharge_page(struct page *page, int order)
{
}

static inline int __memcg_kmem_charge_page(struct page *page, gfp_t gfp,
					   int order)
{
	return 0;
}

static inline void __memcg_kmem_uncharge_page(struct page *page, int order)
{
}

static inline struct obj_cgroup *get_obj_cgroup_from_page(struct page *page)
{
	return NULL;
}

static inline bool memcg_bpf_enabled(void)
{
	return false;
}

static inline bool memcg_kmem_online(void)
{
	return false;
}

static inline int memcg_kmem_id(struct mem_cgroup *memcg)
{
	return -1;
}

static inline struct mem_cgroup *mem_cgroup_from_obj(void *p)
{
	return NULL;
}

static inline struct mem_cgroup *mem_cgroup_from_slab_obj(void *p)
{
	return NULL;
}

static inline void count_objcg_event(struct obj_cgroup *objcg,
				     enum vm_event_item idx)
{
}

#endif /* CONFIG_MEMCG_KMEM */

#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
bool obj_cgroup_may_zswap(struct obj_cgroup *objcg);
void obj_cgroup_charge_zswap(struct obj_cgroup *objcg, size_t size);
void obj_cgroup_uncharge_zswap(struct obj_cgroup *objcg, size_t size);
#else
static inline bool obj_cgroup_may_zswap(struct obj_cgroup *objcg)
{
	return true;
}
static inline void obj_cgroup_charge_zswap(struct obj_cgroup *objcg,
					   size_t size)
{
}
static inline void obj_cgroup_uncharge_zswap(struct obj_cgroup *objcg,
					     size_t size)
{
}
#endif

#endif /* _LINUX_MEMCONTROL_H */