/* SPDX-License-Identifier: GPL-2.0-or-later */
/* memcontrol.h - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <[email protected]>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <[email protected]>
 */

#ifndef _LINUX_MEMCONTROL_H
#define _LINUX_MEMCONTROL_H
#include <linux/cgroup.h>
#include <linux/vm_event_item.h>
#include <linux/hardirq.h>
#include <linux/jump_label.h>
#include <linux/page_counter.h>
#include <linux/vmpressure.h>
#include <linux/eventfd.h>
#include <linux/mm.h>
#include <linux/vmstat.h>
#include <linux/writeback.h>
#include <linux/page-flags.h>

struct mem_cgroup;
struct obj_cgroup;
struct page;
struct mm_struct;
struct kmem_cache;

/* Cgroup-specific page state, on top of universal node page state */
enum memcg_stat_item {
	MEMCG_SWAP = NR_VM_NODE_STAT_ITEMS,
	MEMCG_SOCK,
	MEMCG_PERCPU_B,
	MEMCG_VMALLOC,
	MEMCG_KMEM,
	MEMCG_ZSWAP_B,
	MEMCG_ZSWAPPED,
	MEMCG_NR_STAT,
};

enum memcg_memory_event {
	MEMCG_LOW,
	MEMCG_HIGH,
	MEMCG_MAX,
	MEMCG_OOM,
	MEMCG_OOM_KILL,
	MEMCG_OOM_GROUP_KILL,
	MEMCG_SWAP_HIGH,
	MEMCG_SWAP_MAX,
	MEMCG_SWAP_FAIL,
	MEMCG_NR_MEMORY_EVENTS,
};

struct mem_cgroup_reclaim_cookie {
	pg_data_t *pgdat;
	unsigned int generation;
};

#ifdef CONFIG_MEMCG

#define MEM_CGROUP_ID_SHIFT	16
#define MEM_CGROUP_ID_MAX	USHRT_MAX

struct mem_cgroup_id {
	int id;
	refcount_t ref;
};

/*
 * The per-memcg event counter is incremented at every pagein/pageout. With THP,
 * it will be incremented by the number of pages. This counter is used
 * to trigger some periodic events. This is straightforward and better
 * than using jiffies etc. to handle periodic memcg events.
 */
enum mem_cgroup_events_target {
	MEM_CGROUP_TARGET_THRESH,
	MEM_CGROUP_TARGET_SOFTLIMIT,
	MEM_CGROUP_NTARGETS,
};

struct memcg_vmstats_percpu {
	/* Local (CPU and cgroup) page state & events */
	long			state[MEMCG_NR_STAT];
	unsigned long		events[NR_VM_EVENT_ITEMS];

	/* Delta calculation for lockless upward propagation */
	long			state_prev[MEMCG_NR_STAT];
	unsigned long		events_prev[NR_VM_EVENT_ITEMS];

	/* Cgroup1: threshold notifications & softlimit tree updates */
	unsigned long		nr_page_events;
	unsigned long		targets[MEM_CGROUP_NTARGETS];
};

struct memcg_vmstats {
	/* Aggregated (CPU and subtree) page state & events */
	long			state[MEMCG_NR_STAT];
	unsigned long		events[NR_VM_EVENT_ITEMS];

	/* Pending child counts during tree propagation */
	long			state_pending[MEMCG_NR_STAT];
	unsigned long		events_pending[NR_VM_EVENT_ITEMS];
};
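
/*
 * Illustrative sketch (not part of this header's API): the split between
 * state[] and state_prev[] above exists to support lockless upward
 * propagation of the per-CPU counters. A flusher running on each CPU would
 * compute the delta accumulated since the previous flush and forward only
 * that delta into the aggregated memcg_vmstats of the cgroup and its
 * ancestors, roughly:
 *
 *	long delta = statc->state[i] - statc->state_prev[i];
 *
 *	if (delta) {
 *		statc->state_prev[i] = statc->state[i];
 *		vmstats->state[i] += delta;
 *	}
 *
 * The names statc and vmstats are placeholders; the real flushing is driven
 * by the rstat machinery in mm/memcontrol.c.
 */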

struct mem_cgroup_reclaim_iter {
	struct mem_cgroup *position;
	/* scan generation, increased every round-trip */
	unsigned int generation;
};

/*
 * Bitmap and deferred work of shrinker::id corresponding to memcg-aware
 * shrinkers, which have elements charged to this memcg.
 */
struct shrinker_info {
	struct rcu_head rcu;
	atomic_long_t *nr_deferred;
	unsigned long *map;
};
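
/*
 * Illustrative sketch (not part of this header's API): the map bitmap above
 * carries one bit per registered memcg-aware shrinker. After queueing an
 * object on a per-memcg, per-node structure, the producer is expected to
 * mark the owning shrinker as potentially having work for this memcg:
 *
 *	set_shrinker_bit(memcg, nid, shrinker_id);
 *
 * so that shrink_slab() will later consult the bit and invoke that shrinker
 * for this memcg. set_shrinker_bit() is declared further down in this
 * header; shrinker_id is a placeholder for the shrinker's registered id.
 */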

struct lruvec_stats_percpu {
	/* Local (CPU and cgroup) state */
	long state[NR_VM_NODE_STAT_ITEMS];

	/* Delta calculation for lockless upward propagation */
	long state_prev[NR_VM_NODE_STAT_ITEMS];
};

struct lruvec_stats {
	/* Aggregated (CPU and subtree) state */
	long state[NR_VM_NODE_STAT_ITEMS];

	/* Pending child counts during tree propagation */
	long state_pending[NR_VM_NODE_STAT_ITEMS];
};

/*
 * per-node information in memory controller.
 */
struct mem_cgroup_per_node {
	struct lruvec		lruvec;

	struct lruvec_stats_percpu __percpu	*lruvec_stats_percpu;
	struct lruvec_stats			lruvec_stats;

	unsigned long		lru_zone_size[MAX_NR_ZONES][NR_LRU_LISTS];

	struct mem_cgroup_reclaim_iter	iter;

	struct shrinker_info __rcu	*shrinker_info;

	struct rb_node		tree_node;	/* RB tree node */
	unsigned long		usage_in_excess;/* Set to the value by which */
						/* the soft limit is exceeded*/
	bool			on_tree;
	struct mem_cgroup	*memcg;		/* Back pointer, we cannot */
						/* use container_of	   */
};

struct mem_cgroup_threshold {
	struct eventfd_ctx *eventfd;
	unsigned long threshold;
};

/* For threshold */
struct mem_cgroup_threshold_ary {
	/* An array index pointing to the threshold just below or equal to usage. */
	int current_threshold;
	/* Size of entries[] */
	unsigned int size;
	/* Array of thresholds */
	struct mem_cgroup_threshold entries[];
};

struct mem_cgroup_thresholds {
	/* Primary thresholds array */
	struct mem_cgroup_threshold_ary *primary;
	/*
	 * Spare threshold array.
	 * This is needed to make mem_cgroup_unregister_event() "never fail".
	 * It must be able to store at least primary->size - 1 entries.
	 */
	struct mem_cgroup_threshold_ary *spare;
};

/*
 * Remember the four most recent foreign writebacks with dirty pages in this
 * cgroup. Inode sharing is expected to be uncommon and, even if we miss
 * one in a given round, we're likely to catch it later if it keeps
 * foreign-dirtying, so a fairly low count should be enough.
 *
 * See mem_cgroup_track_foreign_dirty_slowpath() for details.
 */
#define MEMCG_CGWB_FRN_CNT	4

struct memcg_cgwb_frn {
	u64 bdi_id;			/* bdi->id of the foreign inode */
	int memcg_id;			/* memcg->css.id of foreign inode */
	u64 at;				/* jiffies_64 at the time of dirtying */
	struct wb_completion done;	/* tracks in-flight foreign writebacks */
};

/*
 * Bucket for arbitrarily byte-sized objects charged to a memory
 * cgroup. The bucket can be reparented in one piece when the cgroup
 * is destroyed, without having to round up the individual references
 * of all live memory objects in the wild.
 */
struct obj_cgroup {
	struct percpu_ref refcnt;
	struct mem_cgroup *memcg;
	atomic_t nr_charged_bytes;
	union {
		struct list_head list; /* protected by objcg_lock */
		struct rcu_head rcu;
	};
};

/*
 * The memory controller data structure. The memory controller controls both
 * page cache and RSS per cgroup. We would eventually like to provide
 * statistics based on the statistics developed by Rik van Riel for clock-pro,
 * to help the administrator determine what knobs to tune.
 */
struct mem_cgroup {
	struct cgroup_subsys_state css;

	/* Private memcg ID. Used to ID objects that outlive the cgroup */
	struct mem_cgroup_id id;

	/* Accounted resources */
	struct page_counter memory;		/* Both v1 & v2 */

	union {
		struct page_counter swap;	/* v2 only */
		struct page_counter memsw;	/* v1 only */
	};

	/* Legacy consumer-oriented counters */
	struct page_counter kmem;		/* v1 only */
	struct page_counter tcpmem;		/* v1 only */

	/* Range enforcement for interrupt charges */
	struct work_struct high_work;

#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
	unsigned long zswap_max;
#endif

	unsigned long soft_limit;

	/* vmpressure notifications */
	struct vmpressure vmpressure;

	/*
	 * Should the OOM killer kill all tasks belonging to this cgroup,
	 * if it has to kill one?
	 */
	bool oom_group;

	/* protected by memcg_oom_lock */
	bool oom_lock;
	int under_oom;

	int	swappiness;
	/* OOM-Killer disable */
	int	oom_kill_disable;

	/* memory.events and memory.events.local */
	struct cgroup_file events_file;
	struct cgroup_file events_local_file;

	/* handle for "memory.swap.events" */
	struct cgroup_file swap_events_file;

	/* protect arrays of thresholds */
	struct mutex thresholds_lock;

	/* thresholds for memory usage. RCU-protected */
	struct mem_cgroup_thresholds thresholds;

	/* thresholds for mem+swap usage. RCU-protected */
	struct mem_cgroup_thresholds memsw_thresholds;

	/* For oom notifier event fd */
	struct list_head oom_notify;

	/*
	 * Should we move charges of a task when the task is moved into this
	 * mem_cgroup? And what type of charges should we move?
	 */
	unsigned long move_charge_at_immigrate;
	/* taken only while moving_account > 0 */
	spinlock_t		move_lock;
	unsigned long		move_lock_flags;

	CACHELINE_PADDING(_pad1_);

	/* memory.stat */
	struct memcg_vmstats	vmstats;

	/* memory.events */
	atomic_long_t		memory_events[MEMCG_NR_MEMORY_EVENTS];
	atomic_long_t		memory_events_local[MEMCG_NR_MEMORY_EVENTS];

	unsigned long		socket_pressure;

	/* Legacy tcp memory accounting */
	bool			tcpmem_active;
	int			tcpmem_pressure;

#ifdef CONFIG_MEMCG_KMEM
	int kmemcg_id;
	struct obj_cgroup __rcu *objcg;
	/* list of inherited objcgs, protected by objcg_lock */
	struct list_head objcg_list;
#endif

	CACHELINE_PADDING(_pad2_);

	/*
	 * set > 0 if pages under this cgroup are moving to another cgroup.
	 */
	atomic_t		moving_account;
	struct task_struct	*move_lock_task;

	struct memcg_vmstats_percpu __percpu *vmstats_percpu;

#ifdef CONFIG_CGROUP_WRITEBACK
	struct list_head cgwb_list;
	struct wb_domain cgwb_domain;
	struct memcg_cgwb_frn cgwb_frn[MEMCG_CGWB_FRN_CNT];
#endif

	/* List of events which userspace wants to receive */
	struct list_head event_list;
	spinlock_t event_list_lock;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	struct deferred_split deferred_split_queue;
#endif

#ifdef CONFIG_LRU_GEN
	/* per-memcg mm_struct list */
	struct lru_gen_mm_list mm_list;
#endif

	struct mem_cgroup_per_node *nodeinfo[];
};

/*
 * Size of the first charge trial.
 * TODO: it may be necessary to use larger values on big machines, or to make
 * this dynamic based on the workload.
 */
#define MEMCG_CHARGE_BATCH 64U

extern struct mem_cgroup *root_mem_cgroup;

enum page_memcg_data_flags {
	/* page->memcg_data is a pointer to an objcgs vector */
	MEMCG_DATA_OBJCGS = (1UL << 0),
	/* page has been accounted as a non-slab kernel page */
	MEMCG_DATA_KMEM = (1UL << 1),
	/* the next bit after the last actual flag */
	__NR_MEMCG_DATA_FLAGS  = (1UL << 2),
};

#define MEMCG_DATA_FLAGS_MASK (__NR_MEMCG_DATA_FLAGS - 1)

static inline bool folio_memcg_kmem(struct folio *folio);

/*
 * After initialization, objcg->memcg always points at a valid memcg, but it
 * can be atomically swapped to the parent memcg.
 *
 * The caller must ensure that the returned memcg won't be released:
 * e.g. acquire the rcu_read_lock or css_set_lock.
 */
static inline struct mem_cgroup *obj_cgroup_memcg(struct obj_cgroup *objcg)
{
	return READ_ONCE(objcg->memcg);
}

/*
 * __folio_memcg - Get the memory cgroup associated with a non-kmem folio
 * @folio: Pointer to the folio.
 *
 * Returns a pointer to the memory cgroup associated with the folio,
 * or NULL. This function assumes that the folio is known to have a
 * proper memory cgroup pointer. It's not safe to call this function
 * against some types of folios, e.g. slab folios or ex-slab folios or
 * kmem folios.
 */
static inline struct mem_cgroup *__folio_memcg(struct folio *folio)
{
	unsigned long memcg_data = folio->memcg_data;

	VM_BUG_ON_FOLIO(folio_test_slab(folio), folio);
	VM_BUG_ON_FOLIO(memcg_data & MEMCG_DATA_OBJCGS, folio);
	VM_BUG_ON_FOLIO(memcg_data & MEMCG_DATA_KMEM, folio);

	return (struct mem_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
}

/*
 * __folio_objcg - get the object cgroup associated with a kmem folio.
 * @folio: Pointer to the folio.
 *
 * Returns a pointer to the object cgroup associated with the folio,
 * or NULL. This function assumes that the folio is known to have a
 * proper object cgroup pointer. It's not safe to call this function
 * against some types of folios, e.g. slab folios or ex-slab folios or
 * LRU folios.
 */
static inline struct obj_cgroup *__folio_objcg(struct folio *folio)
{
	unsigned long memcg_data = folio->memcg_data;

	VM_BUG_ON_FOLIO(folio_test_slab(folio), folio);
	VM_BUG_ON_FOLIO(memcg_data & MEMCG_DATA_OBJCGS, folio);
	VM_BUG_ON_FOLIO(!(memcg_data & MEMCG_DATA_KMEM), folio);

	return (struct obj_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
}

/*
 * folio_memcg - Get the memory cgroup associated with a folio.
 * @folio: Pointer to the folio.
 *
 * Returns a pointer to the memory cgroup associated with the folio,
 * or NULL. This function assumes that the folio is known to have a
 * proper memory cgroup pointer. It's not safe to call this function
 * against some types of folios, e.g. slab folios or ex-slab folios.
 *
 * For a non-kmem folio any of the following ensures folio and memcg binding
 * stability:
 *
 * - the folio lock
 * - LRU isolation
 * - lock_page_memcg()
 * - exclusive reference
 * - mem_cgroup_trylock_pages()
 *
 * For a kmem folio a caller should hold an rcu read lock to protect memcg
 * associated with a kmem folio from being released.
 */
static inline struct mem_cgroup *folio_memcg(struct folio *folio)
{
	if (folio_memcg_kmem(folio))
		return obj_cgroup_memcg(__folio_objcg(folio));
	return __folio_memcg(folio);
}

static inline struct mem_cgroup *page_memcg(struct page *page)
{
	return folio_memcg(page_folio(page));
}

/**
 * folio_memcg_rcu - Locklessly get the memory cgroup associated with a folio.
 * @folio: Pointer to the folio.
 *
 * This function assumes that the folio is known to have a
 * proper memory cgroup pointer. It's not safe to call this function
 * against some types of folios, e.g. slab folios or ex-slab folios.
 *
 * Return: A pointer to the memory cgroup associated with the folio,
 * or NULL.
 */
static inline struct mem_cgroup *folio_memcg_rcu(struct folio *folio)
{
	unsigned long memcg_data = READ_ONCE(folio->memcg_data);

	VM_BUG_ON_FOLIO(folio_test_slab(folio), folio);
	WARN_ON_ONCE(!rcu_read_lock_held());

	if (memcg_data & MEMCG_DATA_KMEM) {
		struct obj_cgroup *objcg;

		objcg = (void *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
		return obj_cgroup_memcg(objcg);
	}

	return (struct mem_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
}

/*
 * page_memcg_check - get the memory cgroup associated with a page
 * @page: a pointer to the page struct
 *
 * Returns a pointer to the memory cgroup associated with the page,
 * or NULL. This function, unlike page_memcg(), can take any page
 * as an argument. It has to be used in cases when it's not known if a page
 * has an associated memory cgroup pointer or an object cgroups vector or
 * an object cgroup.
 *
 * For a non-kmem page any of the following ensures page and memcg binding
 * stability:
 *
 * - the page lock
 * - LRU isolation
 * - lock_page_memcg()
 * - exclusive reference
 * - mem_cgroup_trylock_pages()
 *
 * For a kmem page a caller should hold an rcu read lock to protect memcg
 * associated with a kmem page from being released.
 */
static inline struct mem_cgroup *page_memcg_check(struct page *page)
{
	/*
	 * Because page->memcg_data might be changed asynchronously
	 * for slab pages, READ_ONCE() should be used here.
	 */
	unsigned long memcg_data = READ_ONCE(page->memcg_data);

	if (memcg_data & MEMCG_DATA_OBJCGS)
		return NULL;

	if (memcg_data & MEMCG_DATA_KMEM) {
		struct obj_cgroup *objcg;

		objcg = (void *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
		return obj_cgroup_memcg(objcg);
	}

	return (struct mem_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
}

static inline struct mem_cgroup *get_mem_cgroup_from_objcg(struct obj_cgroup *objcg)
{
	struct mem_cgroup *memcg;

	rcu_read_lock();
retry:
	memcg = obj_cgroup_memcg(objcg);
	if (unlikely(!css_tryget(&memcg->css)))
		goto retry;
	rcu_read_unlock();

	return memcg;
}
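
/*
 * Illustrative sketch (not part of this header's API): folio_memcg_rcu()
 * only promises a stable result for as long as the RCU read lock is held,
 * so a caller that merely wants to compare the folio's memcg against a
 * known cgroup could do:
 *
 *	bool match;
 *
 *	rcu_read_lock();
 *	match = folio_memcg_rcu(folio) == memcg;
 *	rcu_read_unlock();
 *
 * A caller that needs the memcg to stay around after unlocking must take a
 * reference first, e.g. with css_tryget(), as get_mem_cgroup_from_objcg()
 * does above.
 */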

#ifdef CONFIG_MEMCG_KMEM
/*
 * folio_memcg_kmem - Check if the folio has the memcg_kmem flag set.
 * @folio: Pointer to the folio.
 *
 * Checks if the folio has the MemcgKmem flag set. The caller must ensure
 * that the folio has an associated memory cgroup. It's not safe to call
 * this function against some types of folios, e.g. slab folios.
 */
static inline bool folio_memcg_kmem(struct folio *folio)
{
	VM_BUG_ON_PGFLAGS(PageTail(&folio->page), &folio->page);
	VM_BUG_ON_FOLIO(folio->memcg_data & MEMCG_DATA_OBJCGS, folio);
	return folio->memcg_data & MEMCG_DATA_KMEM;
}

#else
static inline bool folio_memcg_kmem(struct folio *folio)
{
	return false;
}

#endif

static inline bool PageMemcgKmem(struct page *page)
{
	return folio_memcg_kmem(page_folio(page));
}

static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
{
	return (memcg == root_mem_cgroup);
}

static inline bool mem_cgroup_disabled(void)
{
	return !cgroup_subsys_enabled(memory_cgrp_subsys);
}

static inline void mem_cgroup_protection(struct mem_cgroup *root,
					 struct mem_cgroup *memcg,
					 unsigned long *min,
					 unsigned long *low)
{
	*min = *low = 0;

	if (mem_cgroup_disabled())
		return;

	/*
	 * There is no reclaim protection applied to a targeted reclaim.
	 * We are special casing this specific case here because
	 * mem_cgroup_protected calculation is not robust enough to keep
	 * the protection invariant for calculated effective values for
	 * parallel reclaimers with different reclaim targets. This is
	 * especially a problem for tail memcgs (as they have pages on LRU)
	 * which would want to have effective values 0 for targeted reclaim
	 * but a different value for external reclaim.
	 *
	 * Example
	 * Let's have global and A's reclaim in parallel:
	 *  |
	 *  A (low=2G, usage = 3G, max = 3G, children_low_usage = 1.5G)
	 *  |\
	 *  | C (low = 1G, usage = 2.5G)
	 *  B (low = 1G, usage = 0.5G)
	 *
	 * For the global reclaim
	 *     A.elow = A.low
	 *     B.elow = min(B.usage, B.low) because children_low_usage <= A.elow
	 *     C.elow = min(C.usage, C.low)
	 *
	 * With the effective values resetting we have A reclaim
	 *     A.elow = 0
	 *     B.elow = B.low
	 *     C.elow = C.low
	 *
	 * If the global reclaim races with A's reclaim then
	 *     B.elow = C.elow = 0 because children_low_usage > A.elow
	 * is possible and reclaiming B would be violating the protection.
	 */
	if (root == memcg)
		return;

	*min = READ_ONCE(memcg->memory.emin);
	*low = READ_ONCE(memcg->memory.elow);
}

void mem_cgroup_calculate_protection(struct mem_cgroup *root,
				     struct mem_cgroup *memcg);

static inline bool mem_cgroup_supports_protection(struct mem_cgroup *memcg)
{
	/*
	 * The root memcg doesn't account charges, and doesn't support
	 * protection.
	 */
	return !mem_cgroup_disabled() && !mem_cgroup_is_root(memcg);
}

static inline bool mem_cgroup_below_low(struct mem_cgroup *memcg)
{
	if (!mem_cgroup_supports_protection(memcg))
		return false;

	return READ_ONCE(memcg->memory.elow) >=
		page_counter_read(&memcg->memory);
}

static inline bool mem_cgroup_below_min(struct mem_cgroup *memcg)
{
	if (!mem_cgroup_supports_protection(memcg))
		return false;

	return READ_ONCE(memcg->memory.emin) >=
		page_counter_read(&memcg->memory);
}
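
/*
 * Illustrative sketch (not part of this header's API): a reclaimer walking a
 * subtree is expected to refresh the effective protection values and then
 * honour memory.min/memory.low with the helpers above, roughly:
 *
 *	mem_cgroup_calculate_protection(target_memcg, memcg);
 *
 *	if (mem_cgroup_below_min(memcg)) {
 *		continue;
 *	} else if (mem_cgroup_below_low(memcg)) {
 *		if (!sc->memcg_low_reclaim) {
 *			sc->memcg_low_skipped = 1;
 *			continue;
 *		}
 *		memcg_memory_event(memcg, MEMCG_LOW);
 *	}
 *
 * Here sc stands for the reclaim scan_control, an assumption borrowed from
 * mm/vmscan.c; the authoritative logic lives there.
 */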

int __mem_cgroup_charge(struct folio *folio, struct mm_struct *mm, gfp_t gfp);

/**
 * mem_cgroup_charge - Charge a newly allocated folio to a cgroup.
 * @folio: Folio to charge.
 * @mm: mm context of the allocating task.
 * @gfp: Reclaim mode.
 *
 * Try to charge @folio to the memcg that @mm belongs to, reclaiming
 * pages according to @gfp if necessary. If @mm is NULL, try to
 * charge to the active memcg.
 *
 * Do not use this for folios allocated for swapin.
 *
 * Return: 0 on success. Otherwise, an error code is returned.
 */
static inline int mem_cgroup_charge(struct folio *folio, struct mm_struct *mm,
				    gfp_t gfp)
{
	if (mem_cgroup_disabled())
		return 0;
	return __mem_cgroup_charge(folio, mm, gfp);
}

int mem_cgroup_swapin_charge_folio(struct folio *folio, struct mm_struct *mm,
				   gfp_t gfp, swp_entry_t entry);
void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry);

void __mem_cgroup_uncharge(struct folio *folio);

/**
 * mem_cgroup_uncharge - Uncharge a folio.
 * @folio: Folio to uncharge.
 *
 * Uncharge a folio previously charged with mem_cgroup_charge().
 */
static inline void mem_cgroup_uncharge(struct folio *folio)
{
	if (mem_cgroup_disabled())
		return;
	__mem_cgroup_uncharge(folio);
}

void __mem_cgroup_uncharge_list(struct list_head *page_list);
static inline void mem_cgroup_uncharge_list(struct list_head *page_list)
{
	if (mem_cgroup_disabled())
		return;
	__mem_cgroup_uncharge_list(page_list);
}

void mem_cgroup_migrate(struct folio *old, struct folio *new);
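
/*
 * Illustrative sketch (not part of this header's API): a typical caller
 * charges a freshly allocated folio before publishing it and drops the
 * charge only if insertion fails; on the success path the charge is
 * normally released when the folio is finally freed. Roughly:
 *
 *	folio = folio_alloc(gfp, order);
 *	if (!folio)
 *		return -ENOMEM;
 *	if (mem_cgroup_charge(folio, mm, gfp)) {
 *		folio_put(folio);
 *		return -ENOMEM;
 *	}
 *
 * Swapped-in folios must use mem_cgroup_swapin_charge_folio() instead, as
 * noted in the kernel-doc above.
 */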

/**
 * mem_cgroup_lruvec - get the lru list vector for a memcg & node
 * @memcg: memcg of the wanted lruvec
 * @pgdat: pglist_data
 *
 * Returns the lru list vector holding pages for a given @memcg &
 * @pgdat combination. This can be the node lruvec, if the memory
 * controller is disabled.
 */
static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg,
					       struct pglist_data *pgdat)
{
	struct mem_cgroup_per_node *mz;
	struct lruvec *lruvec;

	if (mem_cgroup_disabled()) {
		lruvec = &pgdat->__lruvec;
		goto out;
	}

	if (!memcg)
		memcg = root_mem_cgroup;

	mz = memcg->nodeinfo[pgdat->node_id];
	lruvec = &mz->lruvec;
out:
	/*
	 * Since a node can be onlined after the mem_cgroup was created,
	 * we have to be prepared to initialize lruvec->pgdat here;
	 * and if offlined then reonlined, we need to reinitialize it.
	 */
	if (unlikely(lruvec->pgdat != pgdat))
		lruvec->pgdat = pgdat;
	return lruvec;
}

/**
 * folio_lruvec - return lruvec for isolating/putting an LRU folio
 * @folio: Pointer to the folio.
 *
 * This function relies on folio->mem_cgroup being stable.
 */
static inline struct lruvec *folio_lruvec(struct folio *folio)
{
	struct mem_cgroup *memcg = folio_memcg(folio);

	VM_WARN_ON_ONCE_FOLIO(!memcg && !mem_cgroup_disabled(), folio);
	return mem_cgroup_lruvec(memcg, folio_pgdat(folio));
}

struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);

struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm);

struct lruvec *folio_lruvec_lock(struct folio *folio);
struct lruvec *folio_lruvec_lock_irq(struct folio *folio);
struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio,
					 unsigned long *flags);

#ifdef CONFIG_DEBUG_VM
void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio);
#else
static inline
void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio)
{
}
#endif

static inline
struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css)
{
	return css ? container_of(css, struct mem_cgroup, css) : NULL;
}

static inline bool obj_cgroup_tryget(struct obj_cgroup *objcg)
{
	return percpu_ref_tryget(&objcg->refcnt);
}

static inline void obj_cgroup_get(struct obj_cgroup *objcg)
{
	percpu_ref_get(&objcg->refcnt);
}

static inline void obj_cgroup_get_many(struct obj_cgroup *objcg,
				       unsigned long nr)
{
	percpu_ref_get_many(&objcg->refcnt, nr);
}

static inline void obj_cgroup_put(struct obj_cgroup *objcg)
{
	percpu_ref_put(&objcg->refcnt);
}

static inline void mem_cgroup_put(struct mem_cgroup *memcg)
{
	if (memcg)
		css_put(&memcg->css);
}

#define mem_cgroup_from_counter(counter, member)	\
	container_of(counter, struct mem_cgroup, member)

struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
				   struct mem_cgroup *,
				   struct mem_cgroup_reclaim_cookie *);
void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
int mem_cgroup_scan_tasks(struct mem_cgroup *,
			  int (*)(struct task_struct *, void *), void *);
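
/*
 * Illustrative sketch (not part of this header's API): mem_cgroup_iter()
 * walks the subtree rooted at @root and holds a reference on the returned
 * position, so the canonical traversal pattern is:
 *
 *	struct mem_cgroup *memcg = mem_cgroup_iter(root, NULL, NULL);
 *
 *	do {
 *		if (need_to_stop_early) {
 *			mem_cgroup_iter_break(root, memcg);
 *			break;
 *		}
 *	} while ((memcg = mem_cgroup_iter(root, memcg, NULL)));
 *
 * need_to_stop_early is a placeholder; breaking out of the loop without
 * calling mem_cgroup_iter_break() would leak the reference held on @memcg.
 */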

static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
{
	if (mem_cgroup_disabled())
		return 0;

	return memcg->id.id;
}
struct mem_cgroup *mem_cgroup_from_id(unsigned short id);

#ifdef CONFIG_SHRINKER_DEBUG
static inline unsigned long mem_cgroup_ino(struct mem_cgroup *memcg)
{
	return memcg ? cgroup_ino(memcg->css.cgroup) : 0;
}

struct mem_cgroup *mem_cgroup_get_from_ino(unsigned long ino);
#endif

static inline struct mem_cgroup *mem_cgroup_from_seq(struct seq_file *m)
{
	return mem_cgroup_from_css(seq_css(m));
}

static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
{
	struct mem_cgroup_per_node *mz;

	if (mem_cgroup_disabled())
		return NULL;

	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	return mz->memcg;
}

/**
 * parent_mem_cgroup - find the accounting parent of a memcg
 * @memcg: memcg whose parent to find
 *
 * Returns the parent memcg, or NULL if this is the root or the memory
 * controller is in legacy no-hierarchy mode.
 */
static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
{
	return mem_cgroup_from_css(memcg->css.parent);
}

static inline bool mem_cgroup_is_descendant(struct mem_cgroup *memcg,
					    struct mem_cgroup *root)
{
	if (root == memcg)
		return true;
	return cgroup_is_descendant(memcg->css.cgroup, root->css.cgroup);
}

static inline bool mm_match_cgroup(struct mm_struct *mm,
				   struct mem_cgroup *memcg)
{
	struct mem_cgroup *task_memcg;
	bool match = false;

	rcu_read_lock();
	task_memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
	if (task_memcg)
		match = mem_cgroup_is_descendant(task_memcg, memcg);
	rcu_read_unlock();
	return match;
}

struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page);
ino_t page_cgroup_ino(struct page *page);

static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
{
	if (mem_cgroup_disabled())
		return true;
	return !!(memcg->css.flags & CSS_ONLINE);
}

void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
				int zid, int nr_pages);

static inline
unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
					   enum lru_list lru, int zone_idx)
{
	struct mem_cgroup_per_node *mz;

	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	return READ_ONCE(mz->lru_zone_size[zone_idx][lru]);
}

void mem_cgroup_handle_over_high(void);

unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg);

unsigned long mem_cgroup_size(struct mem_cgroup *memcg);

void mem_cgroup_print_oom_context(struct mem_cgroup *memcg,
				  struct task_struct *p);

void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg);

static inline void mem_cgroup_enter_user_fault(void)
{
	WARN_ON(current->in_user_fault);
	current->in_user_fault = 1;
}

static inline void mem_cgroup_exit_user_fault(void)
{
	WARN_ON(!current->in_user_fault);
	current->in_user_fault = 0;
}

static inline bool task_in_memcg_oom(struct task_struct *p)
{
	return p->memcg_in_oom;
}

bool mem_cgroup_oom_synchronize(bool wait);
struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim,
					    struct mem_cgroup *oom_domain);
void mem_cgroup_print_oom_group(struct mem_cgroup *memcg);

void folio_memcg_lock(struct folio *folio);
void folio_memcg_unlock(struct folio *folio);
void lock_page_memcg(struct page *page);
void unlock_page_memcg(struct page *page);

void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val);

/* try to stabilize folio_memcg() for all the pages in a memcg */
static inline bool mem_cgroup_trylock_pages(struct mem_cgroup *memcg)
{
	rcu_read_lock();

	if (mem_cgroup_disabled() || !atomic_read(&memcg->moving_account))
		return true;

	rcu_read_unlock();
	return false;
}

static inline void mem_cgroup_unlock_pages(void)
{
	rcu_read_unlock();
}
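
/*
 * Illustrative sketch (not part of this header's API): the trylock above only
 * fails while charges are actively being moved out of @memcg, so a reader
 * that wants folio_memcg() to stay stable over a batch of pages can do:
 *
 *	if (!mem_cgroup_trylock_pages(memcg))
 *		return;
 *
 *	... walk pages; folio_memcg() results are stable here ...
 *
 *	mem_cgroup_unlock_pages();
 *
 * A successful trylock leaves the RCU read lock held until
 * mem_cgroup_unlock_pages() is called, so the protected section must not
 * sleep.
 */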

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void mod_memcg_state(struct mem_cgroup *memcg,
				   int idx, int val)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_memcg_state(memcg, idx, val);
	local_irq_restore(flags);
}

static inline void mod_memcg_page_state(struct page *page,
					int idx, int val)
{
	struct mem_cgroup *memcg;

	if (mem_cgroup_disabled())
		return;

	rcu_read_lock();
	memcg = page_memcg(page);
	if (memcg)
		mod_memcg_state(memcg, idx, val);
	rcu_read_unlock();
}

static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
{
	long x = READ_ONCE(memcg->vmstats.state[idx]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
					      enum node_stat_item idx)
{
	struct mem_cgroup_per_node *pn;
	long x;

	if (mem_cgroup_disabled())
		return node_page_state(lruvec_pgdat(lruvec), idx);

	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	x = READ_ONCE(pn->lruvec_stats.state[idx]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
						    enum node_stat_item idx)
{
	struct mem_cgroup_per_node *pn;
	long x = 0;
	int cpu;

	if (mem_cgroup_disabled())
		return node_page_state(lruvec_pgdat(lruvec), idx);

	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	for_each_possible_cpu(cpu)
		x += per_cpu(pn->lruvec_stats_percpu->state[idx], cpu);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

void mem_cgroup_flush_stats(void);
void mem_cgroup_flush_stats_delayed(void);
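
/*
 * Illustrative sketch (not part of this header's API): the aggregated
 * counters read by memcg_page_state() and lruvec_page_state() are only
 * periodically propagated from the per-CPU buffers, so a reader that needs
 * reasonably fresh numbers flushes first:
 *
 *	mem_cgroup_flush_stats();
 *	file_pages = memcg_page_state(memcg, NR_FILE_PAGES);
 *	dirty = lruvec_page_state(lruvec, NR_FILE_DIRTY);
 *
 * Values are in pages unless the item name says otherwise (e.g. the _B
 * suffixed items in enum memcg_stat_item are in bytes).
 */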

void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
			      int val);
void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val);

static inline void mod_lruvec_kmem_state(void *p, enum node_stat_item idx,
					 int val)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_lruvec_kmem_state(p, idx, val);
	local_irq_restore(flags);
}

static inline void mod_memcg_lruvec_state(struct lruvec *lruvec,
					  enum node_stat_item idx, int val)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_memcg_lruvec_state(lruvec, idx, val);
	local_irq_restore(flags);
}

void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
			  unsigned long count);

static inline void count_memcg_events(struct mem_cgroup *memcg,
				      enum vm_event_item idx,
				      unsigned long count)
{
	unsigned long flags;

	local_irq_save(flags);
	__count_memcg_events(memcg, idx, count);
	local_irq_restore(flags);
}

static inline void count_memcg_page_event(struct page *page,
					  enum vm_event_item idx)
{
	struct mem_cgroup *memcg = page_memcg(page);

	if (memcg)
		count_memcg_events(memcg, idx, 1);
}

static inline void count_memcg_folio_events(struct folio *folio,
		enum vm_event_item idx, unsigned long nr)
{
	struct mem_cgroup *memcg = folio_memcg(folio);

	if (memcg)
		count_memcg_events(memcg, idx, nr);
}

static inline void count_memcg_event_mm(struct mm_struct *mm,
					enum vm_event_item idx)
{
	struct mem_cgroup *memcg;

	if (mem_cgroup_disabled())
		return;

	rcu_read_lock();
	memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
	if (likely(memcg))
		count_memcg_events(memcg, idx, 1);
	rcu_read_unlock();
}

static inline void memcg_memory_event(struct mem_cgroup *memcg,
				      enum memcg_memory_event event)
{
	bool swap_event = event == MEMCG_SWAP_HIGH || event == MEMCG_SWAP_MAX ||
			  event == MEMCG_SWAP_FAIL;

	atomic_long_inc(&memcg->memory_events_local[event]);
	if (!swap_event)
		cgroup_file_notify(&memcg->events_local_file);

	do {
		atomic_long_inc(&memcg->memory_events[event]);
		if (swap_event)
			cgroup_file_notify(&memcg->swap_events_file);
		else
			cgroup_file_notify(&memcg->events_file);

		if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
			break;
		if (cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_LOCAL_EVENTS)
			break;
	} while ((memcg = parent_mem_cgroup(memcg)) &&
		 !mem_cgroup_is_root(memcg));
}

static inline void memcg_memory_event_mm(struct mm_struct *mm,
					 enum memcg_memory_event event)
{
	struct mem_cgroup *memcg;

	if (mem_cgroup_disabled())
		return;

	rcu_read_lock();
	memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
	if (likely(memcg))
		memcg_memory_event(memcg, event);
	rcu_read_unlock();
}

void split_page_memcg(struct page *head, unsigned int nr);

unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
					    gfp_t gfp_mask,
					    unsigned long *total_scanned);

#else /* CONFIG_MEMCG */

#define MEM_CGROUP_ID_SHIFT	0
#define MEM_CGROUP_ID_MAX	0

static inline struct mem_cgroup *folio_memcg(struct folio *folio)
{
	return NULL;
}

static inline struct mem_cgroup *page_memcg(struct page *page)
{
	return NULL;
}

static inline struct mem_cgroup *folio_memcg_rcu(struct folio *folio)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return NULL;
}

static inline struct mem_cgroup *page_memcg_check(struct page *page)
{
	return NULL;
}

static inline bool folio_memcg_kmem(struct folio *folio)
{
	return false;
}

static inline bool PageMemcgKmem(struct page *page)
{
	return false;
}

static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
{
	return true;
}

static inline bool mem_cgroup_disabled(void)
{
	return true;
}

static inline void memcg_memory_event(struct mem_cgroup *memcg,
				      enum memcg_memory_event event)
{
}

static inline void memcg_memory_event_mm(struct mm_struct *mm,
					 enum memcg_memory_event event)
{
}

static inline void mem_cgroup_protection(struct mem_cgroup *root,
					 struct mem_cgroup *memcg,
					 unsigned long *min,
					 unsigned long *low)
{
	*min = *low = 0;
}

static inline void mem_cgroup_calculate_protection(struct mem_cgroup *root,
						   struct mem_cgroup *memcg)
{
}

static inline bool mem_cgroup_below_low(struct mem_cgroup *memcg)
{
	return false;
}

static inline bool mem_cgroup_below_min(struct mem_cgroup *memcg)
{
	return false;
}

static inline int mem_cgroup_charge(struct folio *folio,
				    struct mm_struct *mm, gfp_t gfp)
{
	return 0;
}

static inline int mem_cgroup_swapin_charge_folio(struct folio *folio,
			struct mm_struct *mm, gfp_t gfp, swp_entry_t entry)
{
	return 0;
}

static inline void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry)
{
}

static inline void mem_cgroup_uncharge(struct folio *folio)
{
}

static inline void mem_cgroup_uncharge_list(struct list_head *page_list)
{
}

static inline void mem_cgroup_migrate(struct folio *old, struct folio *new)
{
}

static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg,
					       struct pglist_data *pgdat)
{
	return &pgdat->__lruvec;
}

static inline struct lruvec *folio_lruvec(struct folio *folio)
{
	struct pglist_data *pgdat = folio_pgdat(folio);
	return &pgdat->__lruvec;
}

static inline
void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio)
{
}

static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
{
	return NULL;
}

static inline bool mm_match_cgroup(struct mm_struct *mm,
				   struct mem_cgroup *memcg)
{
	return true;
}

static inline struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
{
	return NULL;
}

static inline
struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css)
{
	return NULL;
}

static inline void obj_cgroup_put(struct obj_cgroup *objcg)
{
}

static inline void mem_cgroup_put(struct mem_cgroup *memcg)
{
}

static inline struct lruvec *folio_lruvec_lock(struct folio *folio)
{
	struct pglist_data *pgdat = folio_pgdat(folio);

	spin_lock(&pgdat->__lruvec.lru_lock);
	return &pgdat->__lruvec;
}

static inline struct lruvec *folio_lruvec_lock_irq(struct folio *folio)
{
	struct pglist_data *pgdat = folio_pgdat(folio);

	spin_lock_irq(&pgdat->__lruvec.lru_lock);
	return &pgdat->__lruvec;
}

static inline struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio,
						       unsigned long *flagsp)
{
	struct pglist_data *pgdat = folio_pgdat(folio);

	spin_lock_irqsave(&pgdat->__lruvec.lru_lock, *flagsp);
	return &pgdat->__lruvec;
}

static inline struct mem_cgroup *
mem_cgroup_iter(struct mem_cgroup *root,
		struct mem_cgroup *prev,
		struct mem_cgroup_reclaim_cookie *reclaim)
{
	return NULL;
}

static inline void mem_cgroup_iter_break(struct mem_cgroup *root,
					 struct mem_cgroup *prev)
{
}

static inline int mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
		int (*fn)(struct task_struct *, void *), void *arg)
{
	return 0;
}

static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
{
	return 0;
}

static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
{
	WARN_ON_ONCE(id);
	/* XXX: This should always return root_mem_cgroup */
	return NULL;
}

#ifdef CONFIG_SHRINKER_DEBUG
static inline unsigned long mem_cgroup_ino(struct mem_cgroup *memcg)
{
	return 0;
}

static inline struct mem_cgroup *mem_cgroup_get_from_ino(unsigned long ino)
{
	return NULL;
}
#endif

static inline struct mem_cgroup *mem_cgroup_from_seq(struct seq_file *m)
{
	return NULL;
}

static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
{
	return NULL;
}

static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
{
	return true;
}

static inline
unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
					   enum lru_list lru, int zone_idx)
{
	return 0;
}

static inline unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
{
	return 0;
}

static inline unsigned long mem_cgroup_size(struct mem_cgroup *memcg)
{
	return 0;
}

static inline void
mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
{
}

static inline void
mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
{
}

static inline void lock_page_memcg(struct page *page)
{
}

static inline void unlock_page_memcg(struct page *page)
{
}

static inline void folio_memcg_lock(struct folio *folio)
{
}

static inline void folio_memcg_unlock(struct folio *folio)
{
}

static inline bool mem_cgroup_trylock_pages(struct mem_cgroup *memcg)
{
	/* to match folio_memcg_rcu() */
	rcu_read_lock();
	return true;
}

static inline void mem_cgroup_unlock_pages(void)
{
	rcu_read_unlock();
}

static inline void mem_cgroup_handle_over_high(void)
{
}

static inline void mem_cgroup_enter_user_fault(void)
{
}

static inline void mem_cgroup_exit_user_fault(void)
{
}

static inline bool task_in_memcg_oom(struct task_struct *p)
{
	return false;
}

static inline bool mem_cgroup_oom_synchronize(bool wait)
{
	return false;
}

static inline struct mem_cgroup *mem_cgroup_get_oom_group(
	struct task_struct *victim, struct mem_cgroup *oom_domain)
{
	return NULL;
}

static inline void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
{
}

static inline void __mod_memcg_state(struct mem_cgroup *memcg,
				     int idx,
				     int nr)
{
}

static inline void mod_memcg_state(struct mem_cgroup *memcg,
				   int idx,
				   int nr)
{
}

static inline void mod_memcg_page_state(struct page *page,
					int idx, int val)
{
}

static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
{
	return 0;
}

static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
					      enum node_stat_item idx)
{
	return node_page_state(lruvec_pgdat(lruvec), idx);
}

static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
						    enum node_stat_item idx)
{
	return node_page_state(lruvec_pgdat(lruvec), idx);
}

static inline void mem_cgroup_flush_stats(void)
{
}

static inline void mem_cgroup_flush_stats_delayed(void)
{
}

static inline void __mod_memcg_lruvec_state(struct lruvec *lruvec,
					    enum node_stat_item idx, int val)
{
}

static inline void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx,
					   int val)
{
	struct page *page = virt_to_head_page(p);

	__mod_node_page_state(page_pgdat(page), idx, val);
}

static inline void mod_lruvec_kmem_state(void *p, enum node_stat_item idx,
					 int val)
{
	struct page *page = virt_to_head_page(p);

	mod_node_page_state(page_pgdat(page), idx, val);
}

static inline void count_memcg_events(struct mem_cgroup *memcg,
				      enum vm_event_item idx,
				      unsigned long count)
{
}

static inline void __count_memcg_events(struct mem_cgroup *memcg,
					enum vm_event_item idx,
					unsigned long count)
{
}

static inline void count_memcg_page_event(struct page *page,
					  int idx)
{
}

static inline void count_memcg_folio_events(struct folio *folio,
		enum vm_event_item idx, unsigned long nr)
{
}

static inline
void count_memcg_event_mm(struct mm_struct *mm, enum vm_event_item idx)
{
}

static inline void split_page_memcg(struct page *head, unsigned int nr)
{
}

static inline
unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
					    gfp_t gfp_mask,
					    unsigned long *total_scanned)
{
	return 0;
}
#endif /* CONFIG_MEMCG */

static inline void __inc_lruvec_kmem_state(void *p, enum node_stat_item idx)
{
	__mod_lruvec_kmem_state(p, idx, 1);
}

static inline void __dec_lruvec_kmem_state(void *p, enum node_stat_item idx)
{
	__mod_lruvec_kmem_state(p, idx, -1);
}

static inline struct lruvec *parent_lruvec(struct lruvec *lruvec)
{
	struct mem_cgroup *memcg;

	memcg = lruvec_memcg(lruvec);
	if (!memcg)
		return NULL;
	memcg = parent_mem_cgroup(memcg);
	if (!memcg)
		return NULL;
	return mem_cgroup_lruvec(memcg, lruvec_pgdat(lruvec));
}

static inline void unlock_page_lruvec(struct lruvec *lruvec)
{
	spin_unlock(&lruvec->lru_lock);
}

static inline void unlock_page_lruvec_irq(struct lruvec *lruvec)
{
	spin_unlock_irq(&lruvec->lru_lock);
}

static inline void unlock_page_lruvec_irqrestore(struct lruvec *lruvec,
						 unsigned long flags)
{
	spin_unlock_irqrestore(&lruvec->lru_lock, flags);
}

/* Test requires a stable page->memcg binding, see page_memcg() */
static inline bool folio_matches_lruvec(struct folio *folio,
					struct lruvec *lruvec)
{
	return lruvec_pgdat(lruvec) == folio_pgdat(folio) &&
	       lruvec_memcg(lruvec) == folio_memcg(folio);
}

/* Don't lock again if the folio's lruvec is already locked */
static inline struct lruvec *folio_lruvec_relock_irq(struct folio *folio,
		struct lruvec *locked_lruvec)
{
	if (locked_lruvec) {
		if (folio_matches_lruvec(folio, locked_lruvec))
			return locked_lruvec;

		unlock_page_lruvec_irq(locked_lruvec);
	}

	return folio_lruvec_lock_irq(folio);
}

/* Don't lock again if the folio's lruvec is already locked */
static inline struct lruvec *folio_lruvec_relock_irqsave(struct folio *folio,
		struct lruvec *locked_lruvec, unsigned long *flags)
{
	if (locked_lruvec) {
		if (folio_matches_lruvec(folio, locked_lruvec))
			return locked_lruvec;

		unlock_page_lruvec_irqrestore(locked_lruvec, *flags);
	}

	return folio_lruvec_lock_irqsave(folio, flags);
}
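
/*
 * Illustrative sketch (not part of this header's API): the relock helpers
 * above are meant for walking a mixed batch of folios while holding at most
 * one lruvec lock at a time, e.g.:
 *
 *	struct lruvec *lruvec = NULL;
 *	unsigned long flags;
 *	struct folio *folio;
 *
 *	list_for_each_entry(folio, &folios, lru) {
 *		lruvec = folio_lruvec_relock_irqsave(folio, lruvec, &flags);
 *		... operate on folio under its lruvec lock ...
 *	}
 *	if (lruvec)
 *		unlock_page_lruvec_irqrestore(lruvec, flags);
 *
 * The lock is only dropped and re-taken when consecutive folios belong to
 * different lruvecs; "folios" is a placeholder list head.
 */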

#ifdef CONFIG_CGROUP_WRITEBACK

struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb);
void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
			 unsigned long *pheadroom, unsigned long *pdirty,
			 unsigned long *pwriteback);

void mem_cgroup_track_foreign_dirty_slowpath(struct folio *folio,
					     struct bdi_writeback *wb);

static inline void mem_cgroup_track_foreign_dirty(struct folio *folio,
						  struct bdi_writeback *wb)
{
	if (mem_cgroup_disabled())
		return;

	if (unlikely(&folio_memcg(folio)->css != wb->memcg_css))
		mem_cgroup_track_foreign_dirty_slowpath(folio, wb);
}

void mem_cgroup_flush_foreign(struct bdi_writeback *wb);

#else	/* CONFIG_CGROUP_WRITEBACK */

static inline struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
{
	return NULL;
}

static inline void mem_cgroup_wb_stats(struct bdi_writeback *wb,
				       unsigned long *pfilepages,
				       unsigned long *pheadroom,
				       unsigned long *pdirty,
				       unsigned long *pwriteback)
{
}

static inline void mem_cgroup_track_foreign_dirty(struct folio *folio,
						  struct bdi_writeback *wb)
{
}

static inline void mem_cgroup_flush_foreign(struct bdi_writeback *wb)
{
}

#endif	/* CONFIG_CGROUP_WRITEBACK */

struct sock;
bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages,
			     gfp_t gfp_mask);
void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
#ifdef CONFIG_MEMCG
extern struct static_key_false memcg_sockets_enabled_key;
#define mem_cgroup_sockets_enabled static_branch_unlikely(&memcg_sockets_enabled_key)
void mem_cgroup_sk_alloc(struct sock *sk);
void mem_cgroup_sk_free(struct sock *sk);
static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
{
	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_pressure)
		return true;
	do {
		if (time_before(jiffies, READ_ONCE(memcg->socket_pressure)))
			return true;
	} while ((memcg = parent_mem_cgroup(memcg)));
	return false;
}

int alloc_shrinker_info(struct mem_cgroup *memcg);
void free_shrinker_info(struct mem_cgroup *memcg);
void set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id);
void reparent_shrinker_deferred(struct mem_cgroup *memcg);
#else
#define mem_cgroup_sockets_enabled 0
static inline void mem_cgroup_sk_alloc(struct sock *sk) { }
static inline void mem_cgroup_sk_free(struct sock *sk) { }
static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
{
	return false;
}

static inline void set_shrinker_bit(struct mem_cgroup *memcg,
				    int nid, int shrinker_id)
{
}
#endif

#ifdef CONFIG_MEMCG_KMEM
bool mem_cgroup_kmem_disabled(void);
int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order);
void __memcg_kmem_uncharge_page(struct page *page, int order);

struct obj_cgroup *get_obj_cgroup_from_current(void);
struct obj_cgroup *get_obj_cgroup_from_page(struct page *page);

int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size);
void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size);
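
/*
 * Illustrative sketch (not part of this header's API): a byte-granular
 * consumer that wants its allocations attributed to the current task's
 * cgroup could pin an objcg and charge against it, e.g.:
 *
 *	struct obj_cgroup *objcg = get_obj_cgroup_from_current();
 *
 *	if (objcg && obj_cgroup_charge(objcg, GFP_KERNEL, size)) {
 *		obj_cgroup_put(objcg);
 *		return -ENOMEM;
 *	}
 *	... remember objcg together with the object; later: ...
 *	if (objcg) {
 *		obj_cgroup_uncharge(objcg, size);
 *		obj_cgroup_put(objcg);
 *	}
 *
 * obj_cgroup_charge() returns 0 on success; "size" is a placeholder for the
 * object size in bytes. The slab allocator follows roughly this pattern.
 */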

extern struct static_key_false memcg_kmem_enabled_key;

static inline bool memcg_kmem_enabled(void)
{
	return static_branch_likely(&memcg_kmem_enabled_key);
}

static inline int memcg_kmem_charge_page(struct page *page, gfp_t gfp,
					 int order)
{
	if (memcg_kmem_enabled())
		return __memcg_kmem_charge_page(page, gfp, order);
	return 0;
}

static inline void memcg_kmem_uncharge_page(struct page *page, int order)
{
	if (memcg_kmem_enabled())
		__memcg_kmem_uncharge_page(page, order);
}

/*
 * A helper for accessing memcg's kmem_id, used for getting
 * corresponding LRU lists.
 */
static inline int memcg_kmem_id(struct mem_cgroup *memcg)
{
	return memcg ? memcg->kmemcg_id : -1;
}

struct mem_cgroup *mem_cgroup_from_obj(void *p);
struct mem_cgroup *mem_cgroup_from_slab_obj(void *p);

static inline void count_objcg_event(struct obj_cgroup *objcg,
				     enum vm_event_item idx)
{
	struct mem_cgroup *memcg;

	if (mem_cgroup_kmem_disabled())
		return;

	rcu_read_lock();
	memcg = obj_cgroup_memcg(objcg);
	count_memcg_events(memcg, idx, 1);
	rcu_read_unlock();
}

/**
 * get_mem_cgroup_from_obj - get a memcg associated with passed kernel object.
 * @p: pointer to object from which memcg should be extracted. It can be NULL.
 *
 * Retrieves the memory cgroup into which the memory of the pointed kernel
 * object is accounted. If memcg is found, its reference is taken.
 * If a passed kernel object is uncharged, or if proper memcg cannot be found,
 * as well as if mem_cgroup is disabled, NULL is returned.
 *
 * Return: valid memcg pointer with taken reference or NULL.
 */
static inline struct mem_cgroup *get_mem_cgroup_from_obj(void *p)
{
	struct mem_cgroup *memcg;

	rcu_read_lock();
	do {
		memcg = mem_cgroup_from_obj(p);
	} while (memcg && !css_tryget(&memcg->css));
	rcu_read_unlock();
	return memcg;
}
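
/*
 * Illustrative sketch (not part of this header's API): the reference taken by
 * get_mem_cgroup_from_obj() must be dropped with mem_cgroup_put() once the
 * caller is done, e.g.:
 *
 *	struct mem_cgroup *memcg = get_mem_cgroup_from_obj(ptr);
 *
 *	if (memcg) {
 *		... inspect or account against memcg ...
 *	}
 *	mem_cgroup_put(memcg);
 *
 * mem_cgroup_put() accepts NULL, so the not-found case needs no special
 * casing; "ptr" is a placeholder for any slab- or kmem-charged object.
 */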

/**
 * mem_cgroup_or_root - always returns a pointer to a valid memory cgroup.
 * @memcg: pointer to a valid memory cgroup or NULL.
 *
 * If the passed argument is not NULL, it is returned without any additional
 * checks or changes. Otherwise, root_mem_cgroup is returned.
 *
 * NOTE: root_mem_cgroup can be NULL during early boot.
 */
static inline struct mem_cgroup *mem_cgroup_or_root(struct mem_cgroup *memcg)
{
	return memcg ? memcg : root_mem_cgroup;
}
#else
static inline bool mem_cgroup_kmem_disabled(void)
{
	return true;
}

static inline int memcg_kmem_charge_page(struct page *page, gfp_t gfp,
					 int order)
{
	return 0;
}

static inline void memcg_kmem_uncharge_page(struct page *page, int order)
{
}

static inline int __memcg_kmem_charge_page(struct page *page, gfp_t gfp,
					   int order)
{
	return 0;
}

static inline void __memcg_kmem_uncharge_page(struct page *page, int order)
{
}

static inline struct obj_cgroup *get_obj_cgroup_from_page(struct page *page)
{
	return NULL;
}

static inline bool memcg_kmem_enabled(void)
{
	return false;
}

static inline int memcg_kmem_id(struct mem_cgroup *memcg)
{
	return -1;
}

static inline struct mem_cgroup *mem_cgroup_from_obj(void *p)
{
	return NULL;
}

static inline struct mem_cgroup *mem_cgroup_from_slab_obj(void *p)
{
	return NULL;
}

static inline void count_objcg_event(struct obj_cgroup *objcg,
				     enum vm_event_item idx)
{
}

static inline struct mem_cgroup *get_mem_cgroup_from_obj(void *p)
{
	return NULL;
}

static inline struct mem_cgroup *mem_cgroup_or_root(struct mem_cgroup *memcg)
{
	return NULL;
}
#endif /* CONFIG_MEMCG_KMEM */

#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
bool obj_cgroup_may_zswap(struct obj_cgroup *objcg);
void obj_cgroup_charge_zswap(struct obj_cgroup *objcg, size_t size);
void obj_cgroup_uncharge_zswap(struct obj_cgroup *objcg, size_t size);
#else
static inline bool obj_cgroup_may_zswap(struct obj_cgroup *objcg)
{
	return true;
}
static inline void obj_cgroup_charge_zswap(struct obj_cgroup *objcg,
					   size_t size)
{
}
static inline void obj_cgroup_uncharge_zswap(struct obj_cgroup *objcg,
					     size_t size)
{
}
#endif

#endif /* _LINUX_MEMCONTROL_H */