/* SPDX-License-Identifier: GPL-2.0-or-later */
/* memcontrol.h - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <[email protected]>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <[email protected]>
 */

#ifndef _LINUX_MEMCONTROL_H
#define _LINUX_MEMCONTROL_H
#include <linux/cgroup.h>
#include <linux/vm_event_item.h>
#include <linux/hardirq.h>
#include <linux/jump_label.h>
#include <linux/kernel.h>
#include <linux/page_counter.h>
#include <linux/vmpressure.h>
#include <linux/eventfd.h>
#include <linux/mm.h>
#include <linux/vmstat.h>
#include <linux/writeback.h>
#include <linux/page-flags.h>
#include <linux/shrinker.h>

struct mem_cgroup;
struct obj_cgroup;
struct page;
struct mm_struct;
struct kmem_cache;

/* Cgroup-specific page state, on top of universal node page state */
enum memcg_stat_item {
        MEMCG_SWAP = NR_VM_NODE_STAT_ITEMS,
        MEMCG_SOCK,
        MEMCG_PERCPU_B,
        MEMCG_VMALLOC,
        MEMCG_KMEM,
        MEMCG_ZSWAP_B,
        MEMCG_ZSWAPPED,
        MEMCG_NR_STAT,
};

enum memcg_memory_event {
        MEMCG_LOW,
        MEMCG_HIGH,
        MEMCG_MAX,
        MEMCG_OOM,
        MEMCG_OOM_KILL,
        MEMCG_OOM_GROUP_KILL,
        MEMCG_SWAP_HIGH,
        MEMCG_SWAP_MAX,
        MEMCG_SWAP_FAIL,
        MEMCG_NR_MEMORY_EVENTS,
};

struct mem_cgroup_reclaim_cookie {
        pg_data_t *pgdat;
        int generation;
};

#ifdef CONFIG_MEMCG

#define MEM_CGROUP_ID_SHIFT     16

struct mem_cgroup_id {
        int id;
        refcount_t ref;
};

struct memcg_vmstats_percpu;
struct memcg1_events_percpu;
struct memcg_vmstats;
struct lruvec_stats_percpu;
struct lruvec_stats;

struct mem_cgroup_reclaim_iter {
        struct mem_cgroup *position;
        /* scan generation, increased every round-trip */
        atomic_t generation;
};

/*
 * per-node information in memory controller.
 */
struct mem_cgroup_per_node {
        /* Keep the read-only fields at the start */
        struct mem_cgroup *memcg;       /* Back pointer, we cannot use container_of */

        struct lruvec_stats_percpu __percpu *lruvec_stats_percpu;
        struct lruvec_stats *lruvec_stats;
        struct shrinker_info __rcu *shrinker_info;

#ifdef CONFIG_MEMCG_V1
        /*
         * Memcg-v1 only stuff in middle as buffer between read mostly fields
         * and update often fields to avoid false sharing. If v1 stuff is
         * not present, an explicit padding is needed.
         */

        struct rb_node tree_node;       /* RB tree node */
        unsigned long usage_in_excess;  /* Set to the value by which the soft limit is exceeded */
        bool on_tree;
#else
        CACHELINE_PADDING(_pad1_);
#endif

        /* Fields which get updated often at the end. */
        struct lruvec lruvec;
        CACHELINE_PADDING(_pad2_);
        unsigned long lru_zone_size[MAX_NR_ZONES][NR_LRU_LISTS];
        struct mem_cgroup_reclaim_iter iter;
};
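
/*
 * Illustrative sketch (not part of the API): the lruvec is embedded in
 * struct mem_cgroup_per_node, so container_of() can recover the per-node
 * structure from a lruvec pointer, which is how lruvec_memcg() below works.
 * The mem_cgroup itself is not an enclosing object -- the per-node
 * structures hang off memcg->nodeinfo[] -- which is why an explicit back
 * pointer is stored instead of using container_of():
 *
 *      static struct mem_cgroup *lruvec_to_memcg(struct lruvec *lruvec)
 *      {
 *              struct mem_cgroup_per_node *mz;
 *
 *              mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
 *              return mz->memcg;       // the stored back pointer
 *      }
 *
 * lruvec_to_memcg() is a made-up name for illustration; use lruvec_memcg().
 */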

struct mem_cgroup_threshold {
        struct eventfd_ctx *eventfd;
        unsigned long threshold;
};

/* For threshold */
struct mem_cgroup_threshold_ary {
        /* An array index points to threshold just below or equal to usage. */
        int current_threshold;
        /* Size of entries[] */
        unsigned int size;
        /* Array of thresholds */
        struct mem_cgroup_threshold entries[] __counted_by(size);
};

struct mem_cgroup_thresholds {
        /* Primary thresholds array */
        struct mem_cgroup_threshold_ary *primary;
        /*
         * Spare threshold array.
         * This is needed to make mem_cgroup_unregister_event() "never fail".
         * It must be able to store at least primary->size - 1 entries.
         */
        struct mem_cgroup_threshold_ary *spare;
};

/*
 * Remember four most recent foreign writebacks with dirty pages in this
 * cgroup.  Inode sharing is expected to be uncommon and, even if we miss
 * one in a given round, we're likely to catch it later if it keeps
 * foreign-dirtying, so a fairly low count should be enough.
 *
 * See mem_cgroup_track_foreign_dirty_slowpath() for details.
 */
#define MEMCG_CGWB_FRN_CNT      4

struct memcg_cgwb_frn {
        u64 bdi_id;                     /* bdi->id of the foreign inode */
        int memcg_id;                   /* memcg->css.id of foreign inode */
        u64 at;                         /* jiffies_64 at the time of dirtying */
        struct wb_completion done;      /* tracks in-flight foreign writebacks */
};

/*
 * Bucket for arbitrarily byte-sized objects charged to a memory
 * cgroup. The bucket can be reparented in one piece when the cgroup
 * is destroyed, without having to round up the individual references
 * of all live memory objects in the wild.
 */
struct obj_cgroup {
        struct percpu_ref refcnt;
        struct mem_cgroup *memcg;
        atomic_t nr_charged_bytes;
        union {
                struct list_head list; /* protected by objcg_lock */
                struct rcu_head rcu;
        };
};
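
/*
 * Illustrative sketch (an assumption about typical usage, not a definition
 * from this header): long-lived kernel objects usually pin an obj_cgroup
 * rather than a mem_cgroup, so that reparenting on cgroup removal is handled
 * transparently.  The memcg is resolved lazily under RCU:
 *
 *      struct obj_cgroup *objcg = get_obj_cgroup_from_current();
 *
 *      if (objcg) {
 *              struct mem_cgroup *memcg;
 *
 *              rcu_read_lock();
 *              memcg = obj_cgroup_memcg(objcg);  // may be a parent after reparenting
 *              // ... use memcg without sleeping ...
 *              rcu_read_unlock();
 *
 *              obj_cgroup_put(objcg);
 *      }
 *
 * get_obj_cgroup_from_current(), obj_cgroup_memcg() and obj_cgroup_put() are
 * all declared later in this header.
 */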

/*
 * The memory controller data structure. The memory controller controls both
 * page cache and RSS per cgroup. We would eventually like to provide
 * statistics based on the statistics developed by Rik Van Riel for clock-pro,
 * to help the administrator determine what knobs to tune.
 */
struct mem_cgroup {
        struct cgroup_subsys_state css;

        /* Private memcg ID. Used to ID objects that outlive the cgroup */
        struct mem_cgroup_id id;

        /* Accounted resources */
        struct page_counter memory;             /* Both v1 & v2 */

        union {
                struct page_counter swap;       /* v2 only */
                struct page_counter memsw;      /* v1 only */
        };

        /* registered local peak watchers */
        struct list_head memory_peaks;
        struct list_head swap_peaks;
        spinlock_t peaks_lock;

        /* Range enforcement for interrupt charges */
        struct work_struct high_work;

#ifdef CONFIG_ZSWAP
        unsigned long zswap_max;

        /*
         * Prevent pages from this memcg from being written back from zswap to
         * swap, and from being swapped out on zswap store failures.
         */
        bool zswap_writeback;
#endif

        /* vmpressure notifications */
        struct vmpressure vmpressure;

        /*
         * Should the OOM killer kill all tasks belonging to this cgroup,
         * if it has to kill one?
         */
        bool oom_group;

        int swappiness;

        /* memory.events and memory.events.local */
        struct cgroup_file events_file;
        struct cgroup_file events_local_file;

        /* handle for "memory.swap.events" */
        struct cgroup_file swap_events_file;

        /* memory.stat */
        struct memcg_vmstats *vmstats;

        /* memory.events */
        atomic_long_t memory_events[MEMCG_NR_MEMORY_EVENTS];
        atomic_long_t memory_events_local[MEMCG_NR_MEMORY_EVENTS];

        /*
         * Hint of reclaim pressure for socket memory management. Note
         * that this indicator should NOT be used in legacy cgroup mode
         * where socket memory is accounted/charged separately.
         */
        unsigned long socket_pressure;

        int kmemcg_id;
        /*
         * memcg->objcg is wiped out as a part of the objcg reparenting
         * process. memcg->orig_objcg preserves a pointer (and a reference)
         * to the original objcg until the end of life of the memcg.
         */
        struct obj_cgroup __rcu *objcg;
        struct obj_cgroup *orig_objcg;
        /* list of inherited objcgs, protected by objcg_lock */
        struct list_head objcg_list;

        struct memcg_vmstats_percpu __percpu *vmstats_percpu;

#ifdef CONFIG_CGROUP_WRITEBACK
        struct list_head cgwb_list;
        struct wb_domain cgwb_domain;
        struct memcg_cgwb_frn cgwb_frn[MEMCG_CGWB_FRN_CNT];
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
        struct deferred_split deferred_split_queue;
#endif

#ifdef CONFIG_LRU_GEN_WALKS_MMU
        /* per-memcg mm_struct list */
        struct lru_gen_mm_list mm_list;
#endif

#ifdef CONFIG_MEMCG_V1
        /* Legacy consumer-oriented counters */
        struct page_counter kmem;               /* v1 only */
        struct page_counter tcpmem;             /* v1 only */

        struct memcg1_events_percpu __percpu *events_percpu;

        unsigned long soft_limit;

        /* protected by memcg_oom_lock */
        bool oom_lock;
        int under_oom;

        /* OOM-Killer disable */
        int oom_kill_disable;

        /* protect arrays of thresholds */
        struct mutex thresholds_lock;

        /* thresholds for memory usage. RCU-protected */
        struct mem_cgroup_thresholds thresholds;

        /* thresholds for mem+swap usage. RCU-protected */
        struct mem_cgroup_thresholds memsw_thresholds;

        /* For oom notifier event fd */
        struct list_head oom_notify;

        /* Legacy tcp memory accounting */
        bool tcpmem_active;
        int tcpmem_pressure;

        /* List of events which userspace wants to receive */
        struct list_head event_list;
        spinlock_t event_list_lock;
#endif /* CONFIG_MEMCG_V1 */

        struct mem_cgroup_per_node *nodeinfo[];
};

/*
 * size of first charge trial.
 * TODO: maybe bigger values are needed on large machines, or the batch size
 * should be made dynamic based on the workload.
 */
#define MEMCG_CHARGE_BATCH 64U

extern struct mem_cgroup *root_mem_cgroup;

enum page_memcg_data_flags {
        /* page->memcg_data is a pointer to a slabobj_ext vector */
        MEMCG_DATA_OBJEXTS = (1UL << 0),
        /* page has been accounted as a non-slab kernel page */
        MEMCG_DATA_KMEM = (1UL << 1),
        /* the next bit after the last actual flag */
        __NR_MEMCG_DATA_FLAGS = (1UL << 2),
};

#define __FIRST_OBJEXT_FLAG     __NR_MEMCG_DATA_FLAGS

#else /* CONFIG_MEMCG */

#define __FIRST_OBJEXT_FLAG     (1UL << 0)

#endif /* CONFIG_MEMCG */

enum objext_flags {
        /* slabobj_ext vector failed to allocate */
        OBJEXTS_ALLOC_FAIL = __FIRST_OBJEXT_FLAG,
        /* the next bit after the last actual flag */
        __NR_OBJEXTS_FLAGS = (__FIRST_OBJEXT_FLAG << 1),
};

#define OBJEXTS_FLAGS_MASK      (__NR_OBJEXTS_FLAGS - 1)

#ifdef CONFIG_MEMCG

static inline bool folio_memcg_kmem(struct folio *folio);

/*
 * After initialization, objcg->memcg always points at a valid memcg,
 * but it can be atomically swapped to the parent memcg.
 *
 * The caller must ensure that the returned memcg won't be released.
 */
static inline struct mem_cgroup *obj_cgroup_memcg(struct obj_cgroup *objcg)
{
        lockdep_assert_once(rcu_read_lock_held() || lockdep_is_held(&cgroup_mutex));
        return READ_ONCE(objcg->memcg);
}

/*
 * __folio_memcg - Get the memory cgroup associated with a non-kmem folio
 * @folio: Pointer to the folio.
 *
 * Returns a pointer to the memory cgroup associated with the folio,
 * or NULL. This function assumes that the folio is known to have a
 * proper memory cgroup pointer. It's not safe to call this function
 * against some types of folios, e.g. slab folios, ex-slab folios or
 * kmem folios.
 */
static inline struct mem_cgroup *__folio_memcg(struct folio *folio)
{
        unsigned long memcg_data = folio->memcg_data;

        VM_BUG_ON_FOLIO(folio_test_slab(folio), folio);
        VM_BUG_ON_FOLIO(memcg_data & MEMCG_DATA_OBJEXTS, folio);
        VM_BUG_ON_FOLIO(memcg_data & MEMCG_DATA_KMEM, folio);

        return (struct mem_cgroup *)(memcg_data & ~OBJEXTS_FLAGS_MASK);
}

/*
 * __folio_objcg - get the object cgroup associated with a kmem folio.
 * @folio: Pointer to the folio.
 *
 * Returns a pointer to the object cgroup associated with the folio,
 * or NULL. This function assumes that the folio is known to have a
 * proper object cgroup pointer. It's not safe to call this function
 * against some types of folios, e.g. slab folios, ex-slab folios or
 * LRU folios.
 */
static inline struct obj_cgroup *__folio_objcg(struct folio *folio)
{
        unsigned long memcg_data = folio->memcg_data;

        VM_BUG_ON_FOLIO(folio_test_slab(folio), folio);
        VM_BUG_ON_FOLIO(memcg_data & MEMCG_DATA_OBJEXTS, folio);
        VM_BUG_ON_FOLIO(!(memcg_data & MEMCG_DATA_KMEM), folio);

        return (struct obj_cgroup *)(memcg_data & ~OBJEXTS_FLAGS_MASK);
}

/*
 * folio_memcg - Get the memory cgroup associated with a folio.
 * @folio: Pointer to the folio.
 *
 * Returns a pointer to the memory cgroup associated with the folio,
 * or NULL. This function assumes that the folio is known to have a
 * proper memory cgroup pointer. It's not safe to call this function
 * against some types of folios, e.g. slab folios or ex-slab folios.
 *
 * For a non-kmem folio any of the following ensures folio and memcg binding
 * stability:
 *
 * - the folio lock
 * - LRU isolation
 * - exclusive reference
 *
 * For a kmem folio a caller should hold an rcu read lock to protect the
 * memcg associated with the kmem folio from being released.
 */
static inline struct mem_cgroup *folio_memcg(struct folio *folio)
{
        if (folio_memcg_kmem(folio))
                return obj_cgroup_memcg(__folio_objcg(folio));
        return __folio_memcg(folio);
}

/*
 * folio_memcg_charged - Check if a folio is charged to a memory cgroup.
 * @folio: Pointer to the folio.
 *
 * Returns true if the folio is charged to a memory cgroup, otherwise
 * returns false.
 */
static inline bool folio_memcg_charged(struct folio *folio)
{
        if (folio_memcg_kmem(folio))
                return __folio_objcg(folio) != NULL;
        return __folio_memcg(folio) != NULL;
}
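
/*
 * Illustrative sketch (an assumption about a typical caller, not code from
 * this file): a stat-reading path would typically rely on the folio lock to
 * stabilize the folio<->memcg binding before calling folio_memcg():
 *
 *      static void note_folio_memcg(struct folio *folio)
 *      {
 *              struct mem_cgroup *memcg;
 *
 *              folio_lock(folio);      // or LRU isolation, or an
 *                                      // exclusive reference
 *              memcg = folio_memcg(folio);
 *              if (memcg)
 *                      pr_debug("folio charged to %p\n", memcg);
 *              folio_unlock(folio);
 *      }
 *
 * For kmem folios the memcg is reached via the objcg and may be reparented,
 * so rcu_read_lock() must be held around folio_memcg() instead.
 */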

/*
 * folio_memcg_check - Get the memory cgroup associated with a folio.
 * @folio: Pointer to the folio.
 *
 * Returns a pointer to the memory cgroup associated with the folio,
 * or NULL. Unlike folio_memcg(), this function can take any folio
 * as an argument. It has to be used in cases where it's not known whether
 * a folio has an associated memory cgroup pointer, an object cgroups
 * vector or an object cgroup.
 *
 * For a non-kmem folio any of the following ensures folio and memcg binding
 * stability:
 *
 * - the folio lock
 * - LRU isolation
 * - exclusive reference
 *
 * For a kmem folio a caller should hold an rcu read lock to protect the
 * memcg associated with the kmem folio from being released.
 */
static inline struct mem_cgroup *folio_memcg_check(struct folio *folio)
{
        /*
         * Because folio->memcg_data might be changed asynchronously
         * for slabs, READ_ONCE() should be used here.
         */
        unsigned long memcg_data = READ_ONCE(folio->memcg_data);

        if (memcg_data & MEMCG_DATA_OBJEXTS)
                return NULL;

        if (memcg_data & MEMCG_DATA_KMEM) {
                struct obj_cgroup *objcg;

                objcg = (void *)(memcg_data & ~OBJEXTS_FLAGS_MASK);
                return obj_cgroup_memcg(objcg);
        }

        return (struct mem_cgroup *)(memcg_data & ~OBJEXTS_FLAGS_MASK);
}

static inline struct mem_cgroup *page_memcg_check(struct page *page)
{
        if (PageTail(page))
                return NULL;
        return folio_memcg_check((struct folio *)page);
}

static inline struct mem_cgroup *get_mem_cgroup_from_objcg(struct obj_cgroup *objcg)
{
        struct mem_cgroup *memcg;

        rcu_read_lock();
retry:
        memcg = obj_cgroup_memcg(objcg);
        if (unlikely(!css_tryget(&memcg->css)))
                goto retry;
        rcu_read_unlock();

        return memcg;
}

/*
 * folio_memcg_kmem - Check if the folio has the memcg_kmem flag set.
 * @folio: Pointer to the folio.
 *
 * Checks if the folio has the MemcgKmem flag set. The caller must ensure
 * that the folio has an associated memory cgroup. It's not safe to call
 * this function against some types of folios, e.g. slab folios.
 */
static inline bool folio_memcg_kmem(struct folio *folio)
{
        VM_BUG_ON_PGFLAGS(PageTail(&folio->page), &folio->page);
        VM_BUG_ON_FOLIO(folio->memcg_data & MEMCG_DATA_OBJEXTS, folio);
        return folio->memcg_data & MEMCG_DATA_KMEM;
}

static inline bool PageMemcgKmem(struct page *page)
{
        return folio_memcg_kmem(page_folio(page));
}

static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
{
        return (memcg == root_mem_cgroup);
}

static inline bool mem_cgroup_disabled(void)
{
        return !cgroup_subsys_enabled(memory_cgrp_subsys);
}

static inline void mem_cgroup_protection(struct mem_cgroup *root,
                                         struct mem_cgroup *memcg,
                                         unsigned long *min,
                                         unsigned long *low)
{
        *min = *low = 0;

        if (mem_cgroup_disabled())
                return;

        /*
         * There is no reclaim protection applied to a targeted reclaim.
         * We are special-casing this here because
         * mem_cgroup_calculate_protection is not robust enough to keep
         * the protection invariant for calculated effective values for
         * parallel reclaimers with different reclaim targets. This is
         * especially a problem for tail memcgs (as they have pages on LRU)
         * which would want to have effective values 0 for targeted reclaim
         * but a different value for external reclaim.
         *
         * Example
         * Let's have global and A's reclaim in parallel:
         *  |
         *  A (low=2G, usage = 3G, max = 3G, children_low_usage = 1.5G)
         *  |\
         *  | C (low = 1G, usage = 2.5G)
         *  B (low = 1G, usage = 0.5G)
         *
         * For the global reclaim
         * A.elow = A.low
         * B.elow = min(B.usage, B.low) because children_low_usage <= A.elow
         * C.elow = min(C.usage, C.low)
         *
         * With the effective values resetting we have A reclaim
         * A.elow = 0
         * B.elow = B.low
         * C.elow = C.low
         *
         * If the global reclaim races with A's reclaim then
         * B.elow = C.elow = 0 because children_low_usage > A.elow
         * is possible and reclaiming B would be violating the protection.
         */
        if (root == memcg)
                return;

        *min = READ_ONCE(memcg->memory.emin);
        *low = READ_ONCE(memcg->memory.elow);
}

void mem_cgroup_calculate_protection(struct mem_cgroup *root,
                                     struct mem_cgroup *memcg);

static inline bool mem_cgroup_unprotected(struct mem_cgroup *target,
                                          struct mem_cgroup *memcg)
{
        /*
         * The root memcg doesn't account charges, and doesn't support
         * protection. The target memcg's protection is ignored, see
         * mem_cgroup_calculate_protection() and mem_cgroup_protection().
         */
        return mem_cgroup_disabled() || mem_cgroup_is_root(memcg) ||
                memcg == target;
}

static inline bool mem_cgroup_below_low(struct mem_cgroup *target,
                                        struct mem_cgroup *memcg)
{
        if (mem_cgroup_unprotected(target, memcg))
                return false;

        return READ_ONCE(memcg->memory.elow) >=
                page_counter_read(&memcg->memory);
}

static inline bool mem_cgroup_below_min(struct mem_cgroup *target,
                                        struct mem_cgroup *memcg)
{
        if (mem_cgroup_unprotected(target, memcg))
                return false;

        return READ_ONCE(memcg->memory.emin) >=
                page_counter_read(&memcg->memory);
}
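
/*
 * Illustrative sketch of how a reclaimer consumes the protection API above
 * (modelled on the pattern in mm/vmscan.c; simplified, not the actual code):
 *
 *      mem_cgroup_calculate_protection(target_memcg, memcg);
 *
 *      if (mem_cgroup_below_min(target_memcg, memcg)) {
 *              // hard protection: skip this memcg entirely
 *              return;
 *      } else if (mem_cgroup_below_low(target_memcg, memcg)) {
 *              // soft protection: normally skip; if reclaim is forced to
 *              // proceed anyway, memory.events::low is raised:
 *              memcg_memory_event(memcg, MEMCG_LOW);
 *      }
 *
 * Note that both checks return false for targeted reclaim (memcg == target),
 * matching the "no protection for targeted reclaim" rule described above.
 */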

int __mem_cgroup_charge(struct folio *folio, struct mm_struct *mm, gfp_t gfp);

/**
 * mem_cgroup_charge - Charge a newly allocated folio to a cgroup.
 * @folio: Folio to charge.
 * @mm: mm context of the allocating task.
 * @gfp: Reclaim mode.
 *
 * Try to charge @folio to the memcg that @mm belongs to, reclaiming
 * pages according to @gfp if necessary. If @mm is NULL, try to
 * charge to the active memcg.
 *
 * Do not use this for folios allocated for swapin.
 *
 * Return: 0 on success. Otherwise, an error code is returned.
 */
static inline int mem_cgroup_charge(struct folio *folio, struct mm_struct *mm,
                                    gfp_t gfp)
{
        if (mem_cgroup_disabled())
                return 0;
        return __mem_cgroup_charge(folio, mm, gfp);
}
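
/*
 * Illustrative sketch (an assumption about a typical allocation path, not
 * code from this header): a newly allocated folio is charged before it
 * becomes visible, and it is only used if the charge succeeded:
 *
 *      static struct folio *alloc_charged_folio(struct mm_struct *mm, gfp_t gfp)
 *      {
 *              struct folio *folio = folio_alloc(gfp, 0);
 *
 *              if (!folio)
 *                      return NULL;
 *              if (mem_cgroup_charge(folio, mm, gfp)) {
 *                      folio_put(folio);
 *                      return NULL;
 *              }
 *              return folio;
 *      }
 *
 * Swapped-in folios must use mem_cgroup_swapin_charge_folio() instead.
 */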

int mem_cgroup_charge_hugetlb(struct folio *folio, gfp_t gfp);

int mem_cgroup_swapin_charge_folio(struct folio *folio, struct mm_struct *mm,
                                   gfp_t gfp, swp_entry_t entry);

void __mem_cgroup_uncharge(struct folio *folio);

/**
 * mem_cgroup_uncharge - Uncharge a folio.
 * @folio: Folio to uncharge.
 *
 * Uncharge a folio previously charged with mem_cgroup_charge().
 */
static inline void mem_cgroup_uncharge(struct folio *folio)
{
        if (mem_cgroup_disabled())
                return;
        __mem_cgroup_uncharge(folio);
}

void __mem_cgroup_uncharge_folios(struct folio_batch *folios);
static inline void mem_cgroup_uncharge_folios(struct folio_batch *folios)
{
        if (mem_cgroup_disabled())
                return;
        __mem_cgroup_uncharge_folios(folios);
}

void mem_cgroup_replace_folio(struct folio *old, struct folio *new);
void mem_cgroup_migrate(struct folio *old, struct folio *new);

/**
 * mem_cgroup_lruvec - get the lru list vector for a memcg & node
 * @memcg: memcg of the wanted lruvec
 * @pgdat: pglist_data
 *
 * Returns the lru list vector holding pages for a given @memcg &
 * @pgdat combination. This can be the node lruvec, if the memory
 * controller is disabled.
 */
static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg,
                                               struct pglist_data *pgdat)
{
        struct mem_cgroup_per_node *mz;
        struct lruvec *lruvec;

        if (mem_cgroup_disabled()) {
                lruvec = &pgdat->__lruvec;
                goto out;
        }

        if (!memcg)
                memcg = root_mem_cgroup;

        mz = memcg->nodeinfo[pgdat->node_id];
        lruvec = &mz->lruvec;
out:
        /*
         * Since a node can be onlined after the mem_cgroup was created,
         * we have to be prepared to initialize lruvec->pgdat here;
         * and if offlined then onlined again, we need to reinitialize it.
         */
        if (unlikely(lruvec->pgdat != pgdat))
                lruvec->pgdat = pgdat;
        return lruvec;
}

/**
 * folio_lruvec - return lruvec for isolating/putting an LRU folio
 * @folio: Pointer to the folio.
 *
 * This function relies on folio->mem_cgroup being stable.
 */
static inline struct lruvec *folio_lruvec(struct folio *folio)
{
        struct mem_cgroup *memcg = folio_memcg(folio);

        VM_WARN_ON_ONCE_FOLIO(!memcg && !mem_cgroup_disabled(), folio);
        return mem_cgroup_lruvec(memcg, folio_pgdat(folio));
}
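
/*
 * Illustrative sketch (hypothetical helper, for illustration only): per-node
 * LRU statistics for a memcg are reached through its lruvec, falling back to
 * the node lruvec when the controller is disabled:
 *
 *      static unsigned long memcg_node_lru_pages(struct mem_cgroup *memcg,
 *                                                int nid, enum lru_list lru)
 *      {
 *              struct lruvec *lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
 *
 *              return lruvec_page_state(lruvec, NR_LRU_BASE + lru);
 *      }
 *
 * NODE_DATA(), NR_LRU_BASE and lruvec_page_state() (declared below) are the
 * standard building blocks; the helper name is made up.
 */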

struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);

struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm);

struct mem_cgroup *get_mem_cgroup_from_current(void);

struct mem_cgroup *get_mem_cgroup_from_folio(struct folio *folio);

struct lruvec *folio_lruvec_lock(struct folio *folio);
struct lruvec *folio_lruvec_lock_irq(struct folio *folio);
struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio,
                                         unsigned long *flags);

#ifdef CONFIG_DEBUG_VM
void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio);
#else
static inline
void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio)
{
}
#endif

static inline
struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css)
{
        return css ? container_of(css, struct mem_cgroup, css) : NULL;
}

static inline bool obj_cgroup_tryget(struct obj_cgroup *objcg)
{
        return percpu_ref_tryget(&objcg->refcnt);
}

static inline void obj_cgroup_get(struct obj_cgroup *objcg)
{
        percpu_ref_get(&objcg->refcnt);
}

static inline void obj_cgroup_get_many(struct obj_cgroup *objcg,
                                       unsigned long nr)
{
        percpu_ref_get_many(&objcg->refcnt, nr);
}

static inline void obj_cgroup_put(struct obj_cgroup *objcg)
{
        if (objcg)
                percpu_ref_put(&objcg->refcnt);
}

static inline bool mem_cgroup_tryget(struct mem_cgroup *memcg)
{
        return !memcg || css_tryget(&memcg->css);
}

static inline bool mem_cgroup_tryget_online(struct mem_cgroup *memcg)
{
        return !memcg || css_tryget_online(&memcg->css);
}

static inline void mem_cgroup_put(struct mem_cgroup *memcg)
{
        if (memcg)
                css_put(&memcg->css);
}

#define mem_cgroup_from_counter(counter, member)        \
        container_of(counter, struct mem_cgroup, member)

struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
                                   struct mem_cgroup *,
                                   struct mem_cgroup_reclaim_cookie *);
void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
void mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
                           int (*)(struct task_struct *, void *), void *arg);
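
/*
 * Illustrative sketch of the canonical hierarchy walk built on
 * mem_cgroup_iter()/mem_cgroup_iter_break() (simplified from the pattern
 * used by reclaim; not a definition from this header):
 *
 *      struct mem_cgroup *iter = mem_cgroup_iter(root, NULL, NULL);
 *
 *      do {
 *              if (process(iter) == STOP_WALK) {
 *                      mem_cgroup_iter_break(root, iter); // drops the css ref
 *                      break;
 *              }
 *      } while ((iter = mem_cgroup_iter(root, iter, NULL)));
 *
 * Passing a non-NULL mem_cgroup_reclaim_cookie instead of NULL makes the
 * walk shared and resumable across concurrent reclaimers.  process() and
 * STOP_WALK are placeholders.
 */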

static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
{
        if (mem_cgroup_disabled())
                return 0;

        return memcg->id.id;
}
struct mem_cgroup *mem_cgroup_from_id(unsigned short id);
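
/*
 * Illustrative sketch (an assumption about typical use, e.g. swap records
 * that must outlive the cgroup): the 16-bit memcg id is stored instead of a
 * pointer and resolved back under RCU, where the lookup may legitimately
 * return NULL once the id has been recycled:
 *
 *      unsigned short id = mem_cgroup_id(memcg);       // store this
 *
 *      rcu_read_lock();
 *      memcg = mem_cgroup_from_id(id);                 // resolve it later
 *      if (memcg && !css_tryget_online(&memcg->css))
 *              memcg = NULL;
 *      rcu_read_unlock();
 */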

#ifdef CONFIG_SHRINKER_DEBUG
static inline unsigned long mem_cgroup_ino(struct mem_cgroup *memcg)
{
        return memcg ? cgroup_ino(memcg->css.cgroup) : 0;
}

struct mem_cgroup *mem_cgroup_get_from_ino(unsigned long ino);
#endif

static inline struct mem_cgroup *mem_cgroup_from_seq(struct seq_file *m)
{
        return mem_cgroup_from_css(seq_css(m));
}

static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
{
        struct mem_cgroup_per_node *mz;

        if (mem_cgroup_disabled())
                return NULL;

        mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
        return mz->memcg;
}

/**
 * parent_mem_cgroup - find the accounting parent of a memcg
 * @memcg: memcg whose parent to find
 *
 * Returns the parent memcg, or NULL if this is the root.
 */
static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
{
        return mem_cgroup_from_css(memcg->css.parent);
}

static inline bool mem_cgroup_is_descendant(struct mem_cgroup *memcg,
                                            struct mem_cgroup *root)
{
        if (root == memcg)
                return true;
        return cgroup_is_descendant(memcg->css.cgroup, root->css.cgroup);
}

static inline bool mm_match_cgroup(struct mm_struct *mm,
                                   struct mem_cgroup *memcg)
{
        struct mem_cgroup *task_memcg;
        bool match = false;

        rcu_read_lock();
        task_memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
        if (task_memcg)
                match = mem_cgroup_is_descendant(task_memcg, memcg);
        rcu_read_unlock();
        return match;
}

struct cgroup_subsys_state *mem_cgroup_css_from_folio(struct folio *folio);
ino_t page_cgroup_ino(struct page *page);

static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
{
        if (mem_cgroup_disabled())
                return true;
        return !!(memcg->css.flags & CSS_ONLINE);
}

void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
                                int zid, int nr_pages);

static inline
unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
                                           enum lru_list lru, int zone_idx)
{
        struct mem_cgroup_per_node *mz;

        mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
        return READ_ONCE(mz->lru_zone_size[zone_idx][lru]);
}

void mem_cgroup_handle_over_high(gfp_t gfp_mask);

unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg);

unsigned long mem_cgroup_size(struct mem_cgroup *memcg);

void mem_cgroup_print_oom_context(struct mem_cgroup *memcg,
                                  struct task_struct *p);

void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg);

struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim,
                                            struct mem_cgroup *oom_domain);
void mem_cgroup_print_oom_group(struct mem_cgroup *memcg);

void __mod_memcg_state(struct mem_cgroup *memcg, enum memcg_stat_item idx,
                       int val);

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void mod_memcg_state(struct mem_cgroup *memcg,
                                   enum memcg_stat_item idx, int val)
{
        unsigned long flags;

        local_irq_save(flags);
        __mod_memcg_state(memcg, idx, val);
        local_irq_restore(flags);
}

static inline void mod_memcg_page_state(struct page *page,
                                        enum memcg_stat_item idx, int val)
{
        struct mem_cgroup *memcg;

        if (mem_cgroup_disabled())
                return;

        rcu_read_lock();
        memcg = folio_memcg(page_folio(page));
        if (memcg)
                mod_memcg_state(memcg, idx, val);
        rcu_read_unlock();
}

unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx);
unsigned long lruvec_page_state(struct lruvec *lruvec, enum node_stat_item idx);
unsigned long lruvec_page_state_local(struct lruvec *lruvec,
                                      enum node_stat_item idx);

void mem_cgroup_flush_stats(struct mem_cgroup *memcg);
void mem_cgroup_flush_stats_ratelimited(struct mem_cgroup *memcg);

void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val);

static inline void mod_lruvec_kmem_state(void *p, enum node_stat_item idx,
                                         int val)
{
        unsigned long flags;

        local_irq_save(flags);
        __mod_lruvec_kmem_state(p, idx, val);
        local_irq_restore(flags);
}

void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
                          unsigned long count);

static inline void count_memcg_events(struct mem_cgroup *memcg,
                                      enum vm_event_item idx,
                                      unsigned long count)
{
        unsigned long flags;

        local_irq_save(flags);
        __count_memcg_events(memcg, idx, count);
        local_irq_restore(flags);
}

static inline void count_memcg_folio_events(struct folio *folio,
                enum vm_event_item idx, unsigned long nr)
{
        struct mem_cgroup *memcg = folio_memcg(folio);

        if (memcg)
                count_memcg_events(memcg, idx, nr);
}

static inline void count_memcg_events_mm(struct mm_struct *mm,
                                         enum vm_event_item idx,
                                         unsigned long count)
{
        struct mem_cgroup *memcg;

        if (mem_cgroup_disabled())
                return;

        rcu_read_lock();
        memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
        if (likely(memcg))
                count_memcg_events(memcg, idx, count);
        rcu_read_unlock();
}

static inline void count_memcg_event_mm(struct mm_struct *mm,
                                        enum vm_event_item idx)
{
        count_memcg_events_mm(mm, idx, 1);
}
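
/*
 * Illustrative sketch (modelled on how the fault path accounts events; the
 * surrounding helper is hypothetical): VM events are charged to the memcg of
 * the mm that triggered them:
 *
 *      static void account_fault(struct mm_struct *mm)
 *      {
 *              count_vm_event(PGFAULT);                // global counter
 *              count_memcg_event_mm(mm, PGFAULT);      // per-memcg counter
 *      }
 *
 * count_vm_event() and PGFAULT come from <linux/vmstat.h> and
 * <linux/vm_event_item.h>, which this header already includes.
 */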

static inline void memcg_memory_event(struct mem_cgroup *memcg,
                                      enum memcg_memory_event event)
{
        bool swap_event = event == MEMCG_SWAP_HIGH || event == MEMCG_SWAP_MAX ||
                          event == MEMCG_SWAP_FAIL;

        atomic_long_inc(&memcg->memory_events_local[event]);
        if (!swap_event)
                cgroup_file_notify(&memcg->events_local_file);

        do {
                atomic_long_inc(&memcg->memory_events[event]);
                if (swap_event)
                        cgroup_file_notify(&memcg->swap_events_file);
                else
                        cgroup_file_notify(&memcg->events_file);

                if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
                        break;
                if (cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_LOCAL_EVENTS)
                        break;
        } while ((memcg = parent_mem_cgroup(memcg)) &&
                 !mem_cgroup_is_root(memcg));
}

static inline void memcg_memory_event_mm(struct mm_struct *mm,
                                         enum memcg_memory_event event)
{
        struct mem_cgroup *memcg;

        if (mem_cgroup_disabled())
                return;

        rcu_read_lock();
        memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
        if (likely(memcg))
                memcg_memory_event(memcg, event);
        rcu_read_unlock();
}

void split_page_memcg(struct page *head, int old_order, int new_order);
void folio_split_memcg_refs(struct folio *folio, unsigned old_order,
                            unsigned new_order);

static inline u64 cgroup_id_from_mm(struct mm_struct *mm)
{
        struct mem_cgroup *memcg;
        u64 id;

        if (mem_cgroup_disabled())
                return 0;

        rcu_read_lock();
        memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
        if (!memcg)
                memcg = root_mem_cgroup;
        id = cgroup_id(memcg->css.cgroup);
        rcu_read_unlock();
        return id;
}

#else /* CONFIG_MEMCG */

#define MEM_CGROUP_ID_SHIFT     0

static inline struct mem_cgroup *folio_memcg(struct folio *folio)
{
        return NULL;
}

static inline bool folio_memcg_charged(struct folio *folio)
{
        return false;
}

static inline struct mem_cgroup *folio_memcg_check(struct folio *folio)
{
        return NULL;
}

static inline struct mem_cgroup *page_memcg_check(struct page *page)
{
        return NULL;
}

static inline struct mem_cgroup *get_mem_cgroup_from_objcg(struct obj_cgroup *objcg)
{
        return NULL;
}

static inline bool folio_memcg_kmem(struct folio *folio)
{
        return false;
}

static inline bool PageMemcgKmem(struct page *page)
{
        return false;
}

static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
{
        return true;
}

static inline bool mem_cgroup_disabled(void)
{
        return true;
}

static inline void memcg_memory_event(struct mem_cgroup *memcg,
                                      enum memcg_memory_event event)
{
}

static inline void memcg_memory_event_mm(struct mm_struct *mm,
                                         enum memcg_memory_event event)
{
}

static inline void mem_cgroup_protection(struct mem_cgroup *root,
                                         struct mem_cgroup *memcg,
                                         unsigned long *min,
                                         unsigned long *low)
{
        *min = *low = 0;
}

static inline void mem_cgroup_calculate_protection(struct mem_cgroup *root,
                                                   struct mem_cgroup *memcg)
{
}

static inline bool mem_cgroup_unprotected(struct mem_cgroup *target,
                                          struct mem_cgroup *memcg)
{
        return true;
}

static inline bool mem_cgroup_below_low(struct mem_cgroup *target,
                                        struct mem_cgroup *memcg)
{
        return false;
}

static inline bool mem_cgroup_below_min(struct mem_cgroup *target,
                                        struct mem_cgroup *memcg)
{
        return false;
}

static inline int mem_cgroup_charge(struct folio *folio,
                                    struct mm_struct *mm, gfp_t gfp)
{
        return 0;
}

static inline int mem_cgroup_charge_hugetlb(struct folio *folio, gfp_t gfp)
{
        return 0;
}

static inline int mem_cgroup_swapin_charge_folio(struct folio *folio,
                struct mm_struct *mm, gfp_t gfp, swp_entry_t entry)
{
        return 0;
}

static inline void mem_cgroup_uncharge(struct folio *folio)
{
}

static inline void mem_cgroup_uncharge_folios(struct folio_batch *folios)
{
}

static inline void mem_cgroup_replace_folio(struct folio *old,
                                            struct folio *new)
{
}

static inline void mem_cgroup_migrate(struct folio *old, struct folio *new)
{
}

static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg,
                                               struct pglist_data *pgdat)
{
        return &pgdat->__lruvec;
}

static inline struct lruvec *folio_lruvec(struct folio *folio)
{
        struct pglist_data *pgdat = folio_pgdat(folio);
        return &pgdat->__lruvec;
}

static inline
void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio)
{
}

static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
{
        return NULL;
}

static inline bool mm_match_cgroup(struct mm_struct *mm,
                                   struct mem_cgroup *memcg)
{
        return true;
}

static inline struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
{
        return NULL;
}

static inline struct mem_cgroup *get_mem_cgroup_from_current(void)
{
        return NULL;
}

static inline struct mem_cgroup *get_mem_cgroup_from_folio(struct folio *folio)
{
        return NULL;
}

static inline
struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css)
{
        return NULL;
}

static inline void obj_cgroup_get(struct obj_cgroup *objcg)
{
}

static inline void obj_cgroup_put(struct obj_cgroup *objcg)
{
}

static inline bool mem_cgroup_tryget(struct mem_cgroup *memcg)
{
        return true;
}

static inline bool mem_cgroup_tryget_online(struct mem_cgroup *memcg)
{
        return true;
}

static inline void mem_cgroup_put(struct mem_cgroup *memcg)
{
}

static inline struct lruvec *folio_lruvec_lock(struct folio *folio)
{
        struct pglist_data *pgdat = folio_pgdat(folio);

        spin_lock(&pgdat->__lruvec.lru_lock);
        return &pgdat->__lruvec;
}

static inline struct lruvec *folio_lruvec_lock_irq(struct folio *folio)
{
        struct pglist_data *pgdat = folio_pgdat(folio);

        spin_lock_irq(&pgdat->__lruvec.lru_lock);
        return &pgdat->__lruvec;
}

static inline struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio,
                unsigned long *flagsp)
{
        struct pglist_data *pgdat = folio_pgdat(folio);

        spin_lock_irqsave(&pgdat->__lruvec.lru_lock, *flagsp);
        return &pgdat->__lruvec;
}

static inline struct mem_cgroup *
mem_cgroup_iter(struct mem_cgroup *root,
                struct mem_cgroup *prev,
                struct mem_cgroup_reclaim_cookie *reclaim)
{
        return NULL;
}

static inline void mem_cgroup_iter_break(struct mem_cgroup *root,
                                         struct mem_cgroup *prev)
{
}

static inline void mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
                int (*fn)(struct task_struct *, void *), void *arg)
{
}

static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
{
        return 0;
}

static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
{
        WARN_ON_ONCE(id);
        /* XXX: This should always return root_mem_cgroup */
        return NULL;
}

#ifdef CONFIG_SHRINKER_DEBUG
static inline unsigned long mem_cgroup_ino(struct mem_cgroup *memcg)
{
        return 0;
}

static inline struct mem_cgroup *mem_cgroup_get_from_ino(unsigned long ino)
{
        return NULL;
}
#endif

static inline struct mem_cgroup *mem_cgroup_from_seq(struct seq_file *m)
{
        return NULL;
}

static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
{
        return NULL;
}

static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
{
        return true;
}

static inline
unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
                                           enum lru_list lru, int zone_idx)
{
        return 0;
}

static inline unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
{
        return 0;
}

static inline unsigned long mem_cgroup_size(struct mem_cgroup *memcg)
{
        return 0;
}

static inline void
mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
{
}

static inline void
mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
{
}

static inline void mem_cgroup_handle_over_high(gfp_t gfp_mask)
{
}

static inline struct mem_cgroup *mem_cgroup_get_oom_group(
        struct task_struct *victim, struct mem_cgroup *oom_domain)
{
        return NULL;
}

static inline void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
{
}

static inline void __mod_memcg_state(struct mem_cgroup *memcg,
                                     enum memcg_stat_item idx,
                                     int nr)
{
}

static inline void mod_memcg_state(struct mem_cgroup *memcg,
                                   enum memcg_stat_item idx,
                                   int nr)
{
}

static inline void mod_memcg_page_state(struct page *page,
                                        enum memcg_stat_item idx, int val)
{
}

static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
{
        return 0;
}

static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
                                              enum node_stat_item idx)
{
        return node_page_state(lruvec_pgdat(lruvec), idx);
}

static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
                                                    enum node_stat_item idx)
{
        return node_page_state(lruvec_pgdat(lruvec), idx);
}

static inline void mem_cgroup_flush_stats(struct mem_cgroup *memcg)
{
}

static inline void mem_cgroup_flush_stats_ratelimited(struct mem_cgroup *memcg)
{
}

static inline void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx,
                                           int val)
{
        struct page *page = virt_to_head_page(p);

        __mod_node_page_state(page_pgdat(page), idx, val);
}

static inline void mod_lruvec_kmem_state(void *p, enum node_stat_item idx,
                                         int val)
{
        struct page *page = virt_to_head_page(p);

        mod_node_page_state(page_pgdat(page), idx, val);
}

static inline void count_memcg_events(struct mem_cgroup *memcg,
                                      enum vm_event_item idx,
                                      unsigned long count)
{
}

static inline void __count_memcg_events(struct mem_cgroup *memcg,
                                        enum vm_event_item idx,
                                        unsigned long count)
{
}

static inline void count_memcg_folio_events(struct folio *folio,
                enum vm_event_item idx, unsigned long nr)
{
}

static inline void count_memcg_events_mm(struct mm_struct *mm,
                                         enum vm_event_item idx,
                                         unsigned long count)
{
}

static inline
void count_memcg_event_mm(struct mm_struct *mm, enum vm_event_item idx)
{
}

static inline void split_page_memcg(struct page *head, int old_order, int new_order)
{
}

static inline void folio_split_memcg_refs(struct folio *folio,
                unsigned old_order, unsigned new_order)
{
}

static inline u64 cgroup_id_from_mm(struct mm_struct *mm)
{
        return 0;
}
#endif /* CONFIG_MEMCG */

/*
 * Extended information for slab objects stored as an array in page->memcg_data
 * if MEMCG_DATA_OBJEXTS is set.
 */
struct slabobj_ext {
#ifdef CONFIG_MEMCG
        struct obj_cgroup *objcg;
#endif
#ifdef CONFIG_MEM_ALLOC_PROFILING
        union codetag_ref ref;
#endif
} __aligned(8);

static inline void __inc_lruvec_kmem_state(void *p, enum node_stat_item idx)
{
        __mod_lruvec_kmem_state(p, idx, 1);
}

static inline void __dec_lruvec_kmem_state(void *p, enum node_stat_item idx)
{
        __mod_lruvec_kmem_state(p, idx, -1);
}

static inline struct lruvec *parent_lruvec(struct lruvec *lruvec)
{
        struct mem_cgroup *memcg;

        memcg = lruvec_memcg(lruvec);
        if (!memcg)
                return NULL;
        memcg = parent_mem_cgroup(memcg);
        if (!memcg)
                return NULL;
        return mem_cgroup_lruvec(memcg, lruvec_pgdat(lruvec));
}

static inline void unlock_page_lruvec(struct lruvec *lruvec)
{
        spin_unlock(&lruvec->lru_lock);
}

static inline void unlock_page_lruvec_irq(struct lruvec *lruvec)
{
        spin_unlock_irq(&lruvec->lru_lock);
}

static inline void unlock_page_lruvec_irqrestore(struct lruvec *lruvec,
                unsigned long flags)
{
        spin_unlock_irqrestore(&lruvec->lru_lock, flags);
}

/* Test requires a stable folio->memcg binding, see folio_memcg() */
static inline bool folio_matches_lruvec(struct folio *folio,
                                        struct lruvec *lruvec)
{
        return lruvec_pgdat(lruvec) == folio_pgdat(folio) &&
               lruvec_memcg(lruvec) == folio_memcg(folio);
}

/* Don't lock again iff folio's lruvec is locked */
static inline struct lruvec *folio_lruvec_relock_irq(struct folio *folio,
                struct lruvec *locked_lruvec)
{
        if (locked_lruvec) {
                if (folio_matches_lruvec(folio, locked_lruvec))
                        return locked_lruvec;

                unlock_page_lruvec_irq(locked_lruvec);
        }

        return folio_lruvec_lock_irq(folio);
}

/* Don't lock again iff folio's lruvec is locked */
static inline void folio_lruvec_relock_irqsave(struct folio *folio,
                struct lruvec **lruvecp, unsigned long *flags)
{
        if (*lruvecp) {
                if (folio_matches_lruvec(folio, *lruvecp))
                        return;

                unlock_page_lruvec_irqrestore(*lruvecp, *flags);
        }

        *lruvecp = folio_lruvec_lock_irqsave(folio, flags);
}
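
/*
 * Illustrative sketch of the batching pattern the relock helpers above are
 * designed for (simplified from the folio_batch users in mm/; the loop body
 * is hypothetical):
 *
 *      struct lruvec *lruvec = NULL;
 *      unsigned long flags;
 *      unsigned int i;
 *
 *      for (i = 0; i < folio_batch_count(fbatch); i++) {
 *              struct folio *folio = fbatch->folios[i];
 *
 *              // only cycles the lock when the folio belongs to a
 *              // different lruvec than the previous one
 *              folio_lruvec_relock_irqsave(folio, &lruvec, &flags);
 *              // ... move folio on/off its LRU list ...
 *      }
 *      if (lruvec)
 *              unlock_page_lruvec_irqrestore(lruvec, flags);
 */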

#ifdef CONFIG_CGROUP_WRITEBACK

struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb);
void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
                         unsigned long *pheadroom, unsigned long *pdirty,
                         unsigned long *pwriteback);

void mem_cgroup_track_foreign_dirty_slowpath(struct folio *folio,
                                             struct bdi_writeback *wb);

static inline void mem_cgroup_track_foreign_dirty(struct folio *folio,
                                                  struct bdi_writeback *wb)
{
        struct mem_cgroup *memcg;

        if (mem_cgroup_disabled())
                return;

        memcg = folio_memcg(folio);
        if (unlikely(memcg && &memcg->css != wb->memcg_css))
                mem_cgroup_track_foreign_dirty_slowpath(folio, wb);
}

void mem_cgroup_flush_foreign(struct bdi_writeback *wb);

#else /* CONFIG_CGROUP_WRITEBACK */

static inline struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
{
        return NULL;
}

static inline void mem_cgroup_wb_stats(struct bdi_writeback *wb,
                                       unsigned long *pfilepages,
                                       unsigned long *pheadroom,
                                       unsigned long *pdirty,
                                       unsigned long *pwriteback)
{
}

static inline void mem_cgroup_track_foreign_dirty(struct folio *folio,
                                                  struct bdi_writeback *wb)
{
}

static inline void mem_cgroup_flush_foreign(struct bdi_writeback *wb)
{
}

#endif  /* CONFIG_CGROUP_WRITEBACK */

struct sock;
bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages,
                             gfp_t gfp_mask);
void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
#ifdef CONFIG_MEMCG
extern struct static_key_false memcg_sockets_enabled_key;
#define mem_cgroup_sockets_enabled static_branch_unlikely(&memcg_sockets_enabled_key)
void mem_cgroup_sk_alloc(struct sock *sk);
void mem_cgroup_sk_free(struct sock *sk);
static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
{
#ifdef CONFIG_MEMCG_V1
        if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
                return !!memcg->tcpmem_pressure;
#endif /* CONFIG_MEMCG_V1 */
        do {
                if (time_before(jiffies, READ_ONCE(memcg->socket_pressure)))
                        return true;
        } while ((memcg = parent_mem_cgroup(memcg)));
        return false;
}

int alloc_shrinker_info(struct mem_cgroup *memcg);
void free_shrinker_info(struct mem_cgroup *memcg);
void set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id);
void reparent_shrinker_deferred(struct mem_cgroup *memcg);
#else
#define mem_cgroup_sockets_enabled 0
static inline void mem_cgroup_sk_alloc(struct sock *sk) { };
static inline void mem_cgroup_sk_free(struct sock *sk) { };
static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
{
        return false;
}

static inline void set_shrinker_bit(struct mem_cgroup *memcg,
                                    int nid, int shrinker_id)
{
}
#endif
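
/*
 * Illustrative sketch (an assumption about how the networking side consumes
 * this interface, not code from this header): protocols charge socket buffer
 * pages against sk->sk_memcg and back off under memcg pressure:
 *
 *      if (mem_cgroup_sockets_enabled && sk->sk_memcg) {
 *              if (!mem_cgroup_charge_skmem(sk->sk_memcg, nr_pages,
 *                                           GFP_NOWAIT | __GFP_NOWARN))
 *                      return -ENOMEM;         // charge failed
 *              if (mem_cgroup_under_socket_pressure(sk->sk_memcg))
 *                      ; // shrink send/receive buffers, defer expansion, etc.
 *      }
 *
 * sk->sk_memcg is the per-socket memcg pointer set up by
 * mem_cgroup_sk_alloc(); the error handling shown is schematic.
 */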

#ifdef CONFIG_MEMCG
bool mem_cgroup_kmem_disabled(void);
int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order);
void __memcg_kmem_uncharge_page(struct page *page, int order);

/*
 * The returned objcg pointer is safe to use without additional
 * protection within a scope. The scope is defined either by
 * the current task (similar to the "current" global variable)
 * or by a set_active_memcg() pair.
 * Please use obj_cgroup_get() to get a reference if the pointer
 * needs to be used outside of the local scope.
 */
struct obj_cgroup *current_obj_cgroup(void);
struct obj_cgroup *get_obj_cgroup_from_folio(struct folio *folio);

static inline struct obj_cgroup *get_obj_cgroup_from_current(void)
{
        struct obj_cgroup *objcg = current_obj_cgroup();

        if (objcg)
                obj_cgroup_get(objcg);

        return objcg;
}

int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size);
void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size);

extern struct static_key_false memcg_bpf_enabled_key;
static inline bool memcg_bpf_enabled(void)
{
        return static_branch_likely(&memcg_bpf_enabled_key);
}

extern struct static_key_false memcg_kmem_online_key;

static inline bool memcg_kmem_online(void)
{
        return static_branch_likely(&memcg_kmem_online_key);
}

static inline int memcg_kmem_charge_page(struct page *page, gfp_t gfp,
                                         int order)
{
        if (memcg_kmem_online())
                return __memcg_kmem_charge_page(page, gfp, order);
        return 0;
}

static inline void memcg_kmem_uncharge_page(struct page *page, int order)
{
        if (memcg_kmem_online())
                __memcg_kmem_uncharge_page(page, order);
}

/*
 * A helper for accessing memcg's kmem_id, used for getting
 * corresponding LRU lists.
 */
static inline int memcg_kmem_id(struct mem_cgroup *memcg)
{
        return memcg ? memcg->kmemcg_id : -1;
}

struct mem_cgroup *mem_cgroup_from_slab_obj(void *p);

static inline void count_objcg_events(struct obj_cgroup *objcg,
                                      enum vm_event_item idx,
                                      unsigned long count)
{
        struct mem_cgroup *memcg;

        if (!memcg_kmem_online())
                return;

        rcu_read_lock();
        memcg = obj_cgroup_memcg(objcg);
        count_memcg_events(memcg, idx, count);
        rcu_read_unlock();
}
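
/*
 * Illustrative sketch (assumptions noted inline): kernel memory is charged
 * either implicitly, by allocating with __GFP_ACCOUNT so the page allocator
 * ends up in memcg_kmem_charge_page(), or explicitly in byte granularity
 * through the current objcg:
 *
 *      // implicit, page-sized: charged to the current memcg
 *      void *buf = (void *)__get_free_page(GFP_KERNEL_ACCOUNT);
 *
 *      // explicit, byte-sized (e.g. for a custom cache):
 *      struct obj_cgroup *objcg = get_obj_cgroup_from_current();
 *
 *      if (objcg) {
 *              if (obj_cgroup_charge(objcg, GFP_KERNEL, 512))
 *                      obj_cgroup_put(objcg); // charge failed, drop the ref
 *              // ... later: obj_cgroup_uncharge(objcg, 512);
 *              //            obj_cgroup_put(objcg);
 *      }
 *
 * GFP_KERNEL_ACCOUNT is GFP_KERNEL | __GFP_ACCOUNT; obj_cgroup_charge()
 * returns 0 on success.  The 512-byte size is arbitrary.
 */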

#else
static inline bool mem_cgroup_kmem_disabled(void)
{
        return true;
}

static inline int memcg_kmem_charge_page(struct page *page, gfp_t gfp,
                                         int order)
{
        return 0;
}

static inline void memcg_kmem_uncharge_page(struct page *page, int order)
{
}

static inline int __memcg_kmem_charge_page(struct page *page, gfp_t gfp,
                                           int order)
{
        return 0;
}

static inline void __memcg_kmem_uncharge_page(struct page *page, int order)
{
}

static inline struct obj_cgroup *get_obj_cgroup_from_folio(struct folio *folio)
{
        return NULL;
}

static inline bool memcg_bpf_enabled(void)
{
        return false;
}

static inline bool memcg_kmem_online(void)
{
        return false;
}

static inline int memcg_kmem_id(struct mem_cgroup *memcg)
{
        return -1;
}

static inline struct mem_cgroup *mem_cgroup_from_slab_obj(void *p)
{
        return NULL;
}

static inline void count_objcg_events(struct obj_cgroup *objcg,
                                      enum vm_event_item idx,
                                      unsigned long count)
{
}

#endif /* CONFIG_MEMCG */

#if defined(CONFIG_MEMCG) && defined(CONFIG_ZSWAP)
bool obj_cgroup_may_zswap(struct obj_cgroup *objcg);
void obj_cgroup_charge_zswap(struct obj_cgroup *objcg, size_t size);
void obj_cgroup_uncharge_zswap(struct obj_cgroup *objcg, size_t size);
bool mem_cgroup_zswap_writeback_enabled(struct mem_cgroup *memcg);
#else
static inline bool obj_cgroup_may_zswap(struct obj_cgroup *objcg)
{
        return true;
}
static inline void obj_cgroup_charge_zswap(struct obj_cgroup *objcg,
                                           size_t size)
{
}
static inline void obj_cgroup_uncharge_zswap(struct obj_cgroup *objcg,
                                             size_t size)
{
}
static inline bool mem_cgroup_zswap_writeback_enabled(struct mem_cgroup *memcg)
{
        /* if zswap is disabled, do not block pages going to the swapping device */
        return true;
}
#endif


/* Cgroup v1-related declarations */

#ifdef CONFIG_MEMCG_V1
unsigned long memcg1_soft_limit_reclaim(pg_data_t *pgdat, int order,
                                        gfp_t gfp_mask,
                                        unsigned long *total_scanned);

bool mem_cgroup_oom_synchronize(bool wait);

static inline bool task_in_memcg_oom(struct task_struct *p)
{
        return p->memcg_in_oom;
}

static inline void mem_cgroup_enter_user_fault(void)
{
        WARN_ON(current->in_user_fault);
        current->in_user_fault = 1;
}

static inline void mem_cgroup_exit_user_fault(void)
{
        WARN_ON(!current->in_user_fault);
        current->in_user_fault = 0;
}

void memcg1_swapout(struct folio *folio, swp_entry_t entry);
void memcg1_swapin(swp_entry_t entry, unsigned int nr_pages);

#else /* CONFIG_MEMCG_V1 */
static inline
unsigned long memcg1_soft_limit_reclaim(pg_data_t *pgdat, int order,
                                        gfp_t gfp_mask,
                                        unsigned long *total_scanned)
{
        return 0;
}

static inline bool task_in_memcg_oom(struct task_struct *p)
{
        return false;
}

static inline bool mem_cgroup_oom_synchronize(bool wait)
{
        return false;
}

static inline void mem_cgroup_enter_user_fault(void)
{
}

static inline void mem_cgroup_exit_user_fault(void)
{
}

static inline void memcg1_swapout(struct folio *folio, swp_entry_t entry)
{
}

static inline void memcg1_swapin(swp_entry_t entry, unsigned int nr_pages)
{
}

#endif /* CONFIG_MEMCG_V1 */

#endif /* _LINUX_MEMCONTROL_H */