/* SPDX-License-Identifier: GPL-2.0-or-later */
/* memcontrol.h - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <[email protected]>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <[email protected]>
 */

#ifndef _LINUX_MEMCONTROL_H
#define _LINUX_MEMCONTROL_H
#include <linux/cgroup.h>
#include <linux/vm_event_item.h>
#include <linux/hardirq.h>
#include <linux/jump_label.h>
#include <linux/page_counter.h>
#include <linux/vmpressure.h>
#include <linux/eventfd.h>
#include <linux/mm.h>
#include <linux/vmstat.h>
#include <linux/writeback.h>
#include <linux/page-flags.h>

struct mem_cgroup;
struct obj_cgroup;
struct page;
struct mm_struct;
struct kmem_cache;

/* Cgroup-specific page state, on top of universal node page state */
enum memcg_stat_item {
	MEMCG_SWAP = NR_VM_NODE_STAT_ITEMS,
	MEMCG_SOCK,
	MEMCG_PERCPU_B,
	MEMCG_VMALLOC,
	MEMCG_KMEM,
	MEMCG_NR_STAT,
};

enum memcg_memory_event {
	MEMCG_LOW,
	MEMCG_HIGH,
	MEMCG_MAX,
	MEMCG_OOM,
	MEMCG_OOM_KILL,
	MEMCG_OOM_GROUP_KILL,
	MEMCG_SWAP_HIGH,
	MEMCG_SWAP_MAX,
	MEMCG_SWAP_FAIL,
	MEMCG_NR_MEMORY_EVENTS,
};

struct mem_cgroup_reclaim_cookie {
	pg_data_t *pgdat;
	unsigned int generation;
};

#ifdef CONFIG_MEMCG

#define MEM_CGROUP_ID_SHIFT	16
#define MEM_CGROUP_ID_MAX	USHRT_MAX

struct mem_cgroup_id {
	int id;
	refcount_t ref;
};

/*
 * The per-memcg event counter is incremented on every pagein/pageout; with
 * THP it is incremented by the number of pages. The counter is used to
 * trigger periodic events, which is simpler and cheaper than using jiffies
 * or similar timekeeping for that purpose.
 */
enum mem_cgroup_events_target {
	MEM_CGROUP_TARGET_THRESH,
	MEM_CGROUP_TARGET_SOFTLIMIT,
	MEM_CGROUP_NTARGETS,
};

struct memcg_vmstats_percpu {
	/* Local (CPU and cgroup) page state & events */
	long			state[MEMCG_NR_STAT];
	unsigned long		events[NR_VM_EVENT_ITEMS];

	/* Delta calculation for lockless upward propagation */
	long			state_prev[MEMCG_NR_STAT];
	unsigned long		events_prev[NR_VM_EVENT_ITEMS];

	/* Cgroup1: threshold notifications & softlimit tree updates */
	unsigned long		nr_page_events;
	unsigned long		targets[MEM_CGROUP_NTARGETS];
};

struct memcg_vmstats {
	/* Aggregated (CPU and subtree) page state & events */
	long			state[MEMCG_NR_STAT];
	unsigned long		events[NR_VM_EVENT_ITEMS];

	/* Pending child counts during tree propagation */
	long			state_pending[MEMCG_NR_STAT];
	unsigned long		events_pending[NR_VM_EVENT_ITEMS];
};

struct mem_cgroup_reclaim_iter {
	struct mem_cgroup *position;
	/* scan generation, increased every round-trip */
	unsigned int generation;
};

/*
 * Bitmap and deferred work of shrinker::id corresponding to memcg-aware
 * shrinkers, which have elements charged to this memcg.
 */
struct shrinker_info {
	struct rcu_head rcu;
	atomic_long_t *nr_deferred;
	unsigned long *map;
};
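/*
 * Illustrative sketch only (not part of this header's API): a memcg-aware
 * shrinker walk would typically take the info under RCU and visit only the
 * shrinker IDs whose bit is set for this memcg. The bound "shrinker_nr_max"
 * is an assumption borrowed from mm/vmscan.c for the example.
 *
 *	rcu_read_lock();
 *	info = rcu_dereference(memcg->nodeinfo[nid]->shrinker_info);
 *	if (info) {
 *		int id;
 *
 *		for_each_set_bit(id, info->map, shrinker_nr_max)
 *			...run the shrinker registered under this id,
 *			   consulting info->nr_deferred[id]...
 *	}
 *	rcu_read_unlock();
 */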
struct lruvec_stats_percpu {
	/* Local (CPU and cgroup) state */
	long state[NR_VM_NODE_STAT_ITEMS];

	/* Delta calculation for lockless upward propagation */
	long state_prev[NR_VM_NODE_STAT_ITEMS];
};

struct lruvec_stats {
	/* Aggregated (CPU and subtree) state */
	long state[NR_VM_NODE_STAT_ITEMS];

	/* Pending child counts during tree propagation */
	long state_pending[NR_VM_NODE_STAT_ITEMS];
};

/*
 * per-node information in memory controller.
 */
struct mem_cgroup_per_node {
	struct lruvec		lruvec;

	struct lruvec_stats_percpu __percpu	*lruvec_stats_percpu;
	struct lruvec_stats			lruvec_stats;

	unsigned long		lru_zone_size[MAX_NR_ZONES][NR_LRU_LISTS];

	struct mem_cgroup_reclaim_iter	iter;

	struct shrinker_info __rcu	*shrinker_info;

	struct rb_node		tree_node;	/* RB tree node */
	unsigned long		usage_in_excess;/* Set to the value by which
						   the soft limit is exceeded */
	bool			on_tree;
	struct mem_cgroup	*memcg;		/* Back pointer, we cannot
						   use container_of */
};

struct mem_cgroup_threshold {
	struct eventfd_ctx *eventfd;
	unsigned long threshold;
};

/* For threshold */
struct mem_cgroup_threshold_ary {
	/* An array index points to threshold just below or equal to usage. */
	int current_threshold;
	/* Size of entries[] */
	unsigned int size;
	/* Array of thresholds */
	struct mem_cgroup_threshold entries[];
};

struct mem_cgroup_thresholds {
	/* Primary thresholds array */
	struct mem_cgroup_threshold_ary *primary;
	/*
	 * Spare threshold array.
	 * This is needed to make mem_cgroup_unregister_event() "never fail".
	 * It must be able to store at least primary->size - 1 entries.
	 */
	struct mem_cgroup_threshold_ary *spare;
};

#if defined(CONFIG_SMP)
struct memcg_padding {
	char x[0];
} ____cacheline_internodealigned_in_smp;
#define MEMCG_PADDING(name)	struct memcg_padding name
#else
#define MEMCG_PADDING(name)
#endif

/*
 * Remember four most recent foreign writebacks with dirty pages in this
 * cgroup. Inode sharing is expected to be uncommon and, even if we miss
 * one in a given round, we're likely to catch it later if it keeps
 * foreign-dirtying, so a fairly low count should be enough.
 *
 * See mem_cgroup_track_foreign_dirty_slowpath() for details.
 */
#define MEMCG_CGWB_FRN_CNT	4

struct memcg_cgwb_frn {
	u64 bdi_id;			/* bdi->id of the foreign inode */
	int memcg_id;			/* memcg->css.id of foreign inode */
	u64 at;				/* jiffies_64 at the time of dirtying */
	struct wb_completion done;	/* tracks in-flight foreign writebacks */
};

/*
 * Bucket for arbitrarily byte-sized objects charged to a memory
 * cgroup. The bucket can be reparented in one piece when the cgroup
 * is destroyed, without having to round up the individual references
 * of all live memory objects in the wild.
 */
struct obj_cgroup {
	struct percpu_ref	refcnt;
	struct mem_cgroup	*memcg;
	atomic_t		nr_charged_bytes;
	union {
		struct list_head list; /* protected by objcg_lock */
		struct rcu_head rcu;
	};
};
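/*
 * Illustrative sketch (not a definitive recipe): charging a byte-sized
 * kernel allocation to the current task's objcg using helpers declared
 * later in this header (obj_cgroup_charge()/obj_cgroup_uncharge() exist
 * only with CONFIG_MEMCG_KMEM). Error handling is abbreviated.
 *
 *	struct obj_cgroup *objcg = get_obj_cgroup_from_current();
 *
 *	if (objcg && !obj_cgroup_charge(objcg, GFP_KERNEL, size)) {
 *		...use the memory, remember objcg for the later...
 *		obj_cgroup_uncharge(objcg, size);
 *	}
 *	if (objcg)
 *		obj_cgroup_put(objcg);
 */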
/*
 * The memory controller data structure. The memory controller controls both
 * page cache and RSS per cgroup. We would eventually like to provide
 * statistics based on the clock-pro work by Rik van Riel, to help the
 * administrator determine what knobs to tune.
 */
struct mem_cgroup {
	struct cgroup_subsys_state css;

	/* Private memcg ID. Used to ID objects that outlive the cgroup */
	struct mem_cgroup_id id;

	/* Accounted resources */
	struct page_counter memory;		/* Both v1 & v2 */

	union {
		struct page_counter swap;	/* v2 only */
		struct page_counter memsw;	/* v1 only */
	};

	/* Legacy consumer-oriented counters */
	struct page_counter kmem;		/* v1 only */
	struct page_counter tcpmem;		/* v1 only */

	/* Range enforcement for interrupt charges */
	struct work_struct high_work;

	unsigned long soft_limit;

	/* vmpressure notifications */
	struct vmpressure vmpressure;

	/*
	 * Should the OOM killer kill all tasks belonging to this cgroup
	 * if it kills one of them?
	 */
	bool oom_group;

	/* protected by memcg_oom_lock */
	bool		oom_lock;
	int		under_oom;

	int	swappiness;
	/* OOM-Killer disable */
	int		oom_kill_disable;

	/* memory.events and memory.events.local */
	struct cgroup_file events_file;
	struct cgroup_file events_local_file;

	/* handle for "memory.swap.events" */
	struct cgroup_file swap_events_file;

	/* protect arrays of thresholds */
	struct mutex thresholds_lock;

	/* thresholds for memory usage. RCU-protected */
	struct mem_cgroup_thresholds thresholds;

	/* thresholds for mem+swap usage. RCU-protected */
	struct mem_cgroup_thresholds memsw_thresholds;

	/* For oom notifier event fd */
	struct list_head oom_notify;

	/*
	 * Should we move charges of a task when a task is moved into this
	 * mem_cgroup? And what type of charges should we move?
	 */
	unsigned long move_charge_at_immigrate;
	/* taken only while moving_account > 0 */
	spinlock_t		move_lock;
	unsigned long		move_lock_flags;

	MEMCG_PADDING(_pad1_);

	/* memory.stat */
	struct memcg_vmstats	vmstats;

	/* memory.events */
	atomic_long_t		memory_events[MEMCG_NR_MEMORY_EVENTS];
	atomic_long_t		memory_events_local[MEMCG_NR_MEMORY_EVENTS];

	unsigned long		socket_pressure;

	/* Legacy tcp memory accounting */
	bool			tcpmem_active;
	int			tcpmem_pressure;

#ifdef CONFIG_MEMCG_KMEM
	int kmemcg_id;
	struct obj_cgroup __rcu *objcg;
	/* list of inherited objcgs, protected by objcg_lock */
	struct list_head objcg_list;
#endif

	MEMCG_PADDING(_pad2_);

	/*
	 * set > 0 if pages under this cgroup are moving to other cgroup.
	 */
	atomic_t		moving_account;
	struct task_struct	*move_lock_task;

	struct memcg_vmstats_percpu __percpu *vmstats_percpu;

#ifdef CONFIG_CGROUP_WRITEBACK
	struct list_head cgwb_list;
	struct wb_domain cgwb_domain;
	struct memcg_cgwb_frn cgwb_frn[MEMCG_CGWB_FRN_CNT];
#endif

	/* List of events which userspace want to receive */
	struct list_head event_list;
	spinlock_t event_list_lock;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	struct deferred_split deferred_split_queue;
#endif

	struct mem_cgroup_per_node *nodeinfo[];
};
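/*
 * Illustrative sketch, not a definitive recipe: the current charge level
 * and the configured hard limit of a memcg can be read directly from the
 * page counters above, e.g.
 *
 *	unsigned long usage = page_counter_read(&memcg->memory);
 *	unsigned long max   = READ_ONCE(memcg->memory.max);
 *
 * Both values are in pages; see also mem_cgroup_size() and
 * mem_cgroup_get_max() declared later in this header.
 */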
/*
 * Size of the first charge trial. "32" comes from vmscan.c's magic value.
 * TODO: larger batches may be necessary on very large systems.
 */
#define MEMCG_CHARGE_BATCH 32U

extern struct mem_cgroup *root_mem_cgroup;

enum page_memcg_data_flags {
	/* page->memcg_data is a pointer to an objcgs vector */
	MEMCG_DATA_OBJCGS = (1UL << 0),
	/* page has been accounted as a non-slab kernel page */
	MEMCG_DATA_KMEM = (1UL << 1),
	/* the next bit after the last actual flag */
	__NR_MEMCG_DATA_FLAGS = (1UL << 2),
};

#define MEMCG_DATA_FLAGS_MASK (__NR_MEMCG_DATA_FLAGS - 1)

static inline bool folio_memcg_kmem(struct folio *folio);

/*
 * After the initialization objcg->memcg is always pointing at
 * a valid memcg, but can be atomically swapped to the parent memcg.
 *
 * The caller must ensure that the returned memcg won't be released:
 * e.g. acquire the rcu_read_lock or css_set_lock.
 */
static inline struct mem_cgroup *obj_cgroup_memcg(struct obj_cgroup *objcg)
{
	return READ_ONCE(objcg->memcg);
}

/*
 * __folio_memcg - Get the memory cgroup associated with a non-kmem folio
 * @folio: Pointer to the folio.
 *
 * Returns a pointer to the memory cgroup associated with the folio,
 * or NULL. This function assumes that the folio is known to have a
 * proper memory cgroup pointer. It's not safe to call this function
 * against some types of folios, e.g. slab folios or ex-slab folios or
 * kmem folios.
 */
static inline struct mem_cgroup *__folio_memcg(struct folio *folio)
{
	unsigned long memcg_data = folio->memcg_data;

	VM_BUG_ON_FOLIO(folio_test_slab(folio), folio);
	VM_BUG_ON_FOLIO(memcg_data & MEMCG_DATA_OBJCGS, folio);
	VM_BUG_ON_FOLIO(memcg_data & MEMCG_DATA_KMEM, folio);

	return (struct mem_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
}

/*
 * __folio_objcg - get the object cgroup associated with a kmem folio.
 * @folio: Pointer to the folio.
 *
 * Returns a pointer to the object cgroup associated with the folio,
 * or NULL. This function assumes that the folio is known to have a
 * proper object cgroup pointer. It's not safe to call this function
 * against some types of folios, e.g. slab folios or ex-slab folios or
 * LRU folios.
 */
static inline struct obj_cgroup *__folio_objcg(struct folio *folio)
{
	unsigned long memcg_data = folio->memcg_data;

	VM_BUG_ON_FOLIO(folio_test_slab(folio), folio);
	VM_BUG_ON_FOLIO(memcg_data & MEMCG_DATA_OBJCGS, folio);
	VM_BUG_ON_FOLIO(!(memcg_data & MEMCG_DATA_KMEM), folio);

	return (struct obj_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
}
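/*
 * For orientation, an illustrative view of the folio->memcg_data encoding
 * consumed by the accessors above (a sketch, not a normative layout
 * definition):
 *
 *	 63                                        2   1   0
 *	+------------------------------------------+---+---+
 *	|  pointer to struct mem_cgroup /          | K | O |
 *	|  struct obj_cgroup / objcgs vector       |   |   |
 *	+------------------------------------------+---+---+
 *
 * O = MEMCG_DATA_OBJCGS, K = MEMCG_DATA_KMEM. Masking with
 * ~MEMCG_DATA_FLAGS_MASK recovers the pointer; the low bits select how to
 * interpret it.
 */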
/*
 * folio_memcg - Get the memory cgroup associated with a folio.
 * @folio: Pointer to the folio.
 *
 * Returns a pointer to the memory cgroup associated with the folio,
 * or NULL. This function assumes that the folio is known to have a
 * proper memory cgroup pointer. It's not safe to call this function
 * against some types of folios, e.g. slab folios or ex-slab folios.
 *
 * For a non-kmem folio any of the following ensures folio and memcg binding
 * stability:
 *
 * - the folio lock
 * - LRU isolation
 * - lock_page_memcg()
 * - exclusive reference
 *
 * For a kmem folio a caller should hold an rcu read lock to protect memcg
 * associated with a kmem folio from being released.
 */
static inline struct mem_cgroup *folio_memcg(struct folio *folio)
{
	if (folio_memcg_kmem(folio))
		return obj_cgroup_memcg(__folio_objcg(folio));
	return __folio_memcg(folio);
}

static inline struct mem_cgroup *page_memcg(struct page *page)
{
	return folio_memcg(page_folio(page));
}

/**
 * folio_memcg_rcu - Locklessly get the memory cgroup associated with a folio.
 * @folio: Pointer to the folio.
 *
 * This function assumes that the folio is known to have a
 * proper memory cgroup pointer. It's not safe to call this function
 * against some types of folios, e.g. slab folios or ex-slab folios.
 *
 * Return: A pointer to the memory cgroup associated with the folio,
 * or NULL.
 */
static inline struct mem_cgroup *folio_memcg_rcu(struct folio *folio)
{
	unsigned long memcg_data = READ_ONCE(folio->memcg_data);

	VM_BUG_ON_FOLIO(folio_test_slab(folio), folio);
	WARN_ON_ONCE(!rcu_read_lock_held());

	if (memcg_data & MEMCG_DATA_KMEM) {
		struct obj_cgroup *objcg;

		objcg = (void *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
		return obj_cgroup_memcg(objcg);
	}

	return (struct mem_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
}

/*
 * page_memcg_check - get the memory cgroup associated with a page
 * @page: a pointer to the page struct
 *
 * Returns a pointer to the memory cgroup associated with the page,
 * or NULL. Unlike page_memcg(), this function can take any page
 * as an argument. It has to be used in cases when it's not known if a page
 * has an associated memory cgroup pointer or an object cgroups vector or
 * an object cgroup.
 *
 * For a non-kmem page any of the following ensures page and memcg binding
 * stability:
 *
 * - the page lock
 * - LRU isolation
 * - lock_page_memcg()
 * - exclusive reference
 *
 * For a kmem page a caller should hold an rcu read lock to protect memcg
 * associated with a kmem page from being released.
 */
static inline struct mem_cgroup *page_memcg_check(struct page *page)
{
	/*
	 * Because page->memcg_data might be changed asynchronously
	 * for slab pages, READ_ONCE() should be used here.
	 */
	unsigned long memcg_data = READ_ONCE(page->memcg_data);

	if (memcg_data & MEMCG_DATA_OBJCGS)
		return NULL;

	if (memcg_data & MEMCG_DATA_KMEM) {
		struct obj_cgroup *objcg;

		objcg = (void *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
		return obj_cgroup_memcg(objcg);
	}

	return (struct mem_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
}

static inline struct mem_cgroup *get_mem_cgroup_from_objcg(struct obj_cgroup *objcg)
{
	struct mem_cgroup *memcg;

	rcu_read_lock();
retry:
	memcg = obj_cgroup_memcg(objcg);
	if (unlikely(!css_tryget(&memcg->css)))
		goto retry;
	rcu_read_unlock();

	return memcg;
}
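/*
 * Illustrative sketch (hypothetical caller, abbreviated): reading the memcg
 * of an LRU folio while one of the stability conditions listed above holds,
 * here the folio lock.
 *
 *	folio_lock(folio);
 *	memcg = folio_memcg(folio);
 *	if (memcg)
 *		...the binding cannot change until folio_unlock(folio)...
 *	folio_unlock(folio);
 *
 * For lockless readers, folio_memcg_rcu() above is the RCU-protected
 * variant.
 */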
#ifdef CONFIG_MEMCG_KMEM
/*
 * folio_memcg_kmem - Check if the folio has the memcg_kmem flag set.
 * @folio: Pointer to the folio.
 *
 * Checks if the folio has the MemcgKmem flag set. The caller must ensure
 * that the folio has an associated memory cgroup. It's not safe to call
 * this function against some types of folios, e.g. slab folios.
 */
static inline bool folio_memcg_kmem(struct folio *folio)
{
	VM_BUG_ON_PGFLAGS(PageTail(&folio->page), &folio->page);
	VM_BUG_ON_FOLIO(folio->memcg_data & MEMCG_DATA_OBJCGS, folio);
	return folio->memcg_data & MEMCG_DATA_KMEM;
}

#else
static inline bool folio_memcg_kmem(struct folio *folio)
{
	return false;
}

#endif

static inline bool PageMemcgKmem(struct page *page)
{
	return folio_memcg_kmem(page_folio(page));
}

static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
{
	return (memcg == root_mem_cgroup);
}

static inline bool mem_cgroup_disabled(void)
{
	return !cgroup_subsys_enabled(memory_cgrp_subsys);
}

static inline void mem_cgroup_protection(struct mem_cgroup *root,
					 struct mem_cgroup *memcg,
					 unsigned long *min,
					 unsigned long *low)
{
	*min = *low = 0;

	if (mem_cgroup_disabled())
		return;

	/*
	 * There is no reclaim protection applied to a targeted reclaim.
	 * We are special casing this specific case here because
	 * mem_cgroup_protected calculation is not robust enough to keep
	 * the protection invariant for calculated effective values for
	 * parallel reclaimers with different reclaim target. This is
	 * especially a problem for tail memcgs (as they have pages on LRU)
	 * which would want to have effective values 0 for targeted reclaim
	 * but a different value for external reclaim.
	 *
	 * Example
	 * Let's have global and A's reclaim in parallel:
	 *  |
	 *  A (low=2G, usage = 3G, max = 3G, children_low_usage = 1.5G)
	 *  |\
	 *  | C (low = 1G, usage = 2.5G)
	 *  B (low = 1G, usage = 0.5G)
	 *
	 * For the global reclaim
	 * A.elow = A.low
	 * B.elow = min(B.usage, B.low) because children_low_usage <= A.elow
	 * C.elow = min(C.usage, C.low)
	 *
	 * With the effective values resetting we have A reclaim
	 * A.elow = 0
	 * B.elow = B.low
	 * C.elow = C.low
	 *
	 * If the global reclaim races with A's reclaim then
	 * B.elow = C.elow = 0 because children_low_usage > A.elow
	 * is possible and reclaiming B would be violating the protection.
	 */
	if (root == memcg)
		return;

	*min = READ_ONCE(memcg->memory.emin);
	*low = READ_ONCE(memcg->memory.elow);
}

void mem_cgroup_calculate_protection(struct mem_cgroup *root,
				     struct mem_cgroup *memcg);

static inline bool mem_cgroup_supports_protection(struct mem_cgroup *memcg)
{
	/*
	 * The root memcg doesn't account charges, and doesn't support
	 * protection.
	 */
	return !mem_cgroup_disabled() && !mem_cgroup_is_root(memcg);
}

static inline bool mem_cgroup_below_low(struct mem_cgroup *memcg)
{
	if (!mem_cgroup_supports_protection(memcg))
		return false;

	return READ_ONCE(memcg->memory.elow) >=
		page_counter_read(&memcg->memory);
}

static inline bool mem_cgroup_below_min(struct mem_cgroup *memcg)
{
	if (!mem_cgroup_supports_protection(memcg))
		return false;

	return READ_ONCE(memcg->memory.emin) >=
		page_counter_read(&memcg->memory);
}
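/*
 * Illustrative sketch of how a reclaimer might consume the protection
 * helpers above (loosely modelled on mm/vmscan.c; the surrounding loop and
 * scan bookkeeping are elided):
 *
 *	mem_cgroup_calculate_protection(target_memcg, memcg);
 *
 *	if (mem_cgroup_below_min(memcg)) {
 *		continue;			...hard protection, skip...
 *	} else if (mem_cgroup_below_low(memcg)) {
 *		if (!sc->memcg_low_reclaim) {	...soft protection...
 *			sc->memcg_low_skipped = 1;
 *			continue;
 *		}
 *		memcg_memory_event(memcg, MEMCG_LOW);
 *	}
 *
 * Fields such as sc->memcg_low_reclaim belong to vmscan's scan_control and
 * are shown here only for orientation.
 */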
int __mem_cgroup_charge(struct folio *folio, struct mm_struct *mm, gfp_t gfp);

/**
 * mem_cgroup_charge - Charge a newly allocated folio to a cgroup.
 * @folio: Folio to charge.
 * @mm: mm context of the allocating task.
 * @gfp: Reclaim mode.
 *
 * Try to charge @folio to the memcg that @mm belongs to, reclaiming
 * pages according to @gfp if necessary. If @mm is NULL, try to
 * charge to the active memcg.
 *
 * Do not use this for folios allocated for swapin.
 *
 * Return: 0 on success. Otherwise, an error code is returned.
 */
static inline int mem_cgroup_charge(struct folio *folio, struct mm_struct *mm,
				    gfp_t gfp)
{
	if (mem_cgroup_disabled())
		return 0;
	return __mem_cgroup_charge(folio, mm, gfp);
}

int mem_cgroup_swapin_charge_page(struct page *page, struct mm_struct *mm,
				  gfp_t gfp, swp_entry_t entry);
void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry);

void __mem_cgroup_uncharge(struct folio *folio);

/**
 * mem_cgroup_uncharge - Uncharge a folio.
 * @folio: Folio to uncharge.
 *
 * Uncharge a folio previously charged with mem_cgroup_charge().
 */
static inline void mem_cgroup_uncharge(struct folio *folio)
{
	if (mem_cgroup_disabled())
		return;
	__mem_cgroup_uncharge(folio);
}

void __mem_cgroup_uncharge_list(struct list_head *page_list);
static inline void mem_cgroup_uncharge_list(struct list_head *page_list)
{
	if (mem_cgroup_disabled())
		return;
	__mem_cgroup_uncharge_list(page_list);
}

void mem_cgroup_migrate(struct folio *old, struct folio *new);
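/*
 * Illustrative sketch of the charge/uncharge pairing above for a freshly
 * allocated pagecache folio (hypothetical caller, error unwinding
 * abbreviated):
 *
 *	folio = filemap_alloc_folio(GFP_KERNEL, 0);
 *	if (!folio)
 *		return -ENOMEM;
 *	if (mem_cgroup_charge(folio, mm, GFP_KERNEL)) {
 *		folio_put(folio);
 *		return -ENOMEM;
 *	}
 *	...
 *
 * On teardown the charge is dropped with mem_cgroup_uncharge(folio),
 * normally from the generic page freeing code.
 */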
/**
 * mem_cgroup_lruvec - get the lru list vector for a memcg & node
 * @memcg: memcg of the wanted lruvec
 * @pgdat: pglist_data
 *
 * Returns the lru list vector holding pages for a given @memcg &
 * @pgdat combination. This can be the node lruvec, if the memory
 * controller is disabled.
 */
static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg,
					       struct pglist_data *pgdat)
{
	struct mem_cgroup_per_node *mz;
	struct lruvec *lruvec;

	if (mem_cgroup_disabled()) {
		lruvec = &pgdat->__lruvec;
		goto out;
	}

	if (!memcg)
		memcg = root_mem_cgroup;

	mz = memcg->nodeinfo[pgdat->node_id];
	lruvec = &mz->lruvec;
out:
	/*
	 * Since a node can be onlined after the mem_cgroup was created,
	 * we have to be prepared to initialize lruvec->pgdat here;
	 * and if offlined then reonlined, we need to reinitialize it.
	 */
	if (unlikely(lruvec->pgdat != pgdat))
		lruvec->pgdat = pgdat;
	return lruvec;
}

/**
 * folio_lruvec - return lruvec for isolating/putting an LRU folio
 * @folio: Pointer to the folio.
 *
 * This function relies on folio->mem_cgroup being stable.
 */
static inline struct lruvec *folio_lruvec(struct folio *folio)
{
	struct mem_cgroup *memcg = folio_memcg(folio);

	VM_WARN_ON_ONCE_FOLIO(!memcg && !mem_cgroup_disabled(), folio);
	return mem_cgroup_lruvec(memcg, folio_pgdat(folio));
}

struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);

struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm);

struct lruvec *folio_lruvec_lock(struct folio *folio);
struct lruvec *folio_lruvec_lock_irq(struct folio *folio);
struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio,
					 unsigned long *flags);

#ifdef CONFIG_DEBUG_VM
void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio);
#else
static inline
void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio)
{
}
#endif

static inline
struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css)
{
	return css ? container_of(css, struct mem_cgroup, css) : NULL;
}

static inline bool obj_cgroup_tryget(struct obj_cgroup *objcg)
{
	return percpu_ref_tryget(&objcg->refcnt);
}

static inline void obj_cgroup_get(struct obj_cgroup *objcg)
{
	percpu_ref_get(&objcg->refcnt);
}

static inline void obj_cgroup_get_many(struct obj_cgroup *objcg,
				       unsigned long nr)
{
	percpu_ref_get_many(&objcg->refcnt, nr);
}

static inline void obj_cgroup_put(struct obj_cgroup *objcg)
{
	percpu_ref_put(&objcg->refcnt);
}

static inline void mem_cgroup_put(struct mem_cgroup *memcg)
{
	if (memcg)
		css_put(&memcg->css);
}

#define mem_cgroup_from_counter(counter, member)	\
	container_of(counter, struct mem_cgroup, member)

struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
				   struct mem_cgroup *,
				   struct mem_cgroup_reclaim_cookie *);
void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
int mem_cgroup_scan_tasks(struct mem_cgroup *,
			  int (*)(struct task_struct *, void *), void *);

static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
{
	if (mem_cgroup_disabled())
		return 0;

	return memcg->id.id;
}
struct mem_cgroup *mem_cgroup_from_id(unsigned short id);

static inline struct mem_cgroup *mem_cgroup_from_seq(struct seq_file *m)
{
	return mem_cgroup_from_css(seq_css(m));
}

static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
{
	struct mem_cgroup_per_node *mz;

	if (mem_cgroup_disabled())
		return NULL;

	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	return mz->memcg;
}
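/*
 * Illustrative sketch: the canonical hierarchy walk built on
 * mem_cgroup_iter() above (the same pattern mm/memcontrol.c wraps in its
 * for_each_mem_cgroup_tree() helper).
 *
 *	struct mem_cgroup *iter;
 *
 *	for (iter = mem_cgroup_iter(root, NULL, NULL);
 *	     iter;
 *	     iter = mem_cgroup_iter(root, iter, NULL)) {
 *		...visit iter...
 *		if (need_to_stop_early) {
 *			mem_cgroup_iter_break(root, iter);
 *			break;
 *		}
 *	}
 */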
/**
 * parent_mem_cgroup - find the accounting parent of a memcg
 * @memcg: memcg whose parent to find
 *
 * Returns the parent memcg, or NULL if this is the root or the memory
 * controller is in legacy no-hierarchy mode.
 */
static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
{
	return mem_cgroup_from_css(memcg->css.parent);
}

static inline bool mem_cgroup_is_descendant(struct mem_cgroup *memcg,
					    struct mem_cgroup *root)
{
	if (root == memcg)
		return true;
	return cgroup_is_descendant(memcg->css.cgroup, root->css.cgroup);
}

static inline bool mm_match_cgroup(struct mm_struct *mm,
				   struct mem_cgroup *memcg)
{
	struct mem_cgroup *task_memcg;
	bool match = false;

	rcu_read_lock();
	task_memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
	if (task_memcg)
		match = mem_cgroup_is_descendant(task_memcg, memcg);
	rcu_read_unlock();
	return match;
}

struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page);
ino_t page_cgroup_ino(struct page *page);

static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
{
	if (mem_cgroup_disabled())
		return true;
	return !!(memcg->css.flags & CSS_ONLINE);
}

void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
				int zid, int nr_pages);

static inline
unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
					   enum lru_list lru, int zone_idx)
{
	struct mem_cgroup_per_node *mz;

	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	return READ_ONCE(mz->lru_zone_size[zone_idx][lru]);
}

void mem_cgroup_handle_over_high(void);

unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg);

unsigned long mem_cgroup_size(struct mem_cgroup *memcg);

void mem_cgroup_print_oom_context(struct mem_cgroup *memcg,
				  struct task_struct *p);

void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg);

static inline void mem_cgroup_enter_user_fault(void)
{
	WARN_ON(current->in_user_fault);
	current->in_user_fault = 1;
}

static inline void mem_cgroup_exit_user_fault(void)
{
	WARN_ON(!current->in_user_fault);
	current->in_user_fault = 0;
}

static inline bool task_in_memcg_oom(struct task_struct *p)
{
	return p->memcg_in_oom;
}

bool mem_cgroup_oom_synchronize(bool wait);
struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim,
					    struct mem_cgroup *oom_domain);
void mem_cgroup_print_oom_group(struct mem_cgroup *memcg);

void folio_memcg_lock(struct folio *folio);
void folio_memcg_unlock(struct folio *folio);
void lock_page_memcg(struct page *page);
void unlock_page_memcg(struct page *page);

void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val);

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void mod_memcg_state(struct mem_cgroup *memcg,
				   int idx, int val)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_memcg_state(memcg, idx, val);
	local_irq_restore(flags);
}

static inline void mod_memcg_page_state(struct page *page,
					int idx, int val)
{
	struct mem_cgroup *memcg;

	if (mem_cgroup_disabled())
		return;

	rcu_read_lock();
	memcg = page_memcg(page);
	if (memcg)
		mod_memcg_state(memcg, idx, val);
	rcu_read_unlock();
}

static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
{
	return READ_ONCE(memcg->vmstats.state[idx]);
}
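/*
 * Illustrative sketch only: bumping and reading a memcg counter with the
 * helpers above. The flusher is declared a little further down.
 *
 *	mod_memcg_state(memcg, MEMCG_SOCK, nr_pages);	...charge-side update...
 *	...
 *	mem_cgroup_flush_stats();			...fold percpu deltas...
 *	pages = memcg_page_state(memcg, MEMCG_SOCK);	...aggregated readout...
 *
 * Updates land in the per-CPU memcg_vmstats_percpu::state[] and are only
 * folded into memcg_vmstats::state[] (what memcg_page_state() reads) by a
 * stats flush.
 */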
static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
					      enum node_stat_item idx)
{
	struct mem_cgroup_per_node *pn;

	if (mem_cgroup_disabled())
		return node_page_state(lruvec_pgdat(lruvec), idx);

	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	return READ_ONCE(pn->lruvec_stats.state[idx]);
}

static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
						    enum node_stat_item idx)
{
	struct mem_cgroup_per_node *pn;
	long x = 0;
	int cpu;

	if (mem_cgroup_disabled())
		return node_page_state(lruvec_pgdat(lruvec), idx);

	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	for_each_possible_cpu(cpu)
		x += per_cpu(pn->lruvec_stats_percpu->state[idx], cpu);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

void mem_cgroup_flush_stats(void);
void mem_cgroup_flush_stats_delayed(void);

void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
			      int val);
void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val);

static inline void mod_lruvec_kmem_state(void *p, enum node_stat_item idx,
					 int val)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_lruvec_kmem_state(p, idx, val);
	local_irq_restore(flags);
}

static inline void mod_memcg_lruvec_state(struct lruvec *lruvec,
					  enum node_stat_item idx, int val)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_memcg_lruvec_state(lruvec, idx, val);
	local_irq_restore(flags);
}

void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
			  unsigned long count);

static inline void count_memcg_events(struct mem_cgroup *memcg,
				      enum vm_event_item idx,
				      unsigned long count)
{
	unsigned long flags;

	local_irq_save(flags);
	__count_memcg_events(memcg, idx, count);
	local_irq_restore(flags);
}

static inline void count_memcg_page_event(struct page *page,
					  enum vm_event_item idx)
{
	struct mem_cgroup *memcg = page_memcg(page);

	if (memcg)
		count_memcg_events(memcg, idx, 1);
}

static inline void count_memcg_event_mm(struct mm_struct *mm,
					enum vm_event_item idx)
{
	struct mem_cgroup *memcg;

	if (mem_cgroup_disabled())
		return;

	rcu_read_lock();
	memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
	if (likely(memcg))
		count_memcg_events(memcg, idx, 1);
	rcu_read_unlock();
}

static inline void memcg_memory_event(struct mem_cgroup *memcg,
				      enum memcg_memory_event event)
{
	bool swap_event = event == MEMCG_SWAP_HIGH || event == MEMCG_SWAP_MAX ||
			  event == MEMCG_SWAP_FAIL;

	atomic_long_inc(&memcg->memory_events_local[event]);
	if (!swap_event)
		cgroup_file_notify(&memcg->events_local_file);

	do {
		atomic_long_inc(&memcg->memory_events[event]);
		if (swap_event)
			cgroup_file_notify(&memcg->swap_events_file);
		else
			cgroup_file_notify(&memcg->events_file);

		if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
			break;
		if (cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_LOCAL_EVENTS)
			break;
	} while ((memcg = parent_mem_cgroup(memcg)) &&
		 !mem_cgroup_is_root(memcg));
}
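/*
 * Illustrative sketch: how a charge path might raise one of the memory
 * event notifications above (loosely following what the charge code in
 * mm/memcontrol.c does when the hard limit is hit):
 *
 *	if (over_limit) {
 *		memcg_memory_event(memcg, MEMCG_MAX);
 *		...try reclaim, then possibly...
 *		memcg_memory_event(memcg, MEMCG_OOM);
 *	}
 *
 * On cgroup2 the counter bump and the "memory.events" notification
 * propagate up to the ancestors unless CGRP_ROOT_MEMORY_LOCAL_EVENTS is
 * set, as implemented in memcg_memory_event() above.
 */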
static inline void memcg_memory_event_mm(struct mm_struct *mm,
					 enum memcg_memory_event event)
{
	struct mem_cgroup *memcg;

	if (mem_cgroup_disabled())
		return;

	rcu_read_lock();
	memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
	if (likely(memcg))
		memcg_memory_event(memcg, event);
	rcu_read_unlock();
}

void split_page_memcg(struct page *head, unsigned int nr);

unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
					    gfp_t gfp_mask,
					    unsigned long *total_scanned);

#else /* CONFIG_MEMCG */

#define MEM_CGROUP_ID_SHIFT	0
#define MEM_CGROUP_ID_MAX	0

static inline struct mem_cgroup *folio_memcg(struct folio *folio)
{
	return NULL;
}

static inline struct mem_cgroup *page_memcg(struct page *page)
{
	return NULL;
}

static inline struct mem_cgroup *folio_memcg_rcu(struct folio *folio)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return NULL;
}

static inline struct mem_cgroup *page_memcg_check(struct page *page)
{
	return NULL;
}

static inline bool folio_memcg_kmem(struct folio *folio)
{
	return false;
}

static inline bool PageMemcgKmem(struct page *page)
{
	return false;
}

static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
{
	return true;
}

static inline bool mem_cgroup_disabled(void)
{
	return true;
}

static inline void memcg_memory_event(struct mem_cgroup *memcg,
				      enum memcg_memory_event event)
{
}

static inline void memcg_memory_event_mm(struct mm_struct *mm,
					 enum memcg_memory_event event)
{
}

static inline void mem_cgroup_protection(struct mem_cgroup *root,
					 struct mem_cgroup *memcg,
					 unsigned long *min,
					 unsigned long *low)
{
	*min = *low = 0;
}

static inline void mem_cgroup_calculate_protection(struct mem_cgroup *root,
						   struct mem_cgroup *memcg)
{
}

static inline bool mem_cgroup_below_low(struct mem_cgroup *memcg)
{
	return false;
}

static inline bool mem_cgroup_below_min(struct mem_cgroup *memcg)
{
	return false;
}

static inline int mem_cgroup_charge(struct folio *folio,
				    struct mm_struct *mm, gfp_t gfp)
{
	return 0;
}

static inline int mem_cgroup_swapin_charge_page(struct page *page,
			struct mm_struct *mm, gfp_t gfp, swp_entry_t entry)
{
	return 0;
}

static inline void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry)
{
}

static inline void mem_cgroup_uncharge(struct folio *folio)
{
}

static inline void mem_cgroup_uncharge_list(struct list_head *page_list)
{
}

static inline void mem_cgroup_migrate(struct folio *old, struct folio *new)
{
}

static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg,
					       struct pglist_data *pgdat)
{
	return &pgdat->__lruvec;
}

static inline struct lruvec *folio_lruvec(struct folio *folio)
{
	struct pglist_data *pgdat = folio_pgdat(folio);
	return &pgdat->__lruvec;
}

static inline
void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio)
{
}

static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
{
	return NULL;
}

static inline bool mm_match_cgroup(struct mm_struct *mm,
				   struct mem_cgroup *memcg)
{
	return true;
}
static inline struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
{
	return NULL;
}

static inline
struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css)
{
	return NULL;
}

static inline void mem_cgroup_put(struct mem_cgroup *memcg)
{
}

static inline struct lruvec *folio_lruvec_lock(struct folio *folio)
{
	struct pglist_data *pgdat = folio_pgdat(folio);

	spin_lock(&pgdat->__lruvec.lru_lock);
	return &pgdat->__lruvec;
}

static inline struct lruvec *folio_lruvec_lock_irq(struct folio *folio)
{
	struct pglist_data *pgdat = folio_pgdat(folio);

	spin_lock_irq(&pgdat->__lruvec.lru_lock);
	return &pgdat->__lruvec;
}

static inline struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio,
						       unsigned long *flagsp)
{
	struct pglist_data *pgdat = folio_pgdat(folio);

	spin_lock_irqsave(&pgdat->__lruvec.lru_lock, *flagsp);
	return &pgdat->__lruvec;
}

static inline struct mem_cgroup *
mem_cgroup_iter(struct mem_cgroup *root,
		struct mem_cgroup *prev,
		struct mem_cgroup_reclaim_cookie *reclaim)
{
	return NULL;
}

static inline void mem_cgroup_iter_break(struct mem_cgroup *root,
					 struct mem_cgroup *prev)
{
}

static inline int mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
		int (*fn)(struct task_struct *, void *), void *arg)
{
	return 0;
}

static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
{
	return 0;
}

static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
{
	WARN_ON_ONCE(id);
	/* XXX: This should always return root_mem_cgroup */
	return NULL;
}

static inline struct mem_cgroup *mem_cgroup_from_seq(struct seq_file *m)
{
	return NULL;
}

static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
{
	return NULL;
}

static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
{
	return true;
}

static inline
unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
					   enum lru_list lru, int zone_idx)
{
	return 0;
}

static inline unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
{
	return 0;
}

static inline unsigned long mem_cgroup_size(struct mem_cgroup *memcg)
{
	return 0;
}

static inline void
mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
{
}

static inline void
mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
{
}

static inline void lock_page_memcg(struct page *page)
{
}

static inline void unlock_page_memcg(struct page *page)
{
}

static inline void folio_memcg_lock(struct folio *folio)
{
}

static inline void folio_memcg_unlock(struct folio *folio)
{
}

static inline void mem_cgroup_handle_over_high(void)
{
}

static inline void mem_cgroup_enter_user_fault(void)
{
}

static inline void mem_cgroup_exit_user_fault(void)
{
}

static inline bool task_in_memcg_oom(struct task_struct *p)
{
	return false;
}

static inline bool mem_cgroup_oom_synchronize(bool wait)
{
	return false;
}
static inline struct mem_cgroup *mem_cgroup_get_oom_group(
	struct task_struct *victim, struct mem_cgroup *oom_domain)
{
	return NULL;
}

static inline void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
{
}

static inline void __mod_memcg_state(struct mem_cgroup *memcg,
				     int idx,
				     int nr)
{
}

static inline void mod_memcg_state(struct mem_cgroup *memcg,
				   int idx,
				   int nr)
{
}

static inline void mod_memcg_page_state(struct page *page,
					int idx, int val)
{
}

static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
{
	return 0;
}

static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
					      enum node_stat_item idx)
{
	return node_page_state(lruvec_pgdat(lruvec), idx);
}

static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
						    enum node_stat_item idx)
{
	return node_page_state(lruvec_pgdat(lruvec), idx);
}

static inline void mem_cgroup_flush_stats(void)
{
}

static inline void mem_cgroup_flush_stats_delayed(void)
{
}

static inline void __mod_memcg_lruvec_state(struct lruvec *lruvec,
					    enum node_stat_item idx, int val)
{
}

static inline void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx,
					   int val)
{
	struct page *page = virt_to_head_page(p);

	__mod_node_page_state(page_pgdat(page), idx, val);
}

static inline void mod_lruvec_kmem_state(void *p, enum node_stat_item idx,
					 int val)
{
	struct page *page = virt_to_head_page(p);

	mod_node_page_state(page_pgdat(page), idx, val);
}

static inline void count_memcg_events(struct mem_cgroup *memcg,
				      enum vm_event_item idx,
				      unsigned long count)
{
}

static inline void __count_memcg_events(struct mem_cgroup *memcg,
					enum vm_event_item idx,
					unsigned long count)
{
}

static inline void count_memcg_page_event(struct page *page,
					  int idx)
{
}

static inline
void count_memcg_event_mm(struct mm_struct *mm, enum vm_event_item idx)
{
}

static inline void split_page_memcg(struct page *head, unsigned int nr)
{
}

static inline
unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
					    gfp_t gfp_mask,
					    unsigned long *total_scanned)
{
	return 0;
}
#endif /* CONFIG_MEMCG */

static inline void __inc_lruvec_kmem_state(void *p, enum node_stat_item idx)
{
	__mod_lruvec_kmem_state(p, idx, 1);
}

static inline void __dec_lruvec_kmem_state(void *p, enum node_stat_item idx)
{
	__mod_lruvec_kmem_state(p, idx, -1);
}

static inline struct lruvec *parent_lruvec(struct lruvec *lruvec)
{
	struct mem_cgroup *memcg;

	memcg = lruvec_memcg(lruvec);
	if (!memcg)
		return NULL;
	memcg = parent_mem_cgroup(memcg);
	if (!memcg)
		return NULL;
	return mem_cgroup_lruvec(memcg, lruvec_pgdat(lruvec));
}

static inline void unlock_page_lruvec(struct lruvec *lruvec)
{
	spin_unlock(&lruvec->lru_lock);
}

static inline void unlock_page_lruvec_irq(struct lruvec *lruvec)
{
	spin_unlock_irq(&lruvec->lru_lock);
}
static inline void unlock_page_lruvec_irqrestore(struct lruvec *lruvec,
						 unsigned long flags)
{
	spin_unlock_irqrestore(&lruvec->lru_lock, flags);
}

/* Test requires a stable page->memcg binding, see page_memcg() */
static inline bool folio_matches_lruvec(struct folio *folio,
					struct lruvec *lruvec)
{
	return lruvec_pgdat(lruvec) == folio_pgdat(folio) &&
	       lruvec_memcg(lruvec) == folio_memcg(folio);
}

/* Don't lock again iff page's lruvec locked */
static inline struct lruvec *folio_lruvec_relock_irq(struct folio *folio,
		struct lruvec *locked_lruvec)
{
	if (locked_lruvec) {
		if (folio_matches_lruvec(folio, locked_lruvec))
			return locked_lruvec;

		unlock_page_lruvec_irq(locked_lruvec);
	}

	return folio_lruvec_lock_irq(folio);
}

/* Don't lock again iff page's lruvec locked */
static inline struct lruvec *folio_lruvec_relock_irqsave(struct folio *folio,
		struct lruvec *locked_lruvec, unsigned long *flags)
{
	if (locked_lruvec) {
		if (folio_matches_lruvec(folio, locked_lruvec))
			return locked_lruvec;

		unlock_page_lruvec_irqrestore(locked_lruvec, *flags);
	}

	return folio_lruvec_lock_irqsave(folio, flags);
}

#ifdef CONFIG_CGROUP_WRITEBACK

struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb);
void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
			 unsigned long *pheadroom, unsigned long *pdirty,
			 unsigned long *pwriteback);

void mem_cgroup_track_foreign_dirty_slowpath(struct folio *folio,
					     struct bdi_writeback *wb);

static inline void mem_cgroup_track_foreign_dirty(struct folio *folio,
						  struct bdi_writeback *wb)
{
	if (mem_cgroup_disabled())
		return;

	if (unlikely(&folio_memcg(folio)->css != wb->memcg_css))
		mem_cgroup_track_foreign_dirty_slowpath(folio, wb);
}

void mem_cgroup_flush_foreign(struct bdi_writeback *wb);

#else	/* CONFIG_CGROUP_WRITEBACK */

static inline struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
{
	return NULL;
}

static inline void mem_cgroup_wb_stats(struct bdi_writeback *wb,
				       unsigned long *pfilepages,
				       unsigned long *pheadroom,
				       unsigned long *pdirty,
				       unsigned long *pwriteback)
{
}

static inline void mem_cgroup_track_foreign_dirty(struct folio *folio,
						  struct bdi_writeback *wb)
{
}

static inline void mem_cgroup_flush_foreign(struct bdi_writeback *wb)
{
}

#endif	/* CONFIG_CGROUP_WRITEBACK */

struct sock;
bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages,
			     gfp_t gfp_mask);
void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
#ifdef CONFIG_MEMCG
extern struct static_key_false memcg_sockets_enabled_key;
#define mem_cgroup_sockets_enabled static_branch_unlikely(&memcg_sockets_enabled_key)
void mem_cgroup_sk_alloc(struct sock *sk);
void mem_cgroup_sk_free(struct sock *sk);
static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
{
	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_pressure)
		return true;
	do {
		if (time_before(jiffies, READ_ONCE(memcg->socket_pressure)))
			return true;
	} while ((memcg = parent_mem_cgroup(memcg)));
	return false;
}
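/*
 * Illustrative sketch only: how networking code typically consumes the
 * socket-accounting hooks above (see net/core/sock.c for the real callers;
 * sk->sk_memcg is set up by mem_cgroup_sk_alloc()). The GFP_NOWAIT choice
 * here is an assumption for the example.
 *
 *	if (mem_cgroup_sockets_enabled && sk->sk_memcg) {
 *		if (!mem_cgroup_charge_skmem(sk->sk_memcg, nr_pages,
 *					     GFP_NOWAIT))
 *			...back off, the memcg limit is hit...
 *		if (mem_cgroup_under_socket_pressure(sk->sk_memcg))
 *			...shrink socket buffers / enter protocol pressure...
 *	}
 */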
int alloc_shrinker_info(struct mem_cgroup *memcg);
void free_shrinker_info(struct mem_cgroup *memcg);
void set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id);
void reparent_shrinker_deferred(struct mem_cgroup *memcg);
#else
#define mem_cgroup_sockets_enabled 0
static inline void mem_cgroup_sk_alloc(struct sock *sk) { };
static inline void mem_cgroup_sk_free(struct sock *sk) { };
static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
{
	return false;
}

static inline void set_shrinker_bit(struct mem_cgroup *memcg,
				    int nid, int shrinker_id)
{
}
#endif

#ifdef CONFIG_MEMCG_KMEM
bool mem_cgroup_kmem_disabled(void);
int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order);
void __memcg_kmem_uncharge_page(struct page *page, int order);

struct obj_cgroup *get_obj_cgroup_from_current(void);

int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size);
void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size);

extern struct static_key_false memcg_kmem_enabled_key;

static inline bool memcg_kmem_enabled(void)
{
	return static_branch_likely(&memcg_kmem_enabled_key);
}

static inline int memcg_kmem_charge_page(struct page *page, gfp_t gfp,
					 int order)
{
	if (memcg_kmem_enabled())
		return __memcg_kmem_charge_page(page, gfp, order);
	return 0;
}

static inline void memcg_kmem_uncharge_page(struct page *page, int order)
{
	if (memcg_kmem_enabled())
		__memcg_kmem_uncharge_page(page, order);
}

/*
 * A helper for accessing memcg's kmem_id, used for getting
 * corresponding LRU lists.
 */
static inline int memcg_kmem_id(struct mem_cgroup *memcg)
{
	return memcg ? memcg->kmemcg_id : -1;
}

struct mem_cgroup *mem_cgroup_from_obj(void *p);

#else
static inline bool mem_cgroup_kmem_disabled(void)
{
	return true;
}

static inline int memcg_kmem_charge_page(struct page *page, gfp_t gfp,
					 int order)
{
	return 0;
}

static inline void memcg_kmem_uncharge_page(struct page *page, int order)
{
}

static inline int __memcg_kmem_charge_page(struct page *page, gfp_t gfp,
					   int order)
{
	return 0;
}

static inline void __memcg_kmem_uncharge_page(struct page *page, int order)
{
}

static inline bool memcg_kmem_enabled(void)
{
	return false;
}

static inline int memcg_kmem_id(struct mem_cgroup *memcg)
{
	return -1;
}

static inline struct mem_cgroup *mem_cgroup_from_obj(void *p)
{
	return NULL;
}

#endif /* CONFIG_MEMCG_KMEM */

#endif /* _LINUX_MEMCONTROL_H */