/* SPDX-License-Identifier: GPL-2.0-or-later */
/* memcontrol.h - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <[email protected]>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <[email protected]>
 */

#ifndef _LINUX_MEMCONTROL_H
#define _LINUX_MEMCONTROL_H
#include <linux/cgroup.h>
#include <linux/vm_event_item.h>
#include <linux/hardirq.h>
#include <linux/jump_label.h>
#include <linux/page_counter.h>
#include <linux/vmpressure.h>
#include <linux/eventfd.h>
#include <linux/mm.h>
#include <linux/vmstat.h>
#include <linux/writeback.h>
#include <linux/page-flags.h>

struct mem_cgroup;
struct obj_cgroup;
struct page;
struct mm_struct;
struct kmem_cache;

/* Cgroup-specific page state, on top of universal node page state */
enum memcg_stat_item {
        MEMCG_SWAP = NR_VM_NODE_STAT_ITEMS,
        MEMCG_SOCK,
        MEMCG_PERCPU_B,
        MEMCG_NR_STAT,
};

enum memcg_memory_event {
        MEMCG_LOW,
        MEMCG_HIGH,
        MEMCG_MAX,
        MEMCG_OOM,
        MEMCG_OOM_KILL,
        MEMCG_SWAP_HIGH,
        MEMCG_SWAP_MAX,
        MEMCG_SWAP_FAIL,
        MEMCG_NR_MEMORY_EVENTS,
};

struct mem_cgroup_reclaim_cookie {
        pg_data_t *pgdat;
        unsigned int generation;
};

#ifdef CONFIG_MEMCG

#define MEM_CGROUP_ID_SHIFT     16
#define MEM_CGROUP_ID_MAX       USHRT_MAX

struct mem_cgroup_id {
        int id;
        refcount_t ref;
};

/*
 * The per-memcg event counter is incremented on every pagein/pageout. With
 * THP it is incremented by the number of pages. The counter is used to
 * trigger periodic events; this is simpler and more robust than using
 * jiffies etc. to schedule them.
 */
enum mem_cgroup_events_target {
        MEM_CGROUP_TARGET_THRESH,
        MEM_CGROUP_TARGET_SOFTLIMIT,
        MEM_CGROUP_NTARGETS,
};

struct memcg_vmstats_percpu {
        /* Local (CPU and cgroup) page state & events */
        long state[MEMCG_NR_STAT];
        unsigned long events[NR_VM_EVENT_ITEMS];

        /* Delta calculation for lockless upward propagation */
        long state_prev[MEMCG_NR_STAT];
        unsigned long events_prev[NR_VM_EVENT_ITEMS];

        /* Cgroup1: threshold notifications & softlimit tree updates */
        unsigned long nr_page_events;
        unsigned long targets[MEM_CGROUP_NTARGETS];
};

struct memcg_vmstats {
        /* Aggregated (CPU and subtree) page state & events */
        long state[MEMCG_NR_STAT];
        unsigned long events[NR_VM_EVENT_ITEMS];

        /* Pending child counts during tree propagation */
        long state_pending[MEMCG_NR_STAT];
        unsigned long events_pending[NR_VM_EVENT_ITEMS];
};
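
/*
 * Usage sketch (illustrative only, not part of this header): the stats
 * flusher in mm/memcontrol.c is expected to consume the state/state_prev
 * pairs above roughly like this, computing a per-CPU delta without taking
 * any lock on the hot update path:
 *
 *      delta = READ_ONCE(statc->state[i]) - statc->state_prev[i];
 *      statc->state_prev[i] += delta;
 *      memcg->vmstats.state[i] += delta;
 *      parent->vmstats.state_pending[i] += delta;
 *
 * The names statc, delta and parent are illustrative; see the rstat flush
 * code in mm/memcontrol.c for the real implementation.
 */
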
struct mem_cgroup_reclaim_iter {
        struct mem_cgroup *position;
        /* scan generation, increased every round-trip */
        unsigned int generation;
};

/*
 * Bitmap and deferred work of shrinker::id corresponding to memcg-aware
 * shrinkers, which have elements charged to this memcg.
 */
struct shrinker_info {
        struct rcu_head rcu;
        atomic_long_t *nr_deferred;
        unsigned long *map;
};

struct lruvec_stats_percpu {
        /* Local (CPU and cgroup) state */
        long state[NR_VM_NODE_STAT_ITEMS];

        /* Delta calculation for lockless upward propagation */
        long state_prev[NR_VM_NODE_STAT_ITEMS];
};

struct lruvec_stats {
        /* Aggregated (CPU and subtree) state */
        long state[NR_VM_NODE_STAT_ITEMS];

        /* Pending child counts during tree propagation */
        long state_pending[NR_VM_NODE_STAT_ITEMS];
};

/*
 * per-node information in memory controller.
 */
struct mem_cgroup_per_node {
        struct lruvec lruvec;

        struct lruvec_stats_percpu __percpu *lruvec_stats_percpu;
        struct lruvec_stats lruvec_stats;

        unsigned long lru_zone_size[MAX_NR_ZONES][NR_LRU_LISTS];

        struct mem_cgroup_reclaim_iter iter;

        struct shrinker_info __rcu *shrinker_info;

        struct rb_node tree_node;       /* RB tree node */
        unsigned long usage_in_excess;  /* Set to the value by which */
                                        /* the soft limit is exceeded */
        bool on_tree;
        struct mem_cgroup *memcg;       /* Back pointer, we cannot */
                                        /* use container_of */
};

struct mem_cgroup_threshold {
        struct eventfd_ctx *eventfd;
        unsigned long threshold;
};

/* For threshold */
struct mem_cgroup_threshold_ary {
        /* An array index points to threshold just below or equal to usage. */
        int current_threshold;
        /* Size of entries[] */
        unsigned int size;
        /* Array of thresholds */
        struct mem_cgroup_threshold entries[];
};

struct mem_cgroup_thresholds {
        /* Primary thresholds array */
        struct mem_cgroup_threshold_ary *primary;
        /*
         * Spare threshold array.
         * This is needed to make mem_cgroup_unregister_event() "never fail".
         * It must be able to store at least primary->size - 1 entries.
         */
        struct mem_cgroup_threshold_ary *spare;
};

#if defined(CONFIG_SMP)
struct memcg_padding {
        char x[0];
} ____cacheline_internodealigned_in_smp;
#define MEMCG_PADDING(name)     struct memcg_padding name
#else
#define MEMCG_PADDING(name)
#endif

/*
 * Remember four most recent foreign writebacks with dirty pages in this
 * cgroup. Inode sharing is expected to be uncommon and, even if we miss
 * one in a given round, we're likely to catch it later if it keeps
 * foreign-dirtying, so a fairly low count should be enough.
 *
 * See mem_cgroup_track_foreign_dirty_slowpath() for details.
 */
#define MEMCG_CGWB_FRN_CNT      4

struct memcg_cgwb_frn {
        u64 bdi_id;                     /* bdi->id of the foreign inode */
        int memcg_id;                   /* memcg->css.id of foreign inode */
        u64 at;                         /* jiffies_64 at the time of dirtying */
        struct wb_completion done;      /* tracks in-flight foreign writebacks */
};

/*
 * Bucket for arbitrarily byte-sized objects charged to a memory
 * cgroup. The bucket can be reparented in one piece when the cgroup
 * is destroyed, without having to round up the individual references
 * of all live memory objects in the wild.
 */
struct obj_cgroup {
        struct percpu_ref refcnt;
        struct mem_cgroup *memcg;
        atomic_t nr_charged_bytes;
        union {
                struct list_head list;
                struct rcu_head rcu;
        };
};
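
/*
 * Usage sketch (illustrative only): a kernel allocation path that wants to
 * account a sub-page amount against the current task's cgroup would use the
 * obj_cgroup helpers declared later in this header (CONFIG_MEMCG_KMEM),
 * roughly as the slab accounting hooks do:
 *
 *      objcg = get_obj_cgroup_from_current();
 *      if (objcg && obj_cgroup_charge(objcg, gfp, size))
 *              ...fail or fall back, then obj_cgroup_put(objcg)...
 *      ...
 *      obj_cgroup_uncharge(objcg, size);
 *      obj_cgroup_put(objcg);
 *
 * Error handling is elided; the real hooks live in the slab allocator.
 */
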
/*
 * The memory controller data structure. The memory controller controls both
 * page cache and RSS per cgroup. We would eventually like to provide
 * statistics based on the statistics developed by Rik van Riel for clock-pro,
 * to help the administrator determine what knobs to tune.
 */
struct mem_cgroup {
        struct cgroup_subsys_state css;

        /* Private memcg ID. Used to ID objects that outlive the cgroup */
        struct mem_cgroup_id id;

        /* Accounted resources */
        struct page_counter memory;             /* Both v1 & v2 */

        union {
                struct page_counter swap;       /* v2 only */
                struct page_counter memsw;      /* v1 only */
        };

        /* Legacy consumer-oriented counters */
        struct page_counter kmem;               /* v1 only */
        struct page_counter tcpmem;             /* v1 only */

        /* Range enforcement for interrupt charges */
        struct work_struct high_work;

        unsigned long soft_limit;

        /* vmpressure notifications */
        struct vmpressure vmpressure;

        /*
         * Should the OOM killer kill all tasks belonging to this cgroup,
         * if it has to kill one of them?
         */
        bool oom_group;

        /* protected by memcg_oom_lock */
        bool oom_lock;
        int under_oom;

        int swappiness;
        /* OOM-Killer disable */
        int oom_kill_disable;

        /* memory.events and memory.events.local */
        struct cgroup_file events_file;
        struct cgroup_file events_local_file;

        /* handle for "memory.swap.events" */
        struct cgroup_file swap_events_file;

        /* protect arrays of thresholds */
        struct mutex thresholds_lock;

        /* thresholds for memory usage. RCU-protected */
        struct mem_cgroup_thresholds thresholds;

        /* thresholds for mem+swap usage. RCU-protected */
        struct mem_cgroup_thresholds memsw_thresholds;

        /* For oom notifier event fd */
        struct list_head oom_notify;

        /*
         * Should we move charges of a task when a task is moved into this
         * mem_cgroup? And what type of charges should we move?
         */
        unsigned long move_charge_at_immigrate;
        /* taken only while moving_account > 0 */
        spinlock_t move_lock;
        unsigned long move_lock_flags;

        MEMCG_PADDING(_pad1_);

        /* memory.stat */
        struct memcg_vmstats vmstats;

        /* memory.events */
        atomic_long_t memory_events[MEMCG_NR_MEMORY_EVENTS];
        atomic_long_t memory_events_local[MEMCG_NR_MEMORY_EVENTS];

        unsigned long socket_pressure;

        /* Legacy tcp memory accounting */
        bool tcpmem_active;
        int tcpmem_pressure;

#ifdef CONFIG_MEMCG_KMEM
        int kmemcg_id;
        struct obj_cgroup __rcu *objcg;
        struct list_head objcg_list;    /* list of inherited objcgs */
#endif

        MEMCG_PADDING(_pad2_);

        /*
         * set > 0 if pages under this cgroup are moving to other cgroup.
         */
        atomic_t moving_account;
        struct task_struct *move_lock_task;

        struct memcg_vmstats_percpu __percpu *vmstats_percpu;

#ifdef CONFIG_CGROUP_WRITEBACK
        struct list_head cgwb_list;
        struct wb_domain cgwb_domain;
        struct memcg_cgwb_frn cgwb_frn[MEMCG_CGWB_FRN_CNT];
#endif

        /* List of events which userspace wants to receive */
        struct list_head event_list;
        spinlock_t event_list_lock;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
        struct deferred_split deferred_split_queue;
#endif

        struct mem_cgroup_per_node *nodeinfo[];
};

/*
 * Size of the first charge trial. "32" comes from vmscan.c's magic value.
 * TODO: a larger batch may be needed on very large machines.
 */
#define MEMCG_CHARGE_BATCH 32U

extern struct mem_cgroup *root_mem_cgroup;

enum page_memcg_data_flags {
        /* page->memcg_data is a pointer to an objcgs vector */
        MEMCG_DATA_OBJCGS = (1UL << 0),
        /* page has been accounted as a non-slab kernel page */
        MEMCG_DATA_KMEM = (1UL << 1),
        /* the next bit after the last actual flag */
        __NR_MEMCG_DATA_FLAGS = (1UL << 2),
};

#define MEMCG_DATA_FLAGS_MASK (__NR_MEMCG_DATA_FLAGS - 1)

static inline bool folio_memcg_kmem(struct folio *folio);

/*
 * After initialization, objcg->memcg always points at a valid memcg, but it
 * can be atomically swapped to the parent memcg.
 *
 * The caller must ensure that the returned memcg won't be released:
 * e.g. acquire the rcu_read_lock or css_set_lock.
 */
static inline struct mem_cgroup *obj_cgroup_memcg(struct obj_cgroup *objcg)
{
        return READ_ONCE(objcg->memcg);
}

/*
 * __folio_memcg - Get the memory cgroup associated with a non-kmem folio
 * @folio: Pointer to the folio.
 *
 * Returns a pointer to the memory cgroup associated with the folio,
 * or NULL. This function assumes that the folio is known to have a
 * proper memory cgroup pointer. It's not safe to call this function
 * against some types of folios, e.g. slab folios, ex-slab folios or
 * kmem folios.
 */
static inline struct mem_cgroup *__folio_memcg(struct folio *folio)
{
        unsigned long memcg_data = folio->memcg_data;

        VM_BUG_ON_FOLIO(folio_test_slab(folio), folio);
        VM_BUG_ON_FOLIO(memcg_data & MEMCG_DATA_OBJCGS, folio);
        VM_BUG_ON_FOLIO(memcg_data & MEMCG_DATA_KMEM, folio);

        return (struct mem_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
}

/*
 * __folio_objcg - get the object cgroup associated with a kmem folio.
 * @folio: Pointer to the folio.
 *
 * Returns a pointer to the object cgroup associated with the folio,
 * or NULL. This function assumes that the folio is known to have a
 * proper object cgroup pointer. It's not safe to call this function
 * against some types of folios, e.g. slab folios, ex-slab folios or
 * LRU folios.
 */
static inline struct obj_cgroup *__folio_objcg(struct folio *folio)
{
        unsigned long memcg_data = folio->memcg_data;

        VM_BUG_ON_FOLIO(folio_test_slab(folio), folio);
        VM_BUG_ON_FOLIO(memcg_data & MEMCG_DATA_OBJCGS, folio);
        VM_BUG_ON_FOLIO(!(memcg_data & MEMCG_DATA_KMEM), folio);

        return (struct obj_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
}
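
/*
 * Usage sketch (illustrative only): because objcg->memcg can be swapped to
 * the parent at any time, a caller that needs the memcg beyond the current
 * RCU read-side section has to take its own reference, e.g.:
 *
 *      rcu_read_lock();
 *      memcg = obj_cgroup_memcg(objcg);
 *      if (memcg && !css_tryget(&memcg->css))
 *              memcg = NULL;
 *      rcu_read_unlock();
 *      ...use memcg...
 *      mem_cgroup_put(memcg);
 *
 * css_tryget() comes from <linux/cgroup.h>; mem_cgroup_put() is declared
 * later in this header and tolerates a NULL memcg.
 */
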
/*
 * folio_memcg - Get the memory cgroup associated with a folio.
 * @folio: Pointer to the folio.
 *
 * Returns a pointer to the memory cgroup associated with the folio,
 * or NULL. This function assumes that the folio is known to have a
 * proper memory cgroup pointer. It's not safe to call this function
 * against some types of folios, e.g. slab folios or ex-slab folios.
 *
 * For a non-kmem folio any of the following ensures folio and memcg binding
 * stability:
 *
 * - the folio lock
 * - LRU isolation
 * - lock_page_memcg()
 * - exclusive reference
 *
 * For a kmem folio a caller should hold an rcu read lock to protect the
 * memcg associated with the kmem folio from being released.
 */
static inline struct mem_cgroup *folio_memcg(struct folio *folio)
{
        if (folio_memcg_kmem(folio))
                return obj_cgroup_memcg(__folio_objcg(folio));
        return __folio_memcg(folio);
}

static inline struct mem_cgroup *page_memcg(struct page *page)
{
        return folio_memcg(page_folio(page));
}

/**
 * folio_memcg_rcu - Locklessly get the memory cgroup associated with a folio.
 * @folio: Pointer to the folio.
 *
 * This function assumes that the folio is known to have a
 * proper memory cgroup pointer. It's not safe to call this function
 * against some types of folios, e.g. slab folios or ex-slab folios.
 *
 * Return: A pointer to the memory cgroup associated with the folio,
 * or NULL.
 */
static inline struct mem_cgroup *folio_memcg_rcu(struct folio *folio)
{
        unsigned long memcg_data = READ_ONCE(folio->memcg_data);

        VM_BUG_ON_FOLIO(folio_test_slab(folio), folio);
        WARN_ON_ONCE(!rcu_read_lock_held());

        if (memcg_data & MEMCG_DATA_KMEM) {
                struct obj_cgroup *objcg;

                objcg = (void *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
                return obj_cgroup_memcg(objcg);
        }

        return (struct mem_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
}

/*
 * page_memcg_check - get the memory cgroup associated with a page
 * @page: a pointer to the page struct
 *
 * Returns a pointer to the memory cgroup associated with the page,
 * or NULL. Unlike page_memcg(), this function can take any page as an
 * argument. It has to be used in cases when it's not known whether a page
 * has an associated memory cgroup pointer, an object cgroups vector or
 * an object cgroup.
 *
 * For a non-kmem page any of the following ensures page and memcg binding
 * stability:
 *
 * - the page lock
 * - LRU isolation
 * - lock_page_memcg()
 * - exclusive reference
 *
 * For a kmem page a caller should hold an rcu read lock to protect the
 * memcg associated with the kmem page from being released.
 */
static inline struct mem_cgroup *page_memcg_check(struct page *page)
{
        /*
         * Because page->memcg_data might be changed asynchronously
         * for slab pages, READ_ONCE() should be used here.
         */
        unsigned long memcg_data = READ_ONCE(page->memcg_data);

        if (memcg_data & MEMCG_DATA_OBJCGS)
                return NULL;

        if (memcg_data & MEMCG_DATA_KMEM) {
                struct obj_cgroup *objcg;

                objcg = (void *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
                return obj_cgroup_memcg(objcg);
        }

        return (struct mem_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
}
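
/*
 * Usage sketch (illustrative only): a caller that only holds the RCU read
 * lock should use folio_memcg_rcu() and, if it needs the memcg after the
 * critical section, pin it before dropping the lock:
 *
 *      rcu_read_lock();
 *      memcg = folio_memcg_rcu(folio);
 *      if (memcg)
 *              css_get(&memcg->css);
 *      rcu_read_unlock();
 *
 * A caller that already holds the folio lock, or has the folio isolated
 * from the LRU, may use folio_memcg() directly per the stability rules
 * documented above.
 */
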
#ifdef CONFIG_MEMCG_KMEM
/*
 * folio_memcg_kmem - Check if the folio has the memcg_kmem flag set.
 * @folio: Pointer to the folio.
 *
 * Checks if the folio has the MemcgKmem flag set. The caller must ensure
 * that the folio has an associated memory cgroup. It's not safe to call
 * this function against some types of folios, e.g. slab folios.
 */
static inline bool folio_memcg_kmem(struct folio *folio)
{
        VM_BUG_ON_PGFLAGS(PageTail(&folio->page), &folio->page);
        VM_BUG_ON_FOLIO(folio->memcg_data & MEMCG_DATA_OBJCGS, folio);
        return folio->memcg_data & MEMCG_DATA_KMEM;
}

#else
static inline bool folio_memcg_kmem(struct folio *folio)
{
        return false;
}

#endif

static inline bool PageMemcgKmem(struct page *page)
{
        return folio_memcg_kmem(page_folio(page));
}

static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
{
        return (memcg == root_mem_cgroup);
}

static inline bool mem_cgroup_disabled(void)
{
        return !cgroup_subsys_enabled(memory_cgrp_subsys);
}

static inline void mem_cgroup_protection(struct mem_cgroup *root,
                                         struct mem_cgroup *memcg,
                                         unsigned long *min,
                                         unsigned long *low)
{
        *min = *low = 0;

        if (mem_cgroup_disabled())
                return;

        /*
         * There is no reclaim protection applied to a targeted reclaim.
         * We are special-casing this here because the effective-protection
         * calculation in mem_cgroup_calculate_protection() is not robust
         * enough to keep the protection invariant when parallel reclaimers
         * have different reclaim targets. This is especially a problem for
         * tail memcgs (as they have pages on the LRU), which would want
         * effective values of 0 for targeted reclaim but a different value
         * for external reclaim.
         *
         * Example
         * Let's have global and A's reclaim in parallel:
         *  |
         *  A (low=2G, usage = 3G, max = 3G, children_low_usage = 1.5G)
         *  |\
         *  | C (low = 1G, usage = 2.5G)
         *  B (low = 1G, usage = 0.5G)
         *
         * For the global reclaim
         *  A.elow = A.low
         *  B.elow = min(B.usage, B.low) because children_low_usage <= A.elow
         *  C.elow = min(C.usage, C.low)
         *
         * With the effective values resetting we have A reclaim
         *  A.elow = 0
         *  B.elow = B.low
         *  C.elow = C.low
         *
         * If the global reclaim races with A's reclaim then
         * B.elow = C.elow = 0 (because children_low_usage > A.elow)
         * is possible, and reclaiming B would violate the protection.
         */
        if (root == memcg)
                return;

        *min = READ_ONCE(memcg->memory.emin);
        *low = READ_ONCE(memcg->memory.elow);
}

void mem_cgroup_calculate_protection(struct mem_cgroup *root,
                                     struct mem_cgroup *memcg);

static inline bool mem_cgroup_supports_protection(struct mem_cgroup *memcg)
{
        /*
         * The root memcg doesn't account charges, and doesn't support
         * protection.
         */
        return !mem_cgroup_disabled() && !mem_cgroup_is_root(memcg);
}

static inline bool mem_cgroup_below_low(struct mem_cgroup *memcg)
{
        if (!mem_cgroup_supports_protection(memcg))
                return false;

        return READ_ONCE(memcg->memory.elow) >=
                page_counter_read(&memcg->memory);
}

static inline bool mem_cgroup_below_min(struct mem_cgroup *memcg)
{
        if (!mem_cgroup_supports_protection(memcg))
                return false;

        return READ_ONCE(memcg->memory.emin) >=
                page_counter_read(&memcg->memory);
}
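
/*
 * Usage sketch (illustrative only): a reclaimer is expected to combine the
 * protection helpers above roughly like this when scanning a memcg (a
 * simplified version of what mm/vmscan.c does):
 *
 *      mem_cgroup_calculate_protection(target_memcg, memcg);
 *      if (mem_cgroup_below_min(memcg)) {
 *              ...hard protection: skip this memcg entirely...
 *      } else if (mem_cgroup_below_low(memcg)) {
 *              ...soft protection: skip unless reclaim is desperate,
 *                 and record a MEMCG_LOW event if protection is ignored...
 *      }
 *
 * The target_memcg and memcg names are illustrative.
 */
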
int __mem_cgroup_charge(struct folio *folio, struct mm_struct *mm, gfp_t gfp);

/**
 * mem_cgroup_charge - Charge a newly allocated folio to a cgroup.
 * @folio: Folio to charge.
 * @mm: mm context of the allocating task.
 * @gfp: Reclaim mode.
 *
 * Try to charge @folio to the memcg that @mm belongs to, reclaiming
 * pages according to @gfp if necessary. If @mm is NULL, try to
 * charge to the active memcg.
 *
 * Do not use this for folios allocated for swapin.
 *
 * Return: 0 on success. Otherwise, an error code is returned.
 */
static inline int mem_cgroup_charge(struct folio *folio, struct mm_struct *mm,
                                    gfp_t gfp)
{
        if (mem_cgroup_disabled())
                return 0;
        return __mem_cgroup_charge(folio, mm, gfp);
}

int mem_cgroup_swapin_charge_page(struct page *page, struct mm_struct *mm,
                                  gfp_t gfp, swp_entry_t entry);
void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry);

void __mem_cgroup_uncharge(struct folio *folio);

/**
 * mem_cgroup_uncharge - Uncharge a folio.
 * @folio: Folio to uncharge.
 *
 * Uncharge a folio previously charged with mem_cgroup_charge().
 */
static inline void mem_cgroup_uncharge(struct folio *folio)
{
        if (mem_cgroup_disabled())
                return;
        __mem_cgroup_uncharge(folio);
}

void __mem_cgroup_uncharge_list(struct list_head *page_list);
static inline void mem_cgroup_uncharge_list(struct list_head *page_list)
{
        if (mem_cgroup_disabled())
                return;
        __mem_cgroup_uncharge_list(page_list);
}

void mem_cgroup_migrate(struct folio *old, struct folio *new);

/**
 * mem_cgroup_lruvec - get the lru list vector for a memcg & node
 * @memcg: memcg of the wanted lruvec
 * @pgdat: pglist_data
 *
 * Returns the lru list vector holding pages for a given @memcg &
 * @pgdat combination. This can be the node lruvec, if the memory
 * controller is disabled.
 */
static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg,
                                               struct pglist_data *pgdat)
{
        struct mem_cgroup_per_node *mz;
        struct lruvec *lruvec;

        if (mem_cgroup_disabled()) {
                lruvec = &pgdat->__lruvec;
                goto out;
        }

        if (!memcg)
                memcg = root_mem_cgroup;

        mz = memcg->nodeinfo[pgdat->node_id];
        lruvec = &mz->lruvec;
out:
        /*
         * Since a node can be onlined after the mem_cgroup was created,
         * we have to be prepared to initialize lruvec->pgdat here;
         * and if offlined then reonlined, we need to reinitialize it.
         */
        if (unlikely(lruvec->pgdat != pgdat))
                lruvec->pgdat = pgdat;
        return lruvec;
}
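
/*
 * Usage sketch (illustrative only): a path that instantiates a new folio
 * charges it before publishing it, and backs out on failure. Roughly:
 *
 *      folio = ...allocate a new folio...;
 *      if (mem_cgroup_charge(folio, mm, gfp)) {
 *              folio_put(folio);
 *              return -ENOMEM;
 *      }
 *      ...map the folio or add it to the page cache...
 *
 * Uncharging is normally driven by the core MM when the folio is freed or
 * replaced via mem_cgroup_migrate(), not by the charging caller;
 * mem_cgroup_uncharge_list() above serves bulk release paths.
 */
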
/**
 * folio_lruvec - return lruvec for isolating/putting an LRU folio
 * @folio: Pointer to the folio.
 *
 * This function relies on folio->mem_cgroup being stable.
 */
static inline struct lruvec *folio_lruvec(struct folio *folio)
{
        struct mem_cgroup *memcg = folio_memcg(folio);

        VM_WARN_ON_ONCE_FOLIO(!memcg && !mem_cgroup_disabled(), folio);
        return mem_cgroup_lruvec(memcg, folio_pgdat(folio));
}

struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);

struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm);

struct lruvec *folio_lruvec_lock(struct folio *folio);
struct lruvec *folio_lruvec_lock_irq(struct folio *folio);
struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio,
                                         unsigned long *flags);

#ifdef CONFIG_DEBUG_VM
void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio);
#else
static inline
void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio)
{
}
#endif

static inline
struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css)
{
        return css ? container_of(css, struct mem_cgroup, css) : NULL;
}

static inline bool obj_cgroup_tryget(struct obj_cgroup *objcg)
{
        return percpu_ref_tryget(&objcg->refcnt);
}

static inline void obj_cgroup_get(struct obj_cgroup *objcg)
{
        percpu_ref_get(&objcg->refcnt);
}

static inline void obj_cgroup_get_many(struct obj_cgroup *objcg,
                                       unsigned long nr)
{
        percpu_ref_get_many(&objcg->refcnt, nr);
}

static inline void obj_cgroup_put(struct obj_cgroup *objcg)
{
        percpu_ref_put(&objcg->refcnt);
}

static inline void mem_cgroup_put(struct mem_cgroup *memcg)
{
        if (memcg)
                css_put(&memcg->css);
}

#define mem_cgroup_from_counter(counter, member)        \
        container_of(counter, struct mem_cgroup, member)

struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
                                   struct mem_cgroup *,
                                   struct mem_cgroup_reclaim_cookie *);
void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
int mem_cgroup_scan_tasks(struct mem_cgroup *,
                          int (*)(struct task_struct *, void *), void *);

static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
{
        if (mem_cgroup_disabled())
                return 0;

        return memcg->id.id;
}
struct mem_cgroup *mem_cgroup_from_id(unsigned short id);

static inline struct mem_cgroup *mem_cgroup_from_seq(struct seq_file *m)
{
        return mem_cgroup_from_css(seq_css(m));
}

static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
{
        struct mem_cgroup_per_node *mz;

        if (mem_cgroup_disabled())
                return NULL;

        mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
        return mz->memcg;
}
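
/*
 * Usage sketch (illustrative only): mem_cgroup_iter() walks a memcg subtree
 * while keeping a css reference on the current position. The canonical
 * full-subtree walk looks like this; break out early with
 * mem_cgroup_iter_break() so the reference is dropped:
 *
 *      struct mem_cgroup *iter;
 *
 *      for (iter = mem_cgroup_iter(root, NULL, NULL); iter;
 *           iter = mem_cgroup_iter(root, iter, NULL)) {
 *              if (should_stop(iter)) {
 *                      mem_cgroup_iter_break(root, iter);
 *                      break;
 *              }
 *              ...
 *      }
 *
 * should_stop() is a placeholder; passing a reclaim cookie instead of NULL
 * enables the shared, round-robin iteration used by reclaim.
 */
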
/**
 * parent_mem_cgroup - find the accounting parent of a memcg
 * @memcg: memcg whose parent to find
 *
 * Returns the parent memcg, or NULL if this is the root or the memory
 * controller is in legacy no-hierarchy mode.
 */
static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
{
        if (!memcg->memory.parent)
                return NULL;
        return mem_cgroup_from_counter(memcg->memory.parent, memory);
}

static inline bool mem_cgroup_is_descendant(struct mem_cgroup *memcg,
                                            struct mem_cgroup *root)
{
        if (root == memcg)
                return true;
        return cgroup_is_descendant(memcg->css.cgroup, root->css.cgroup);
}

static inline bool mm_match_cgroup(struct mm_struct *mm,
                                   struct mem_cgroup *memcg)
{
        struct mem_cgroup *task_memcg;
        bool match = false;

        rcu_read_lock();
        task_memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
        if (task_memcg)
                match = mem_cgroup_is_descendant(task_memcg, memcg);
        rcu_read_unlock();
        return match;
}

struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page);
ino_t page_cgroup_ino(struct page *page);

static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
{
        if (mem_cgroup_disabled())
                return true;
        return !!(memcg->css.flags & CSS_ONLINE);
}

void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
                                int zid, int nr_pages);

static inline
unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
                                           enum lru_list lru, int zone_idx)
{
        struct mem_cgroup_per_node *mz;

        mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
        return READ_ONCE(mz->lru_zone_size[zone_idx][lru]);
}

void mem_cgroup_handle_over_high(void);

unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg);

unsigned long mem_cgroup_size(struct mem_cgroup *memcg);

void mem_cgroup_print_oom_context(struct mem_cgroup *memcg,
                                  struct task_struct *p);

void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg);

static inline void mem_cgroup_enter_user_fault(void)
{
        WARN_ON(current->in_user_fault);
        current->in_user_fault = 1;
}

static inline void mem_cgroup_exit_user_fault(void)
{
        WARN_ON(!current->in_user_fault);
        current->in_user_fault = 0;
}

static inline bool task_in_memcg_oom(struct task_struct *p)
{
        return p->memcg_in_oom;
}

bool mem_cgroup_oom_synchronize(bool wait);
struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim,
                                            struct mem_cgroup *oom_domain);
void mem_cgroup_print_oom_group(struct mem_cgroup *memcg);

#ifdef CONFIG_MEMCG_SWAP
extern bool cgroup_memory_noswap;
#endif

void folio_memcg_lock(struct folio *folio);
void folio_memcg_unlock(struct folio *folio);
void lock_page_memcg(struct page *page);
void unlock_page_memcg(struct page *page);

void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val);

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void mod_memcg_state(struct mem_cgroup *memcg,
                                   int idx, int val)
{
        unsigned long flags;

        local_irq_save(flags);
        __mod_memcg_state(memcg, idx, val);
        local_irq_restore(flags);
}

static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
{
        return READ_ONCE(memcg->vmstats.state[idx]);
}
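
/*
 * Usage sketch (illustrative only): the @idx parameter spans both
 * node_stat_item and the memcg-only items from enum memcg_stat_item, so a
 * caller accounting, say, byte-sized percpu memory against a memcg could do:
 *
 *      mod_memcg_state(memcg, MEMCG_PERCPU_B, nr_bytes);
 *      ...
 *      usage = memcg_page_state(memcg, MEMCG_PERCPU_B);
 *
 * nr_bytes/usage are illustrative names. The value read back is only as
 * fresh as the last stats flush (see mem_cgroup_flush_stats()); the
 * __mod_memcg_state() variant is for callers that already have interrupts
 * disabled.
 */
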
static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
                                              enum node_stat_item idx)
{
        struct mem_cgroup_per_node *pn;

        if (mem_cgroup_disabled())
                return node_page_state(lruvec_pgdat(lruvec), idx);

        pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
        return READ_ONCE(pn->lruvec_stats.state[idx]);
}

static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
                                                    enum node_stat_item idx)
{
        struct mem_cgroup_per_node *pn;
        long x = 0;
        int cpu;

        if (mem_cgroup_disabled())
                return node_page_state(lruvec_pgdat(lruvec), idx);

        pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
        for_each_possible_cpu(cpu)
                x += per_cpu(pn->lruvec_stats_percpu->state[idx], cpu);
#ifdef CONFIG_SMP
        if (x < 0)
                x = 0;
#endif
        return x;
}

void mem_cgroup_flush_stats(void);

void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
                              int val);
void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val);

static inline void mod_lruvec_kmem_state(void *p, enum node_stat_item idx,
                                         int val)
{
        unsigned long flags;

        local_irq_save(flags);
        __mod_lruvec_kmem_state(p, idx, val);
        local_irq_restore(flags);
}

static inline void mod_memcg_lruvec_state(struct lruvec *lruvec,
                                          enum node_stat_item idx, int val)
{
        unsigned long flags;

        local_irq_save(flags);
        __mod_memcg_lruvec_state(lruvec, idx, val);
        local_irq_restore(flags);
}

void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
                          unsigned long count);

static inline void count_memcg_events(struct mem_cgroup *memcg,
                                      enum vm_event_item idx,
                                      unsigned long count)
{
        unsigned long flags;

        local_irq_save(flags);
        __count_memcg_events(memcg, idx, count);
        local_irq_restore(flags);
}

static inline void count_memcg_page_event(struct page *page,
                                          enum vm_event_item idx)
{
        struct mem_cgroup *memcg = page_memcg(page);

        if (memcg)
                count_memcg_events(memcg, idx, 1);
}

static inline void count_memcg_event_mm(struct mm_struct *mm,
                                        enum vm_event_item idx)
{
        struct mem_cgroup *memcg;

        if (mem_cgroup_disabled())
                return;

        rcu_read_lock();
        memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
        if (likely(memcg))
                count_memcg_events(memcg, idx, 1);
        rcu_read_unlock();
}

static inline void memcg_memory_event(struct mem_cgroup *memcg,
                                      enum memcg_memory_event event)
{
        bool swap_event = event == MEMCG_SWAP_HIGH || event == MEMCG_SWAP_MAX ||
                          event == MEMCG_SWAP_FAIL;

        atomic_long_inc(&memcg->memory_events_local[event]);
        if (!swap_event)
                cgroup_file_notify(&memcg->events_local_file);

        do {
                atomic_long_inc(&memcg->memory_events[event]);
                if (swap_event)
                        cgroup_file_notify(&memcg->swap_events_file);
                else
                        cgroup_file_notify(&memcg->events_file);

                if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
                        break;
                if (cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_LOCAL_EVENTS)
                        break;
        } while ((memcg = parent_mem_cgroup(memcg)) &&
                 !mem_cgroup_is_root(memcg));
}

static inline void memcg_memory_event_mm(struct mm_struct *mm,
                                         enum memcg_memory_event event)
{
        struct mem_cgroup *memcg;

        if (mem_cgroup_disabled())
                return;

        rcu_read_lock();
        memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
        if (likely(memcg))
                memcg_memory_event(memcg, event);
        rcu_read_unlock();
}
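
/*
 * Usage sketch (illustrative only): code that detects a limit-related
 * condition records it against the memcg that hit it, and the helper above
 * propagates the count up memory.events in the hierarchy (unless local
 * events were requested on the default-hierarchy mount), e.g.:
 *
 *      memcg_memory_event(memcg, MEMCG_MAX);
 *      memcg_memory_event_mm(mm, MEMCG_OOM);
 *
 * The _mm variant resolves the memcg from @mm first. Both also kick the
 * corresponding cgroup event file(s) so userspace pollers wake up.
 */
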
void split_page_memcg(struct page *head, unsigned int nr);

unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
                                            gfp_t gfp_mask,
                                            unsigned long *total_scanned);

#else /* CONFIG_MEMCG */

#define MEM_CGROUP_ID_SHIFT     0
#define MEM_CGROUP_ID_MAX       0

static inline struct mem_cgroup *folio_memcg(struct folio *folio)
{
        return NULL;
}

static inline struct mem_cgroup *page_memcg(struct page *page)
{
        return NULL;
}

static inline struct mem_cgroup *folio_memcg_rcu(struct folio *folio)
{
        WARN_ON_ONCE(!rcu_read_lock_held());
        return NULL;
}

static inline struct mem_cgroup *page_memcg_check(struct page *page)
{
        return NULL;
}

static inline bool folio_memcg_kmem(struct folio *folio)
{
        return false;
}

static inline bool PageMemcgKmem(struct page *page)
{
        return false;
}

static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
{
        return true;
}

static inline bool mem_cgroup_disabled(void)
{
        return true;
}

static inline void memcg_memory_event(struct mem_cgroup *memcg,
                                      enum memcg_memory_event event)
{
}

static inline void memcg_memory_event_mm(struct mm_struct *mm,
                                         enum memcg_memory_event event)
{
}

static inline void mem_cgroup_protection(struct mem_cgroup *root,
                                         struct mem_cgroup *memcg,
                                         unsigned long *min,
                                         unsigned long *low)
{
        *min = *low = 0;
}

static inline void mem_cgroup_calculate_protection(struct mem_cgroup *root,
                                                   struct mem_cgroup *memcg)
{
}

static inline bool mem_cgroup_below_low(struct mem_cgroup *memcg)
{
        return false;
}

static inline bool mem_cgroup_below_min(struct mem_cgroup *memcg)
{
        return false;
}

static inline int mem_cgroup_charge(struct folio *folio,
                                    struct mm_struct *mm, gfp_t gfp)
{
        return 0;
}

static inline int mem_cgroup_swapin_charge_page(struct page *page,
                        struct mm_struct *mm, gfp_t gfp, swp_entry_t entry)
{
        return 0;
}

static inline void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry)
{
}

static inline void mem_cgroup_uncharge(struct folio *folio)
{
}

static inline void mem_cgroup_uncharge_list(struct list_head *page_list)
{
}

static inline void mem_cgroup_migrate(struct folio *old, struct folio *new)
{
}

static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg,
                                               struct pglist_data *pgdat)
{
        return &pgdat->__lruvec;
}

static inline struct lruvec *folio_lruvec(struct folio *folio)
{
        struct pglist_data *pgdat = folio_pgdat(folio);
        return &pgdat->__lruvec;
}

static inline
void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio)
{
}

static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
{
        return NULL;
}

static inline bool mm_match_cgroup(struct mm_struct *mm,
                                   struct mem_cgroup *memcg)
{
        return true;
}

static inline struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
{
        return NULL;
}

static inline
struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css)
{
        return NULL;
}

static inline void mem_cgroup_put(struct mem_cgroup *memcg)
{
}

static inline struct lruvec *folio_lruvec_lock(struct folio *folio)
{
        struct pglist_data *pgdat = folio_pgdat(folio);

        spin_lock(&pgdat->__lruvec.lru_lock);
        return &pgdat->__lruvec;
}

static inline struct lruvec *folio_lruvec_lock_irq(struct folio *folio)
{
        struct pglist_data *pgdat = folio_pgdat(folio);

        spin_lock_irq(&pgdat->__lruvec.lru_lock);
        return &pgdat->__lruvec;
}

static inline struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio,
                unsigned long *flagsp)
{
        struct pglist_data *pgdat = folio_pgdat(folio);

        spin_lock_irqsave(&pgdat->__lruvec.lru_lock, *flagsp);
        return &pgdat->__lruvec;
}

static inline struct mem_cgroup *
mem_cgroup_iter(struct mem_cgroup *root,
                struct mem_cgroup *prev,
                struct mem_cgroup_reclaim_cookie *reclaim)
{
        return NULL;
}

static inline void mem_cgroup_iter_break(struct mem_cgroup *root,
                                         struct mem_cgroup *prev)
{
}

static inline int mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
                int (*fn)(struct task_struct *, void *), void *arg)
{
        return 0;
}

static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
{
        return 0;
}

static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
{
        WARN_ON_ONCE(id);
        /* XXX: This should always return root_mem_cgroup */
        return NULL;
}

static inline struct mem_cgroup *mem_cgroup_from_seq(struct seq_file *m)
{
        return NULL;
}

static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
{
        return NULL;
}

static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
{
        return true;
}

static inline
unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
                                           enum lru_list lru, int zone_idx)
{
        return 0;
}

static inline unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
{
        return 0;
}

static inline unsigned long mem_cgroup_size(struct mem_cgroup *memcg)
{
        return 0;
}

static inline void
mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
{
}

static inline void
mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
{
}

static inline void lock_page_memcg(struct page *page)
{
}

static inline void unlock_page_memcg(struct page *page)
{
}

static inline void folio_memcg_lock(struct folio *folio)
{
}

static inline void folio_memcg_unlock(struct folio *folio)
{
}

static inline void mem_cgroup_handle_over_high(void)
{
}

static inline void mem_cgroup_enter_user_fault(void)
{
}

static inline void mem_cgroup_exit_user_fault(void)
{
}

static inline bool task_in_memcg_oom(struct task_struct *p)
{
        return false;
}

static inline bool mem_cgroup_oom_synchronize(bool wait)
{
        return false;
}

static inline struct mem_cgroup *mem_cgroup_get_oom_group(
        struct task_struct *victim, struct mem_cgroup *oom_domain)
{
        return NULL;
}

static inline void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
{
}

static inline void __mod_memcg_state(struct mem_cgroup *memcg,
                                     int idx,
                                     int nr)
{
}

static inline void mod_memcg_state(struct mem_cgroup *memcg,
                                   int idx,
                                   int nr)
{
}

static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
{
        return 0;
}

static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
                                              enum node_stat_item idx)
{
        return node_page_state(lruvec_pgdat(lruvec), idx);
}

static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
                                                    enum node_stat_item idx)
{
        return node_page_state(lruvec_pgdat(lruvec), idx);
}

static inline void mem_cgroup_flush_stats(void)
{
}

static inline void __mod_memcg_lruvec_state(struct lruvec *lruvec,
                                            enum node_stat_item idx, int val)
{
}

static inline void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx,
                                           int val)
{
        struct page *page = virt_to_head_page(p);

        __mod_node_page_state(page_pgdat(page), idx, val);
}

static inline void mod_lruvec_kmem_state(void *p, enum node_stat_item idx,
                                         int val)
{
        struct page *page = virt_to_head_page(p);

        mod_node_page_state(page_pgdat(page), idx, val);
}

static inline void count_memcg_events(struct mem_cgroup *memcg,
                                      enum vm_event_item idx,
                                      unsigned long count)
{
}

static inline void __count_memcg_events(struct mem_cgroup *memcg,
                                        enum vm_event_item idx,
                                        unsigned long count)
{
}

static inline void count_memcg_page_event(struct page *page,
                                          int idx)
{
}

static inline
void count_memcg_event_mm(struct mm_struct *mm, enum vm_event_item idx)
{
}

static inline void split_page_memcg(struct page *head, unsigned int nr)
{
}

static inline
unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
                                            gfp_t gfp_mask,
                                            unsigned long *total_scanned)
{
        return 0;
}
#endif /* CONFIG_MEMCG */

static inline void __inc_lruvec_kmem_state(void *p, enum node_stat_item idx)
{
        __mod_lruvec_kmem_state(p, idx, 1);
}

static inline void __dec_lruvec_kmem_state(void *p, enum node_stat_item idx)
{
        __mod_lruvec_kmem_state(p, idx, -1);
}

static inline struct lruvec *parent_lruvec(struct lruvec *lruvec)
{
        struct mem_cgroup *memcg;

        memcg = lruvec_memcg(lruvec);
        if (!memcg)
                return NULL;
        memcg = parent_mem_cgroup(memcg);
        if (!memcg)
                return NULL;
        return mem_cgroup_lruvec(memcg, lruvec_pgdat(lruvec));
}

static inline void unlock_page_lruvec(struct lruvec *lruvec)
{
        spin_unlock(&lruvec->lru_lock);
}

static inline void unlock_page_lruvec_irq(struct lruvec *lruvec)
{
        spin_unlock_irq(&lruvec->lru_lock);
}

static inline void unlock_page_lruvec_irqrestore(struct lruvec *lruvec,
                unsigned long flags)
{
        spin_unlock_irqrestore(&lruvec->lru_lock, flags);
}

/* Test requires a stable page->memcg binding, see page_memcg() */
static inline bool folio_matches_lruvec(struct folio *folio,
                                        struct lruvec *lruvec)
{
        return lruvec_pgdat(lruvec) == folio_pgdat(folio) &&
               lruvec_memcg(lruvec) == folio_memcg(folio);
}

/* Don't re-lock if the folio's lruvec is already locked */
static inline struct lruvec *folio_lruvec_relock_irq(struct folio *folio,
                struct lruvec *locked_lruvec)
{
        if (locked_lruvec) {
                if (folio_matches_lruvec(folio, locked_lruvec))
                        return locked_lruvec;

                unlock_page_lruvec_irq(locked_lruvec);
        }

        return folio_lruvec_lock_irq(folio);
}

/* Don't re-lock if the folio's lruvec is already locked */
static inline struct lruvec *folio_lruvec_relock_irqsave(struct folio *folio,
                struct lruvec *locked_lruvec, unsigned long *flags)
{
        if (locked_lruvec) {
                if (folio_matches_lruvec(folio, locked_lruvec))
                        return locked_lruvec;

                unlock_page_lruvec_irqrestore(locked_lruvec, *flags);
        }

        return folio_lruvec_lock_irqsave(folio, flags);
}
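
/*
 * Usage sketch (illustrative only): the relock helpers above exist for
 * loops that walk a mixed batch of folios and only want to drop and retake
 * the lru_lock when the lruvec actually changes, e.g.:
 *
 *      struct lruvec *lruvec = NULL;
 *      unsigned long flags;
 *
 *      list_for_each_entry(folio, &folios, lru) {
 *              lruvec = folio_lruvec_relock_irqsave(folio, lruvec, &flags);
 *              ...operate on the folio's LRU state...
 *      }
 *      if (lruvec)
 *              unlock_page_lruvec_irqrestore(lruvec, flags);
 *
 * This mirrors the pattern used by the release/putback paths in mm/.
 */
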
#ifdef CONFIG_CGROUP_WRITEBACK

struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb);
void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
                         unsigned long *pheadroom, unsigned long *pdirty,
                         unsigned long *pwriteback);

void mem_cgroup_track_foreign_dirty_slowpath(struct folio *folio,
                                             struct bdi_writeback *wb);

static inline void mem_cgroup_track_foreign_dirty(struct folio *folio,
                                                  struct bdi_writeback *wb)
{
        if (mem_cgroup_disabled())
                return;

        if (unlikely(&folio_memcg(folio)->css != wb->memcg_css))
                mem_cgroup_track_foreign_dirty_slowpath(folio, wb);
}

void mem_cgroup_flush_foreign(struct bdi_writeback *wb);

#else /* CONFIG_CGROUP_WRITEBACK */

static inline struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
{
        return NULL;
}

static inline void mem_cgroup_wb_stats(struct bdi_writeback *wb,
                                       unsigned long *pfilepages,
                                       unsigned long *pheadroom,
                                       unsigned long *pdirty,
                                       unsigned long *pwriteback)
{
}

static inline void mem_cgroup_track_foreign_dirty(struct folio *folio,
                                                  struct bdi_writeback *wb)
{
}

static inline void mem_cgroup_flush_foreign(struct bdi_writeback *wb)
{
}

#endif /* CONFIG_CGROUP_WRITEBACK */

struct sock;
bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages,
                             gfp_t gfp_mask);
void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
#ifdef CONFIG_MEMCG
extern struct static_key_false memcg_sockets_enabled_key;
#define mem_cgroup_sockets_enabled static_branch_unlikely(&memcg_sockets_enabled_key)
void mem_cgroup_sk_alloc(struct sock *sk);
void mem_cgroup_sk_free(struct sock *sk);
static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
{
        if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_pressure)
                return true;
        do {
                if (time_before(jiffies, READ_ONCE(memcg->socket_pressure)))
                        return true;
        } while ((memcg = parent_mem_cgroup(memcg)));
        return false;
}

int alloc_shrinker_info(struct mem_cgroup *memcg);
void free_shrinker_info(struct mem_cgroup *memcg);
void set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id);
void reparent_shrinker_deferred(struct mem_cgroup *memcg);
#else
#define mem_cgroup_sockets_enabled 0
static inline void mem_cgroup_sk_alloc(struct sock *sk) { };
static inline void mem_cgroup_sk_free(struct sock *sk) { };
static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
{
        return false;
}

static inline void set_shrinker_bit(struct mem_cgroup *memcg,
                                    int nid, int shrinker_id)
{
}
#endif

#ifdef CONFIG_MEMCG_KMEM
bool mem_cgroup_kmem_disabled(void);
int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order);
void __memcg_kmem_uncharge_page(struct page *page, int order);

struct obj_cgroup *get_obj_cgroup_from_current(void);

int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size);
void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size);

extern struct static_key_false memcg_kmem_enabled_key;

extern int memcg_nr_cache_ids;
void memcg_get_cache_ids(void);
void memcg_put_cache_ids(void);

/*
 * Helper macro to loop through all memcg-specific caches. Callers must still
 * check if the cache is valid (it is either valid or NULL).
 * The slab_mutex must be held when looping through those caches.
 */
#define for_each_memcg_cache_index(_idx)        \
        for ((_idx) = 0; (_idx) < memcg_nr_cache_ids; (_idx)++)

static inline bool memcg_kmem_enabled(void)
{
        return static_branch_likely(&memcg_kmem_enabled_key);
}

static inline int memcg_kmem_charge_page(struct page *page, gfp_t gfp,
                                         int order)
{
        if (memcg_kmem_enabled())
                return __memcg_kmem_charge_page(page, gfp, order);
        return 0;
}

static inline void memcg_kmem_uncharge_page(struct page *page, int order)
{
        if (memcg_kmem_enabled())
                __memcg_kmem_uncharge_page(page, order);
}

/*
 * A helper for accessing a memcg's kmemcg_id, used for getting the
 * corresponding LRU lists.
 */
static inline int memcg_cache_id(struct mem_cgroup *memcg)
{
        return memcg ? memcg->kmemcg_id : -1;
}

struct mem_cgroup *mem_cgroup_from_obj(void *p);

#else
static inline bool mem_cgroup_kmem_disabled(void)
{
        return true;
}

static inline int memcg_kmem_charge_page(struct page *page, gfp_t gfp,
                                         int order)
{
        return 0;
}

static inline void memcg_kmem_uncharge_page(struct page *page, int order)
{
}

static inline int __memcg_kmem_charge_page(struct page *page, gfp_t gfp,
                                           int order)
{
        return 0;
}

static inline void __memcg_kmem_uncharge_page(struct page *page, int order)
{
}

#define for_each_memcg_cache_index(_idx)        \
        for (; NULL; )

static inline bool memcg_kmem_enabled(void)
{
        return false;
}

static inline int memcg_cache_id(struct mem_cgroup *memcg)
{
        return -1;
}

static inline void memcg_get_cache_ids(void)
{
}

static inline void memcg_put_cache_ids(void)
{
}

static inline struct mem_cgroup *mem_cgroup_from_obj(void *p)
{
        return NULL;
}

#endif /* CONFIG_MEMCG_KMEM */

#endif /* _LINUX_MEMCONTROL_H */