/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SWAP_H
#define _LINUX_SWAP_H

#include <linux/spinlock.h>
#include <linux/linkage.h>
#include <linux/mmzone.h>
#include <linux/list.h>
#include <linux/memcontrol.h>
#include <linux/sched.h>
#include <linux/node.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/atomic.h>
#include <linux/page-flags.h>
#include <uapi/linux/mempolicy.h>
#include <asm/page.h>

struct notifier_block;

struct bio;

struct pagevec;

#define SWAP_FLAG_PREFER	0x8000	/* set if swap priority specified */
#define SWAP_FLAG_PRIO_MASK	0x7fff
#define SWAP_FLAG_PRIO_SHIFT	0
#define SWAP_FLAG_DISCARD	0x10000 /* enable discard for swap */
#define SWAP_FLAG_DISCARD_ONCE	0x20000 /* discard swap area at swapon-time */
#define SWAP_FLAG_DISCARD_PAGES 0x40000 /* discard page-clusters after use */

#define SWAP_FLAGS_VALID	(SWAP_FLAG_PRIO_MASK | SWAP_FLAG_PREFER | \
				 SWAP_FLAG_DISCARD | SWAP_FLAG_DISCARD_ONCE | \
				 SWAP_FLAG_DISCARD_PAGES)
#define SWAP_BATCH 64

static inline int current_is_kswapd(void)
{
	return current->flags & PF_KSWAPD;
}

/*
 * MAX_SWAPFILES defines the maximum number of swap types: things which can
 * be swapped to.  The swap type and the offset into that swap type are
 * encoded into ptes and into pgoff_t's in the swapcache.  Using five bits
 * for the type means that the maximum number of swapcache pages is 27 bits
 * on 32-bit-pgoff_t architectures.  And that assumes that the architecture
 * packs the type/offset into the pte as 5/27 as well.
 */
#define MAX_SWAPFILES_SHIFT	5
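
/*
 * Illustrative arithmetic (an addition, not from the original header):
 * a 5/27 split of a 32-bit value gives 2^5 = 32 type values and a
 * 2^27-page offset per type.  With 4 KiB pages that caps a swap area at
 * 2^27 * 4 KiB = 512 GiB.  Several of the 32 type values are reserved
 * for the special entries defined below (hwpoison, migration, device,
 * PTE markers), which is why MAX_SWAPFILES subtracts them back out.
 */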

/*
 * Use some of the swap file numbers for other purposes. This
 * is a convenient way to hook into the VM to trigger special
 * actions on faults.
 */

/*
 * PTE markers are used to persist information onto PTEs that otherwise
 * should be a none pte.  As its name "PTE" hints, it should only be
 * applied to the leaves of pgtables.
 */
#define SWP_PTE_MARKER_NUM 1
#define SWP_PTE_MARKER     (MAX_SWAPFILES + SWP_HWPOISON_NUM + \
			    SWP_MIGRATION_NUM + SWP_DEVICE_NUM)

/*
 * Unaddressable device memory support. See include/linux/hmm.h and
 * Documentation/mm/hmm.rst. Short description is we need struct pages for
 * device memory that is unaddressable (inaccessible) by the CPU, so that
 * we can migrate part of a process's memory to device memory.
 *
 * When a page is migrated from CPU to device, we set the CPU page table entry
 * to a special SWP_DEVICE_{READ|WRITE} entry.
 *
 * When a page is mapped by the device for exclusive access we set the CPU page
 * table entries to special SWP_DEVICE_EXCLUSIVE_* entries.
 */
#ifdef CONFIG_DEVICE_PRIVATE
#define SWP_DEVICE_NUM 4
#define SWP_DEVICE_WRITE (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM)
#define SWP_DEVICE_READ (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM+1)
#define SWP_DEVICE_EXCLUSIVE_WRITE (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM+2)
#define SWP_DEVICE_EXCLUSIVE_READ (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM+3)
#else
#define SWP_DEVICE_NUM 0
#endif

/*
 * Page migration support.
 *
 * SWP_MIGRATION_READ_EXCLUSIVE is only applicable to anonymous pages and
 * indicates that the referenced (part of an) anonymous page is exclusive to
 * a single process. For SWP_MIGRATION_WRITE, that information is implicit:
 * (parts of) anonymous pages that are mapped writable are exclusive to a
 * single process.
 */
#ifdef CONFIG_MIGRATION
#define SWP_MIGRATION_NUM 3
#define SWP_MIGRATION_READ	(MAX_SWAPFILES + SWP_HWPOISON_NUM)
#define SWP_MIGRATION_READ_EXCLUSIVE (MAX_SWAPFILES + SWP_HWPOISON_NUM + 1)
#define SWP_MIGRATION_WRITE	(MAX_SWAPFILES + SWP_HWPOISON_NUM + 2)
#else
#define SWP_MIGRATION_NUM 0
#endif

/*
 * Handling of hardware poisoned pages with memory corruption.
 */
#ifdef CONFIG_MEMORY_FAILURE
#define SWP_HWPOISON_NUM 1
#define SWP_HWPOISON		MAX_SWAPFILES
#else
#define SWP_HWPOISON_NUM 0
#endif

#define MAX_SWAPFILES \
	((1 << MAX_SWAPFILES_SHIFT) - SWP_DEVICE_NUM - \
	SWP_MIGRATION_NUM - SWP_HWPOISON_NUM - \
	SWP_PTE_MARKER_NUM)

/*
 * Magic header for a swap area. The first part of the union is
 * what the swap magic looks like for the old (limited to 128MB)
 * swap area format, the second part of the union adds - in the
 * old reserved area - some extra information. Note that the first
 * kilobyte is reserved for boot loader or disk label stuff...
 *
 * Having the magic at the end of the PAGE_SIZE makes detecting swap
 * areas somewhat tricky on machines that support multiple page sizes.
 * For 2.5 we'll probably want to move the magic to just beyond the
 * bootbits...
 */
union swap_header {
	struct {
		char reserved[PAGE_SIZE - 10];
		char magic[10];			/* SWAP-SPACE or SWAPSPACE2 */
	} magic;
	struct {
		char		bootbits[1024];	/* Space for disklabel etc. */
		__u32		version;
		__u32		last_page;
		__u32		nr_badpages;
		unsigned char	sws_uuid[16];
		unsigned char	sws_volume[16];
		__u32		padding[117];
		__u32		badpages[1];
	} info;
};

/*
 * current->reclaim_state points to one of these when a task is running
 * memory reclaim.
 */
struct reclaim_state {
	/* pages reclaimed outside of LRU-based reclaim */
	unsigned long reclaimed;
#ifdef CONFIG_LRU_GEN
	/* per-thread mm walk data */
	struct lru_gen_mm_walk *mm_walk;
#endif
};

/*
 * mm_account_reclaimed_pages(): account reclaimed pages outside of LRU-based
 * reclaim
 * @pages: number of pages reclaimed
 *
 * If the current process is undergoing a reclaim operation, increment the
 * number of reclaimed pages by @pages.
 */
static inline void mm_account_reclaimed_pages(unsigned long pages)
{
	if (current->reclaim_state)
		current->reclaim_state->reclaimed += pages;
}

#ifdef __KERNEL__

struct address_space;
struct sysinfo;
struct writeback_control;
struct zone;

/*
 * A swap extent maps a range of a swapfile's PAGE_SIZE pages onto a range of
 * disk blocks.  An rbtree of swap extents maps the entire swapfile (where the
 * term `swapfile' refers to either a blockdevice or an IS_REG file). Apart
 * from setup, they're handled identically.
 *
 * We always assume that blocks are of size PAGE_SIZE.
 */
struct swap_extent {
	struct rb_node rb_node;
	pgoff_t start_page;
	pgoff_t nr_pages;
	sector_t start_block;
};

/*
 * Max bad pages in the new format.
 */
#define MAX_SWAP_BADPAGES \
	((offsetof(union swap_header, magic.magic) - \
	  offsetof(union swap_header, info.badpages)) / sizeof(int))
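
/*
 * Worked example (illustrative, assuming 4 KiB pages): info.badpages
 * starts at offset 1024 + 3*4 + 2*16 + 117*4 = 1536, while magic.magic
 * sits at 4096 - 10 = 4086, so MAX_SWAP_BADPAGES evaluates to
 * (4086 - 1536) / 4 = 637 entries.
 */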

enum {
	SWP_USED	= (1 << 0),	/* is slot in swap_info[] used? */
	SWP_WRITEOK	= (1 << 1),	/* ok to write to this swap?	*/
	SWP_DISCARDABLE = (1 << 2),	/* blkdev supports discard */
	SWP_DISCARDING	= (1 << 3),	/* now discarding a free cluster */
	SWP_SOLIDSTATE	= (1 << 4),	/* blkdev seeks are cheap */
	SWP_CONTINUED	= (1 << 5),	/* swap_map has count continuation */
	SWP_BLKDEV	= (1 << 6),	/* it's a block device */
	SWP_ACTIVATED	= (1 << 7),	/* set after swap_activate success */
	SWP_FS_OPS	= (1 << 8),	/* swapfile operations go through fs */
	SWP_AREA_DISCARD = (1 << 9),	/* single-time swap area discards */
	SWP_PAGE_DISCARD = (1 << 10),	/* freed swap page-cluster discards */
	SWP_STABLE_WRITES = (1 << 11),	/* no overwrite PG_writeback pages */
	SWP_SYNCHRONOUS_IO = (1 << 12),	/* synchronous IO is efficient */
					/* add others here before... */
	SWP_SCANNING	= (1 << 14),	/* refcount in scan_swap_map */
};

#define SWAP_CLUSTER_MAX 32UL
#define COMPACT_CLUSTER_MAX SWAP_CLUSTER_MAX

/* Bit flag in swap_map */
#define SWAP_HAS_CACHE	0x40	/* Flag page is cached, in first swap_map */
#define COUNT_CONTINUED	0x80	/* Flag swap_map continuation for full count */

/* Special value in first swap_map */
#define SWAP_MAP_MAX	0x3e	/* Max count */
#define SWAP_MAP_BAD	0x3f	/* Note page is bad */
#define SWAP_MAP_SHMEM	0xbf	/* Owned by shmem/tmpfs */

/* Special value in each swap_map continuation */
#define SWAP_CONT_MAX	0x7f	/* Max count */
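
/*
 * Resulting layout of a swap_map byte (a sketch, not from the original
 * header): bits 0-5 hold the usage count (at most SWAP_MAP_MAX = 0x3e),
 * bit 6 is SWAP_HAS_CACHE and bit 7 is COUNT_CONTINUED.  For example,
 * 0x41 means "count of 1, page also in the swap cache", while
 * SWAP_MAP_SHMEM (0xbf = 0x80 | 0x3f) combines the continuation bit with
 * the bad-page value to mark entries owned by shmem/tmpfs.
 */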

/*
 * We use this to track usage of a cluster. A cluster is a block of swap disk
 * space SWAPFILE_CLUSTER pages long that is naturally aligned on disk. All
 * free clusters are organized into a list. We fetch an entry from the list to
 * get a free cluster.
 *
 * The data field stores the next cluster if the cluster is free, or the
 * cluster usage counter otherwise. The flags field determines if a cluster
 * is free. This is protected by swap_info_struct.lock.
 */
struct swap_cluster_info {
	spinlock_t lock;	/*
				 * Protect swap_cluster_info fields
				 * and swap_info_struct->swap_map
				 * elements corresponding to this
				 * swap cluster
				 */
	unsigned int data:24;
	unsigned int flags:8;
};
#define CLUSTER_FLAG_FREE 1 /* This cluster is free */
#define CLUSTER_FLAG_NEXT_NULL 2 /* This cluster has no next cluster */

/*
 * The first page in the swap file is the swap header, which is always marked
 * bad to prevent it from being allocated as an entry. This also prevents the
 * cluster to which it belongs from being marked free. Therefore 0 is safe to
 * use as a sentinel to indicate next is not valid in percpu_cluster.
 */
#define SWAP_NEXT_INVALID	0

#ifdef CONFIG_THP_SWAP
#define SWAP_NR_ORDERS		(PMD_ORDER + 1)
#else
#define SWAP_NR_ORDERS		1
#endif

/*
 * We assign a cluster to each CPU, so each CPU can allocate swap entries from
 * its own cluster and swap out sequentially. The purpose is to optimize
 * swapout throughput.
 */
struct percpu_cluster {
	unsigned int next[SWAP_NR_ORDERS]; /* Likely next allocation offset */
};

struct swap_cluster_list {
	struct swap_cluster_info head;
	struct swap_cluster_info tail;
};

/*
 * The in-memory structure used to track swap areas.
 */
struct swap_info_struct {
	struct percpu_ref users;	/* indicate and keep swap device valid. */
	unsigned long	flags;		/* SWP_USED etc: see above */
	signed short	prio;		/* swap priority of this type */
	struct plist_node list;		/* entry in swap_active_head */
	signed char	type;		/* strange name for an index */
	unsigned int	max;		/* extent of the swap_map */
	unsigned char *swap_map;	/* vmalloc'ed array of usage counts */
	struct swap_cluster_info *cluster_info; /* cluster info. Only for SSD */
	struct swap_cluster_list free_clusters; /* free clusters list */
	unsigned int lowest_bit;	/* index of first free in swap_map */
	unsigned int highest_bit;	/* index of last free in swap_map */
	unsigned int pages;		/* total of usable pages of swap */
	unsigned int inuse_pages;	/* number of those currently in use */
	unsigned int cluster_next;	/* likely index for next allocation */
	unsigned int cluster_nr;	/* countdown to next cluster search */
	unsigned int __percpu *cluster_next_cpu; /* percpu index for next allocation */
	struct percpu_cluster __percpu *percpu_cluster; /* per cpu's swap location */
	struct rb_root swap_extent_root;/* root of the swap extent rbtree */
	struct file *bdev_file;		/* open handle of the bdev */
	struct block_device *bdev;	/* swap device or bdev of swap file */
	struct file *swap_file;		/* seldom referenced */
	unsigned int old_block_size;	/* seldom referenced */
	struct completion comp;		/* seldom referenced */
	spinlock_t lock;		/*
					 * protect map scan related fields like
					 * swap_map, lowest_bit, highest_bit,
					 * inuse_pages, cluster_next,
					 * cluster_nr, lowest_alloc,
					 * highest_alloc, free/discard cluster
					 * list. other fields are only changed
					 * at swapon/swapoff, so are protected
					 * by swap_lock. changing flags needs
					 * to hold this lock and swap_lock. If
					 * both locks need to be held, hold
					 * swap_lock first.
					 */
	spinlock_t cont_lock;		/*
					 * protect swap count continuation page
					 * list.
					 */
	struct work_struct discard_work; /* discard worker */
	struct swap_cluster_list discard_clusters; /* discard clusters list */
	struct plist_node avail_lists[]; /*
					  * entries in swap_avail_heads, one
					  * entry per node.
					  * Must be last, as the size of the
					  * array is nr_node_ids, which is not
					  * a fixed value, so it has to be
					  * allocated dynamically.
					  * And it has to be an array so that
					  * plist_for_each_* can work.
					  */
};

static inline swp_entry_t page_swap_entry(struct page *page)
{
	struct folio *folio = page_folio(page);
	swp_entry_t entry = folio->swap;

	entry.val += folio_page_idx(folio, page);
	return entry;
}
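
/*
 * Sketch of what page_swap_entry() computes (illustrative): a large
 * folio occupies consecutive swap slots, so if folio->swap encodes slot
 * N and @page is the folio's third subpage, folio_page_idx() returns 2
 * and the returned entry refers to slot N + 2.
 */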

/* linux/mm/workingset.c */
bool workingset_test_recent(void *shadow, bool file, bool *workingset);
void workingset_age_nonresident(struct lruvec *lruvec, unsigned long nr_pages);
void *workingset_eviction(struct folio *folio, struct mem_cgroup *target_memcg);
void workingset_refault(struct folio *folio, void *shadow);
void workingset_activation(struct folio *folio);

/* linux/mm/page_alloc.c */
extern unsigned long totalreserve_pages;

/* Definition of global_zone_page_state not available yet */
#define nr_free_pages() global_zone_page_state(NR_FREE_PAGES)

/* linux/mm/swap.c */
void lru_note_cost(struct lruvec *lruvec, bool file,
		   unsigned int nr_io, unsigned int nr_rotated);
void lru_note_cost_refault(struct folio *);
void folio_add_lru(struct folio *);
void folio_add_lru_vma(struct folio *, struct vm_area_struct *);
void mark_page_accessed(struct page *);
void folio_mark_accessed(struct folio *);

extern atomic_t lru_disable_count;

static inline bool lru_cache_disabled(void)
{
	return atomic_read(&lru_disable_count);
}

static inline void lru_cache_enable(void)
{
	atomic_dec(&lru_disable_count);
}

extern void lru_cache_disable(void);
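
/*
 * Usage sketch (an illustration, not part of the original header):
 * lru_cache_disable() raises lru_disable_count and drains the per-CPU
 * LRU caches; callers pair it with lru_cache_enable() once pages no
 * longer need to be kept off those caches, e.g.:
 *
 *	lru_cache_disable();
 *	... isolate or migrate the pages ...
 *	lru_cache_enable();
 */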

extern void lru_add_drain(void);
extern void lru_add_drain_cpu(int cpu);
extern void lru_add_drain_cpu_zone(struct zone *zone);
extern void lru_add_drain_all(void);
void folio_deactivate(struct folio *folio);
void folio_mark_lazyfree(struct folio *folio);
extern void swap_setup(void);

/* linux/mm/vmscan.c */
extern unsigned long zone_reclaimable_pages(struct zone *zone);
extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
					gfp_t gfp_mask, nodemask_t *mask);

#define MEMCG_RECLAIM_MAY_SWAP (1 << 1)
#define MEMCG_RECLAIM_PROACTIVE (1 << 2)
extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
						  unsigned long nr_pages,
						  gfp_t gfp_mask,
						  unsigned int reclaim_options);
extern unsigned long mem_cgroup_shrink_node(struct mem_cgroup *mem,
						gfp_t gfp_mask, bool noswap,
						pg_data_t *pgdat,
						unsigned long *nr_scanned);
extern unsigned long shrink_all_memory(unsigned long nr_pages);
extern int vm_swappiness;
long remove_mapping(struct address_space *mapping, struct folio *folio);

#ifdef CONFIG_NUMA
extern int node_reclaim_mode;
extern int sysctl_min_unmapped_ratio;
extern int sysctl_min_slab_ratio;
#else
#define node_reclaim_mode 0
#endif

static inline bool node_reclaim_enabled(void)
{
	/* Is any node_reclaim_mode bit set? */
	return node_reclaim_mode & (RECLAIM_ZONE|RECLAIM_WRITE|RECLAIM_UNMAP);
}

void check_move_unevictable_folios(struct folio_batch *fbatch);

extern void __meminit kswapd_run(int nid);
extern void __meminit kswapd_stop(int nid);

#ifdef CONFIG_SWAP

int add_swap_extent(struct swap_info_struct *sis, unsigned long start_page,
		unsigned long nr_pages, sector_t start_block);
int generic_swapfile_activate(struct swap_info_struct *, struct file *,
		sector_t *);

static inline unsigned long total_swapcache_pages(void)
{
	return global_node_page_state(NR_SWAPCACHE);
}

void free_swap_cache(struct folio *folio);
void free_page_and_swap_cache(struct page *);
void free_pages_and_swap_cache(struct encoded_page **, int);
/* linux/mm/swapfile.c */
extern atomic_long_t nr_swap_pages;
extern long total_swap_pages;
extern atomic_t nr_rotate_swap;
extern bool has_usable_swap(void);

/* Swap 50% full? Release swapcache more aggressively.. */
static inline bool vm_swap_full(void)
{
	return atomic_long_read(&nr_swap_pages) * 2 < total_swap_pages;
}
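
/*
 * Worked example (illustrative numbers): with total_swap_pages = 1000
 * and 600 entries still free in nr_swap_pages, 600 * 2 = 1200 is not
 * below 1000, so vm_swap_full() is false; once fewer than 500 entries
 * remain free (more than half the swap in use), it turns true and the
 * swapcache is released more aggressively.
 */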

static inline long get_nr_swap_pages(void)
{
	return atomic_long_read(&nr_swap_pages);
}

extern void si_swapinfo(struct sysinfo *);
swp_entry_t folio_alloc_swap(struct folio *folio);
bool folio_free_swap(struct folio *folio);
void put_swap_folio(struct folio *folio, swp_entry_t entry);
extern swp_entry_t get_swap_page_of_type(int);
extern int get_swap_pages(int n, swp_entry_t swp_entries[], int order);
extern int add_swap_count_continuation(swp_entry_t, gfp_t);
extern void swap_shmem_alloc(swp_entry_t);
extern int swap_duplicate(swp_entry_t);
extern int swapcache_prepare(swp_entry_t);
extern void swap_free(swp_entry_t);
extern void swapcache_free_entries(swp_entry_t *entries, int n);
extern void free_swap_and_cache_nr(swp_entry_t entry, int nr);
int swap_type_of(dev_t device, sector_t offset);
int find_first_swap(dev_t *device);
extern unsigned int count_swap_pages(int, int);
extern sector_t swapdev_block(int, pgoff_t);
extern int __swap_count(swp_entry_t entry);
extern int swap_swapcount(struct swap_info_struct *si, swp_entry_t entry);
extern int swp_swapcount(swp_entry_t entry);
struct swap_info_struct *swp_swap_info(swp_entry_t entry);
struct backing_dev_info;
extern int init_swap_address_space(unsigned int type, unsigned long nr_pages);
extern void exit_swap_address_space(unsigned int type);
extern struct swap_info_struct *get_swap_device(swp_entry_t entry);
sector_t swap_folio_sector(struct folio *folio);

static inline void put_swap_device(struct swap_info_struct *si)
{
	percpu_ref_put(&si->users);
}
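
/*
 * Usage sketch (illustrative, not part of the original header):
 * get_swap_device() takes a reference on si->users so the device cannot
 * be swapped off under the caller; every successful call must be
 * balanced with put_swap_device():
 *
 *	struct swap_info_struct *si = get_swap_device(entry);
 *
 *	if (si) {
 *		... use the swap device ...
 *		put_swap_device(si);
 *	}
 */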

#else /* CONFIG_SWAP */
static inline struct swap_info_struct *swp_swap_info(swp_entry_t entry)
{
	return NULL;
}

static inline struct swap_info_struct *get_swap_device(swp_entry_t entry)
{
	return NULL;
}

static inline void put_swap_device(struct swap_info_struct *si)
{
}

#define get_nr_swap_pages()			0L
#define total_swap_pages			0L
#define total_swapcache_pages()			0UL
#define vm_swap_full()				0

#define si_swapinfo(val) \
	do { (val)->freeswap = (val)->totalswap = 0; } while (0)
/* only sparc cannot include linux/pagemap.h in this file,
 * so leave put_page and release_pages undeclared... */
#define free_page_and_swap_cache(page) \
	put_page(page)
#define free_pages_and_swap_cache(pages, nr) \
	release_pages((pages), (nr));

static inline void free_swap_and_cache_nr(swp_entry_t entry, int nr)
{
}

static inline void free_swap_cache(struct folio *folio)
{
}

static inline int add_swap_count_continuation(swp_entry_t swp, gfp_t gfp_mask)
{
	return 0;
}

static inline void swap_shmem_alloc(swp_entry_t swp)
{
}

static inline int swap_duplicate(swp_entry_t swp)
{
	return 0;
}

static inline int swapcache_prepare(swp_entry_t swp)
{
	return 0;
}

static inline void swap_free(swp_entry_t swp)
{
}

static inline void put_swap_folio(struct folio *folio, swp_entry_t swp)
{
}

static inline int __swap_count(swp_entry_t entry)
{
	return 0;
}

static inline int swap_swapcount(struct swap_info_struct *si, swp_entry_t entry)
{
	return 0;
}

static inline int swp_swapcount(swp_entry_t entry)
{
	return 0;
}

static inline swp_entry_t folio_alloc_swap(struct folio *folio)
{
	swp_entry_t entry;
	entry.val = 0;
	return entry;
}

static inline bool folio_free_swap(struct folio *folio)
{
	return false;
}

static inline int add_swap_extent(struct swap_info_struct *sis,
				  unsigned long start_page,
				  unsigned long nr_pages, sector_t start_block)
{
	return -EINVAL;
}
#endif /* CONFIG_SWAP */

static inline void free_swap_and_cache(swp_entry_t entry)
{
	free_swap_and_cache_nr(entry, 1);
}

#ifdef CONFIG_MEMCG
static inline int mem_cgroup_swappiness(struct mem_cgroup *memcg)
{
	/* Cgroup2 doesn't have per-cgroup swappiness */
	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
		return READ_ONCE(vm_swappiness);

	/* root? */
	if (mem_cgroup_disabled() || mem_cgroup_is_root(memcg))
		return READ_ONCE(vm_swappiness);

	return READ_ONCE(memcg->swappiness);
}
#else
static inline int mem_cgroup_swappiness(struct mem_cgroup *mem)
{
	return READ_ONCE(vm_swappiness);
}
#endif

#if defined(CONFIG_SWAP) && defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP)
void __folio_throttle_swaprate(struct folio *folio, gfp_t gfp);
static inline void folio_throttle_swaprate(struct folio *folio, gfp_t gfp)
{
	if (mem_cgroup_disabled())
		return;
	__folio_throttle_swaprate(folio, gfp);
}
#else
static inline void folio_throttle_swaprate(struct folio *folio, gfp_t gfp)
{
}
#endif

#if defined(CONFIG_MEMCG) && defined(CONFIG_SWAP)
void mem_cgroup_swapout(struct folio *folio, swp_entry_t entry);
int __mem_cgroup_try_charge_swap(struct folio *folio, swp_entry_t entry);
static inline int mem_cgroup_try_charge_swap(struct folio *folio,
		swp_entry_t entry)
{
	if (mem_cgroup_disabled())
		return 0;
	return __mem_cgroup_try_charge_swap(folio, entry);
}

extern void __mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages);
static inline void mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages)
{
	if (mem_cgroup_disabled())
		return;
	__mem_cgroup_uncharge_swap(entry, nr_pages);
}

extern long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg);
extern bool mem_cgroup_swap_full(struct folio *folio);
#else
static inline void mem_cgroup_swapout(struct folio *folio, swp_entry_t entry)
{
}

static inline int mem_cgroup_try_charge_swap(struct folio *folio,
					     swp_entry_t entry)
{
	return 0;
}

static inline void mem_cgroup_uncharge_swap(swp_entry_t entry,
					    unsigned int nr_pages)
{
}

static inline long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
{
	return get_nr_swap_pages();
}

static inline bool mem_cgroup_swap_full(struct folio *folio)
{
	return vm_swap_full();
}
#endif

#endif /* __KERNEL__ */
#endif /* _LINUX_SWAP_H */