1 #ifndef _LINUX_SWAP_H
2 #define _LINUX_SWAP_H
3 
4 #include <linux/spinlock.h>
5 #include <linux/linkage.h>
6 #include <linux/mmzone.h>
7 #include <linux/list.h>
8 #include <linux/memcontrol.h>
9 #include <linux/sched.h>
10 #include <linux/node.h>
11 
12 #include <asm/atomic.h>
13 #include <asm/page.h>
14 
15 struct notifier_block;
16 
17 struct bio;
18 
19 #define SWAP_FLAG_PREFER	0x8000	/* set if swap priority specified */
20 #define SWAP_FLAG_PRIO_MASK	0x7fff
21 #define SWAP_FLAG_PRIO_SHIFT	0
22 #define SWAP_FLAG_DISCARD	0x10000 /* discard swap cluster after use */
23 
24 static inline int current_is_kswapd(void)
25 {
26 	return current->flags & PF_KSWAPD;
27 }
28 
29 /*
30  * MAX_SWAPFILES defines the maximum number of swap types: things which can
31  * be swapped to.  The swap type and the offset into that swap type are
32  * encoded into pte's and into pgoff_t's in the swapcache.  Using five bits
33  * for the type means that the swapcache offset is limited to 27 bits
34  * (2^27 pages) on 32-bit-pgoff_t architectures.  And that assumes that the
35  * architecture packs the type/offset into the pte as 5/27 as well.
36  */
37 #define MAX_SWAPFILES_SHIFT	5
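/*
 * Illustrative sketch (not part of the original header): one way to picture
 * the 5/27 split described above, assuming a 32-bit pgoff_t/unsigned long.
 * The real helpers are swp_entry(), swp_type() and swp_offset() in
 * <linux/swapops.h>; the names below are hypothetical.
 */
static inline unsigned long example_swp_pack(unsigned long type,
					     unsigned long offset)
{
	/* top 5 bits select the swap type, low 27 bits the page offset */
	return (type << 27) | (offset & ((1UL << 27) - 1));
}

static inline unsigned long example_swp_type(unsigned long val)
{
	return val >> 27;	/* recover the swap type */
}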
38 
39 /*
40  * Use some of the swap type numbers for other purposes. This
41  * is a convenient way to hook into the VM to trigger special
42  * actions on faults.
43  */
44 
45 /*
46  * NUMA node memory migration support
47  */
48 #ifdef CONFIG_MIGRATION
49 #define SWP_MIGRATION_NUM 2
50 #define SWP_MIGRATION_READ	(MAX_SWAPFILES + SWP_HWPOISON_NUM)
51 #define SWP_MIGRATION_WRITE	(MAX_SWAPFILES + SWP_HWPOISON_NUM + 1)
52 #else
53 #define SWP_MIGRATION_NUM 0
54 #endif
55 
56 /*
57  * Handling of hardware poisoned pages with memory corruption.
58  */
59 #ifdef CONFIG_MEMORY_FAILURE
60 #define SWP_HWPOISON_NUM 1
61 #define SWP_HWPOISON		MAX_SWAPFILES
62 #else
63 #define SWP_HWPOISON_NUM 0
64 #endif
65 
66 #define MAX_SWAPFILES \
67 	((1 << MAX_SWAPFILES_SHIFT) - SWP_MIGRATION_NUM - SWP_HWPOISON_NUM)
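/*
 * Worked example (editor's note): with MAX_SWAPFILES_SHIFT = 5 there are
 * 32 possible type values.  Assuming both CONFIG_MEMORY_FAILURE and
 * CONFIG_MIGRATION are enabled, MAX_SWAPFILES = 32 - 2 - 1 = 29 real swap
 * areas remain, with SWP_HWPOISON = 29, SWP_MIGRATION_READ = 30 and
 * SWP_MIGRATION_WRITE = 31 reserved for the special entries above.
 */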
68 
69 /*
70  * Magic header for a swap area. The first part of the union is
71  * what the swap magic looks like for the old (limited to 128MB)
72  * swap area format, the second part of the union adds - in the
73  * old reserved area - some extra information. Note that the first
74  * kilobyte is reserved for boot loader or disk label stuff...
75  *
76  * Having the magic at the end of the PAGE_SIZE makes detecting swap
77  * areas somewhat tricky on machines that support multiple page sizes.
78  * For 2.5 we'll probably want to move the magic to just beyond the
79  * bootbits...
80  */
81 union swap_header {
82 	struct {
83 		char reserved[PAGE_SIZE - 10];
84 		char magic[10];			/* SWAP-SPACE or SWAPSPACE2 */
85 	} magic;
86 	struct {
87 		char		bootbits[1024];	/* Space for disklabel etc. */
88 		__u32		version;
89 		__u32		last_page;
90 		__u32		nr_badpages;
91 		unsigned char	sws_uuid[16];
92 		unsigned char	sws_volume[16];
93 		__u32		padding[117];
94 		__u32		badpages[1];
95 	} info;
96 };
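/*
 * Worked layout (editor's note, assuming PAGE_SIZE == 4096): in the info
 * view, bootbits occupy bytes 0-1023, version sits at offset 1024,
 * last_page at 1028, nr_badpages at 1032, sws_uuid at 1036, sws_volume at
 * 1052 and badpages[] starts at 1536; in the magic view the 10-byte
 * "SWAPSPACE2" (or legacy "SWAP-SPACE") signature occupies the last bytes
 * of the page, at offset 4086.
 */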
97 
98  /* A swap entry has to fit into an "unsigned long", as
99   * the entry is hidden in the "index" field of the
100   * swapper address space.
101   */
102 typedef struct {
103 	unsigned long val;
104 } swp_entry_t;
105 
106 /*
107  * current->reclaim_state points to one of these when a task is running
108  * memory reclaim
109  */
110 struct reclaim_state {
111 	unsigned long reclaimed_slab;
112 };
113 
114 #ifdef __KERNEL__
115 
116 struct address_space;
117 struct sysinfo;
118 struct writeback_control;
119 struct zone;
120 
121 /*
122  * A swap extent maps a range of a swapfile's PAGE_SIZE pages onto a range of
123  * disk blocks.  A list of swap extents maps the entire swapfile.  (Where the
124  * term `swapfile' refers to either a blockdevice or an IS_REG file.  Apart
125  * from setup, they're handled identically.)
126  *
127  * We always assume that blocks are of size PAGE_SIZE.
128  */
129 struct swap_extent {
130 	struct list_head list;
131 	pgoff_t start_page;
132 	pgoff_t nr_pages;
133 	sector_t start_block;
134 };
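/*
 * Illustrative sketch (hypothetical helper, not part of the original
 * header): how one extent translates a swapfile page offset into its
 * starting disk block, in the spirit of map_swap_page() in
 * linux/mm/swapfile.c.  The caller is assumed to have found the extent
 * that covers page_offset.
 */
static inline sector_t example_extent_block(const struct swap_extent *se,
					    pgoff_t page_offset)
{
	/* blocks are PAGE_SIZE, so offsets within the extent map 1:1 */
	return se->start_block + (page_offset - se->start_page);
}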
135 
136 /*
137  * Max bad pages in the new format.
138  */
139 #define __swapoffset(x) ((unsigned long)&((union swap_header *)0)->x)
140 #define MAX_SWAP_BADPAGES \
141 	((__swapoffset(magic.magic) - __swapoffset(info.badpages)) / sizeof(int))
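/*
 * Worked example (editor's note, assuming PAGE_SIZE == 4096 and a 4-byte
 * int): __swapoffset(magic.magic) = 4086 and __swapoffset(info.badpages)
 * = 1536, so MAX_SWAP_BADPAGES = (4086 - 1536) / 4 = 637 bad-page slots.
 */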
142 
143 enum {
144 	SWP_USED	= (1 << 0),	/* is slot in swap_info[] used? */
145 	SWP_WRITEOK	= (1 << 1),	/* ok to write to this swap?	*/
146 	SWP_DISCARDABLE = (1 << 2),	/* swapon+blkdev support discard */
147 	SWP_DISCARDING	= (1 << 3),	/* now discarding a free cluster */
148 	SWP_SOLIDSTATE	= (1 << 4),	/* blkdev seeks are cheap */
149 	SWP_CONTINUED	= (1 << 5),	/* swap_map has count continuation */
150 	SWP_BLKDEV	= (1 << 6),	/* it's a block device */
151 					/* add others here before... */
152 	SWP_SCANNING	= (1 << 8),	/* refcount in scan_swap_map */
153 };
154 
155 #define SWAP_CLUSTER_MAX 32
156 #define COMPACT_CLUSTER_MAX SWAP_CLUSTER_MAX
157 
158 #define SWAP_MAP_MAX	0x3e	/* Max duplication count, in first swap_map */
159 #define SWAP_MAP_BAD	0x3f	/* Note pageblock is bad, in first swap_map */
160 #define SWAP_HAS_CACHE	0x40	/* Flag page is cached, in first swap_map */
161 #define SWAP_CONT_MAX	0x7f	/* Max count, in each swap_map continuation */
162 #define COUNT_CONTINUED	0x80	/* See swap_map continuation for full count */
163 #define SWAP_MAP_SHMEM	0xbf	/* Owned by shmem/tmpfs, in first swap_map */
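/*
 * Illustrative sketch (hypothetical helper, not part of the original
 * header): a first-level swap_map byte carries a duplication count in its
 * low bits (up to SWAP_MAP_MAX), SWAP_HAS_CACHE when the slot also has a
 * swapcache page, and COUNT_CONTINUED when the rest of the count lives in
 * continuation pages.  This mirrors the swap_count() helper kept in
 * linux/mm/swapfile.c.
 */
static inline unsigned char example_swap_map_count(unsigned char ent)
{
	/* strip the swapcache flag; what remains is the count (or one of
	 * the special SWAP_MAP_BAD/SWAP_MAP_SHMEM markers) */
	return ent & ~SWAP_HAS_CACHE;
}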
164 
165 /*
166  * The in-memory structure used to track swap areas.
167  */
168 struct swap_info_struct {
169 	unsigned long	flags;		/* SWP_USED etc: see above */
170 	signed short	prio;		/* swap priority of this type */
171 	signed char	type;		/* strange name for an index */
172 	signed char	next;		/* next type on the swap list */
173 	unsigned int	max;		/* extent of the swap_map */
174 	unsigned char *swap_map;	/* vmalloc'ed array of usage counts */
175 	unsigned int lowest_bit;	/* index of first free in swap_map */
176 	unsigned int highest_bit;	/* index of last free in swap_map */
177 	unsigned int pages;		/* total of usable pages of swap */
178 	unsigned int inuse_pages;	/* number of those currently in use */
179 	unsigned int cluster_next;	/* likely index for next allocation */
180 	unsigned int cluster_nr;	/* countdown to next cluster search */
181 	unsigned int lowest_alloc;	/* while preparing discard cluster */
182 	unsigned int highest_alloc;	/* while preparing discard cluster */
183 	struct swap_extent *curr_swap_extent;
184 	struct swap_extent first_swap_extent;
185 	struct block_device *bdev;	/* swap device or bdev of swap file */
186 	struct file *swap_file;		/* seldom referenced */
187 	unsigned int old_block_size;	/* seldom referenced */
188 };
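/*
 * Illustrative sketch (hypothetical helper, not part of the original
 * header): how the per-area counters above relate to one another.
 */
static inline unsigned int example_si_free_slots(const struct swap_info_struct *si)
{
	/* slots still available for allocation on this swap area */
	return si->pages - si->inuse_pages;
}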
189 
190 struct swap_list_t {
191 	int head;	/* head of priority-ordered swapfile list */
192 	int next;	/* swapfile to be used next */
193 };
194 
195 /* Swap 50% full? Release swapcache more aggressively. */
196 #define vm_swap_full() (nr_swap_pages*2 < total_swap_pages)
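/*
 * Worked example (editor's note): with total_swap_pages = 1000 and
 * nr_swap_pages (free slots) = 400, 400 * 2 = 800 < 1000, so
 * vm_swap_full() is true and reclaim drops swapcache more aggressively.
 */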
197 
198 /* linux/mm/page_alloc.c */
199 extern unsigned long totalram_pages;
200 extern unsigned long totalreserve_pages;
201 extern unsigned int nr_free_buffer_pages(void);
202 extern unsigned int nr_free_pagecache_pages(void);
203 
204 /* Definition of global_page_state not available yet */
205 #define nr_free_pages() global_page_state(NR_FREE_PAGES)
206 
207 
208 /* linux/mm/swap.c */
209 extern void __lru_cache_add(struct page *, enum lru_list lru);
210 extern void lru_cache_add_lru(struct page *, enum lru_list lru);
211 extern void activate_page(struct page *);
212 extern void mark_page_accessed(struct page *);
213 extern void lru_add_drain(void);
214 extern int lru_add_drain_all(void);
215 extern void rotate_reclaimable_page(struct page *page);
216 extern void swap_setup(void);
217 
218 extern void add_page_to_unevictable_list(struct page *page);
219 
220 /**
221  * lru_cache_add_anon: add a page to the inactive anon LRU list
222  * @page: the page to add
223  */
224 static inline void lru_cache_add_anon(struct page *page)
225 {
226 	__lru_cache_add(page, LRU_INACTIVE_ANON);
227 }
228 
229 static inline void lru_cache_add_file(struct page *page)
230 {
231 	__lru_cache_add(page, LRU_INACTIVE_FILE);
232 }
233 
234 /* LRU Isolation modes. */
235 #define ISOLATE_INACTIVE 0	/* Isolate inactive pages. */
236 #define ISOLATE_ACTIVE 1	/* Isolate active pages. */
237 #define ISOLATE_BOTH 2		/* Isolate both active and inactive pages. */
238 
239 /* linux/mm/vmscan.c */
240 extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
241 					gfp_t gfp_mask, nodemask_t *mask);
242 extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem,
243 						  gfp_t gfp_mask, bool noswap,
244 						  unsigned int swappiness);
245 extern unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
246 						gfp_t gfp_mask, bool noswap,
247 						unsigned int swappiness,
248 						struct zone *zone);
249 extern int __isolate_lru_page(struct page *page, int mode, int file);
250 extern unsigned long shrink_all_memory(unsigned long nr_pages);
251 extern int vm_swappiness;
252 extern int remove_mapping(struct address_space *mapping, struct page *page);
253 extern long vm_total_pages;
254 
255 #ifdef CONFIG_NUMA
256 extern int zone_reclaim_mode;
257 extern int sysctl_min_unmapped_ratio;
258 extern int sysctl_min_slab_ratio;
259 extern int zone_reclaim(struct zone *, gfp_t, unsigned int);
260 #else
261 #define zone_reclaim_mode 0
262 static inline int zone_reclaim(struct zone *z, gfp_t mask, unsigned int order)
263 {
264 	return 0;
265 }
266 #endif
267 
268 extern int page_evictable(struct page *page, struct vm_area_struct *vma);
269 extern void scan_mapping_unevictable_pages(struct address_space *);
270 
271 extern unsigned long scan_unevictable_pages;
272 extern int scan_unevictable_handler(struct ctl_table *, int,
273 					void __user *, size_t *, loff_t *);
274 #ifdef CONFIG_NUMA
275 extern int scan_unevictable_register_node(struct node *node);
276 extern void scan_unevictable_unregister_node(struct node *node);
277 #else
278 static inline int scan_unevictable_register_node(struct node *node)
279 {
280 	return 0;
281 }
282 static inline void scan_unevictable_unregister_node(struct node *node)
283 {
284 }
285 #endif
286 
287 extern int kswapd_run(int nid);
288 extern void kswapd_stop(int nid);
289 
290 #ifdef CONFIG_MMU
291 /* linux/mm/shmem.c */
292 extern int shmem_unuse(swp_entry_t entry, struct page *page);
293 #endif /* CONFIG_MMU */
294 
295 #ifdef CONFIG_CGROUP_MEM_RES_CTLR
296 extern void mem_cgroup_get_shmem_target(struct inode *inode, pgoff_t pgoff,
297 					struct page **pagep, swp_entry_t *ent);
298 #endif
299 
300 extern void swap_unplug_io_fn(struct backing_dev_info *, struct page *);
301 
302 #ifdef CONFIG_SWAP
303 /* linux/mm/page_io.c */
304 extern int swap_readpage(struct page *);
305 extern int swap_writepage(struct page *page, struct writeback_control *wbc);
306 extern void end_swap_bio_read(struct bio *bio, int err);
307 
308 /* linux/mm/swap_state.c */
309 extern struct address_space swapper_space;
310 #define total_swapcache_pages  swapper_space.nrpages
311 extern void show_swap_cache_info(void);
312 extern int add_to_swap(struct page *);
313 extern int add_to_swap_cache(struct page *, swp_entry_t, gfp_t);
314 extern void __delete_from_swap_cache(struct page *);
315 extern void delete_from_swap_cache(struct page *);
316 extern void free_page_and_swap_cache(struct page *);
317 extern void free_pages_and_swap_cache(struct page **, int);
318 extern struct page *lookup_swap_cache(swp_entry_t);
319 extern struct page *read_swap_cache_async(swp_entry_t, gfp_t,
320 			struct vm_area_struct *vma, unsigned long addr);
321 extern struct page *swapin_readahead(swp_entry_t, gfp_t,
322 			struct vm_area_struct *vma, unsigned long addr);
323 
324 /* linux/mm/swapfile.c */
325 extern long nr_swap_pages;
326 extern long total_swap_pages;
327 extern void si_swapinfo(struct sysinfo *);
328 extern swp_entry_t get_swap_page(void);
329 extern swp_entry_t get_swap_page_of_type(int);
330 extern int valid_swaphandles(swp_entry_t, unsigned long *);
331 extern int add_swap_count_continuation(swp_entry_t, gfp_t);
332 extern void swap_shmem_alloc(swp_entry_t);
333 extern int swap_duplicate(swp_entry_t);
334 extern int swapcache_prepare(swp_entry_t);
335 extern void swap_free(swp_entry_t);
336 extern void swapcache_free(swp_entry_t, struct page *page);
337 extern int free_swap_and_cache(swp_entry_t);
338 extern int swap_type_of(dev_t, sector_t, struct block_device **);
339 extern unsigned int count_swap_pages(int, int);
340 extern sector_t map_swap_page(struct page *, struct block_device **);
341 extern sector_t swapdev_block(int, pgoff_t);
342 extern int reuse_swap_page(struct page *);
343 extern int try_to_free_swap(struct page *);
344 struct backing_dev_info;
345 
346 /* linux/mm/thrash.c */
347 extern struct mm_struct *swap_token_mm;
348 extern void grab_swap_token(struct mm_struct *);
349 extern void __put_swap_token(struct mm_struct *);
350 
351 static inline int has_swap_token(struct mm_struct *mm)
352 {
353 	return (mm == swap_token_mm);
354 }
355 
356 static inline void put_swap_token(struct mm_struct *mm)
357 {
358 	if (has_swap_token(mm))
359 		__put_swap_token(mm);
360 }
361 
362 static inline void disable_swap_token(void)
363 {
364 	put_swap_token(swap_token_mm);
365 }
366 
367 #ifdef CONFIG_CGROUP_MEM_RES_CTLR
368 extern void
369 mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout);
370 extern int mem_cgroup_count_swap_user(swp_entry_t ent, struct page **pagep);
371 #else
372 static inline void
373 mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout)
374 {
375 }
376 #endif
377 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
378 extern void mem_cgroup_uncharge_swap(swp_entry_t ent);
379 #else
380 static inline void mem_cgroup_uncharge_swap(swp_entry_t ent)
381 {
382 }
383 #endif
384 
385 #else /* CONFIG_SWAP */
386 
387 #define nr_swap_pages				0L
388 #define total_swap_pages			0L
389 #define total_swapcache_pages			0UL
390 
391 #define si_swapinfo(val) \
392 	do { (val)->freeswap = (val)->totalswap = 0; } while (0)
393 /* only sparc cannot include linux/pagemap.h in this file
394  * so leave page_cache_release and release_pages undeclared... */
395 #define free_page_and_swap_cache(page) \
396 	page_cache_release(page)
397 #define free_pages_and_swap_cache(pages, nr) \
398 	release_pages((pages), (nr), 0);
399 
400 static inline void show_swap_cache_info(void)
401 {
402 }
403 
404 #define free_swap_and_cache(swp)	is_migration_entry(swp)
405 #define swapcache_prepare(swp)		is_migration_entry(swp)
406 
407 static inline int add_swap_count_continuation(swp_entry_t swp, gfp_t gfp_mask)
408 {
409 	return 0;
410 }
411 
412 static inline void swap_shmem_alloc(swp_entry_t swp)
413 {
414 }
415 
416 static inline int swap_duplicate(swp_entry_t swp)
417 {
418 	return 0;
419 }
420 
421 static inline void swap_free(swp_entry_t swp)
422 {
423 }
424 
425 static inline void swapcache_free(swp_entry_t swp, struct page *page)
426 {
427 }
428 
429 static inline struct page *swapin_readahead(swp_entry_t swp, gfp_t gfp_mask,
430 			struct vm_area_struct *vma, unsigned long addr)
431 {
432 	return NULL;
433 }
434 
435 static inline int swap_writepage(struct page *p, struct writeback_control *wbc)
436 {
437 	return 0;
438 }
439 
440 static inline struct page *lookup_swap_cache(swp_entry_t swp)
441 {
442 	return NULL;
443 }
444 
445 static inline int add_to_swap(struct page *page)
446 {
447 	return 0;
448 }
449 
450 static inline int add_to_swap_cache(struct page *page, swp_entry_t entry,
451 							gfp_t gfp_mask)
452 {
453 	return -1;
454 }
455 
456 static inline void __delete_from_swap_cache(struct page *page)
457 {
458 }
459 
460 static inline void delete_from_swap_cache(struct page *page)
461 {
462 }
463 
464 #define reuse_swap_page(page)	(page_mapcount(page) == 1)
465 
466 static inline int try_to_free_swap(struct page *page)
467 {
468 	return 0;
469 }
470 
471 static inline swp_entry_t get_swap_page(void)
472 {
473 	swp_entry_t entry;
474 	entry.val = 0;
475 	return entry;
476 }
477 
478 /* linux/mm/thrash.c */
479 static inline void put_swap_token(struct mm_struct *mm)
480 {
481 }
482 
483 static inline void grab_swap_token(struct mm_struct *mm)
484 {
485 }
486 
487 static inline int has_swap_token(struct mm_struct *mm)
488 {
489 	return 0;
490 }
491 
492 static inline void disable_swap_token(void)
493 {
494 }
495 
496 static inline void
497 mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent)
498 {
499 }
500 
501 #ifdef CONFIG_CGROUP_MEM_RES_CTLR
502 static inline int
503 mem_cgroup_count_swap_user(swp_entry_t ent, struct page **pagep)
504 {
505 	return 0;
506 }
507 #endif
508 
509 #endif /* CONFIG_SWAP */
510 #endif /* __KERNEL__ */
511 #endif /* _LINUX_SWAP_H */
512