#ifndef _LINUX_SWAP_H
#define _LINUX_SWAP_H

#include <linux/spinlock.h>
#include <linux/linkage.h>
#include <linux/mmzone.h>
#include <linux/list.h>
#include <linux/memcontrol.h>
#include <linux/sched.h>
#include <linux/node.h>

#include <linux/atomic.h>
#include <asm/page.h>

struct notifier_block;

struct bio;

#define SWAP_FLAG_PREFER	0x8000	/* set if swap priority specified */
#define SWAP_FLAG_PRIO_MASK	0x7fff
#define SWAP_FLAG_PRIO_SHIFT	0
#define SWAP_FLAG_DISCARD	0x10000 /* discard swap cluster after use */

#define SWAP_FLAGS_VALID	(SWAP_FLAG_PRIO_MASK | SWAP_FLAG_PREFER | \
				 SWAP_FLAG_DISCARD)

static inline int current_is_kswapd(void)
{
	return current->flags & PF_KSWAPD;
}

/*
 * MAX_SWAPFILES defines the maximum number of swap types: things which can
 * be swapped to.  The swap type and the offset into that swap type are
 * encoded into pte's and into pgoff_t's in the swapcache.  Using five bits
 * for the type means that a maximum of 2^27 swapcache pages can be addressed
 * on 32-bit-pgoff_t architectures, assuming the architecture also packs the
 * type/offset into the pte as 5/27.
 */
#define MAX_SWAPFILES_SHIFT	5

/*
 * Use some of the swap file numbers for other purposes. This
 * is a convenient way to hook into the VM to trigger special
 * actions on faults.
 */

/*
 * NUMA node memory migration support
 */
#ifdef CONFIG_MIGRATION
#define SWP_MIGRATION_NUM	2
#define SWP_MIGRATION_READ	(MAX_SWAPFILES + SWP_HWPOISON_NUM)
#define SWP_MIGRATION_WRITE	(MAX_SWAPFILES + SWP_HWPOISON_NUM + 1)
#else
#define SWP_MIGRATION_NUM	0
#endif

/*
 * Handling of hardware poisoned pages with memory corruption.
 */
#ifdef CONFIG_MEMORY_FAILURE
#define SWP_HWPOISON_NUM	1
#define SWP_HWPOISON		MAX_SWAPFILES
#else
#define SWP_HWPOISON_NUM	0
#endif

#define MAX_SWAPFILES \
	((1 << MAX_SWAPFILES_SHIFT) - SWP_MIGRATION_NUM - SWP_HWPOISON_NUM)

/*
 * Magic header for a swap area. The first part of the union is
 * what the swap magic looks like for the old (limited to 128MB)
 * swap area format, the second part of the union adds - in the
 * old reserved area - some extra information. Note that the first
 * kilobyte is reserved for boot loader or disk label stuff...
 *
 * Having the magic at the end of the PAGE_SIZE makes detecting swap
 * areas somewhat tricky on machines that support multiple page sizes.
 * For 2.5 we'll probably want to move the magic to just beyond the
 * bootbits...
 */
union swap_header {
	struct {
		char reserved[PAGE_SIZE - 10];
		char magic[10];			/* SWAP-SPACE or SWAPSPACE2 */
	} magic;
	struct {
		char		bootbits[1024];	/* Space for disklabel etc. */
		__u32		version;
		__u32		last_page;
		__u32		nr_badpages;
		unsigned char	sws_uuid[16];
		unsigned char	sws_volume[16];
		__u32		padding[117];
		__u32		badpages[1];
	} info;
};
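/*
 * Illustrative sketch, not part of the kernel API: how the two views of
 * union swap_header overlay the first page of a swap area.  mkswap puts
 * the "SWAPSPACE2" signature into the last 10 bytes of the page and the
 * version/size/badpage metadata just after the 1K boot block; the kernel
 * re-checks both at swapon time in mm/swapfile.c.  The helper below is
 * hypothetical and only handles the v1 ("SWAPSPACE2") layout:
 *
 *	static int swap_header_looks_valid(const union swap_header *h)
 *	{
 *		if (memcmp(h->magic.magic, "SWAPSPACE2", 10) != 0)
 *			return 0;
 *		if (h->info.version != 1)
 *			return 0;
 *		if (h->info.nr_badpages > MAX_SWAP_BADPAGES)
 *			return 0;
 *		return h->info.last_page != 0;
 *	}
 */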
/*
 * A swap entry has to fit into a "unsigned long", as the entry is hidden
 * in the "index" field of the swapper address space.
 */
typedef struct {
	unsigned long val;
} swp_entry_t;

/*
 * current->reclaim_state points to one of these when a task is running
 * memory reclaim
 */
struct reclaim_state {
	unsigned long reclaimed_slab;
};

#ifdef __KERNEL__

struct address_space;
struct sysinfo;
struct writeback_control;
struct zone;

/*
 * A swap extent maps a range of a swapfile's PAGE_SIZE pages onto a range of
 * disk blocks.  A list of swap extents maps the entire swapfile.  (Where the
 * term `swapfile' refers to either a blockdevice or an S_ISREG file.  Apart
 * from setup, they're handled identically.)
 *
 * We always assume that blocks are of size PAGE_SIZE.
 */
struct swap_extent {
	struct list_head list;
	pgoff_t start_page;
	pgoff_t nr_pages;
	sector_t start_block;
};

/*
 * Max bad pages in the new format..
 */
#define __swapoffset(x) ((unsigned long)&((union swap_header *)0)->x)
#define MAX_SWAP_BADPAGES \
	((__swapoffset(magic.magic) - __swapoffset(info.badpages)) / sizeof(int))

enum {
	SWP_USED	= (1 << 0),	/* is slot in swap_info[] used? */
	SWP_WRITEOK	= (1 << 1),	/* ok to write to this swap? */
	SWP_DISCARDABLE = (1 << 2),	/* swapon+blkdev support discard */
	SWP_DISCARDING	= (1 << 3),	/* now discarding a free cluster */
	SWP_SOLIDSTATE	= (1 << 4),	/* blkdev seeks are cheap */
	SWP_CONTINUED	= (1 << 5),	/* swap_map has count continuation */
	SWP_BLKDEV	= (1 << 6),	/* it's a block device */
					/* add others here before... */
	SWP_SCANNING	= (1 << 8),	/* refcount in scan_swap_map */
};

#define SWAP_CLUSTER_MAX 32
#define COMPACT_CLUSTER_MAX SWAP_CLUSTER_MAX

/*
 * Ratio between the present memory in the zone and the "gap" that
 * we're allowing kswapd to shrink in addition to the per-zone high
 * wmark, even for zones that already have the high wmark satisfied,
 * in order to provide better per-zone lru behavior. We are ok to
 * spend not more than 1% of the memory for this zone balancing "gap".
 */
#define KSWAPD_ZONE_BALANCE_GAP_RATIO 100

#define SWAP_MAP_MAX	0x3e	/* Max duplication count, in first swap_map */
#define SWAP_MAP_BAD	0x3f	/* Note page slot is bad, in first swap_map */
#define SWAP_HAS_CACHE	0x40	/* Flag page is cached, in first swap_map */
#define SWAP_CONT_MAX	0x7f	/* Max count, in each swap_map continuation */
#define COUNT_CONTINUED	0x80	/* See swap_map continuation for full count */
#define SWAP_MAP_SHMEM	0xbf	/* Owned by shmem/tmpfs, in first swap_map */
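/*
 * Illustrative sketch, not part of the kernel API: how one byte of a swap
 * area's swap_map encodes the values above.  The low bits hold the map
 * count (capped at SWAP_MAP_MAX), SWAP_HAS_CACHE flags that the slot is
 * also held by the swap cache, COUNT_CONTINUED means the full count spills
 * into continuation pages, and SWAP_MAP_BAD/SWAP_MAP_SHMEM are special
 * markers rather than counts.  The helper below is hypothetical; the
 * authoritative decoding lives in mm/swapfile.c.
 *
 *	static unsigned char swap_map_count(unsigned char ent)
 *	{
 *		ent &= ~SWAP_HAS_CACHE;
 *		if (ent == SWAP_MAP_BAD || ent == SWAP_MAP_SHMEM)
 *			return 0;
 *		if (ent & COUNT_CONTINUED)
 *			return SWAP_MAP_MAX;
 *		return ent;
 *	}
 */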
/*
 * The in-memory structure used to track swap areas.
 */
struct swap_info_struct {
	unsigned long	flags;		/* SWP_USED etc: see above */
	signed short	prio;		/* swap priority of this type */
	signed char	type;		/* strange name for an index */
	signed char	next;		/* next type on the swap list */
	unsigned int	max;		/* extent of the swap_map */
	unsigned char *swap_map;	/* vmalloc'ed array of usage counts */
	unsigned int lowest_bit;	/* index of first free in swap_map */
	unsigned int highest_bit;	/* index of last free in swap_map */
	unsigned int pages;		/* total of usable pages of swap */
	unsigned int inuse_pages;	/* number of those currently in use */
	unsigned int cluster_next;	/* likely index for next allocation */
	unsigned int cluster_nr;	/* countdown to next cluster search */
	unsigned int lowest_alloc;	/* while preparing discard cluster */
	unsigned int highest_alloc;	/* while preparing discard cluster */
	struct swap_extent *curr_swap_extent;
	struct swap_extent first_swap_extent;
	struct block_device *bdev;	/* swap device or bdev of swap file */
	struct file *swap_file;		/* seldom referenced */
	unsigned int old_block_size;	/* seldom referenced */
};

struct swap_list_t {
	int head;	/* head of priority-ordered swapfile list */
	int next;	/* swapfile to be used next */
};
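/*
 * Illustrative sketch, not part of this header's API: swap_list_t plus the
 * type/next/prio fields above form a priority-ordered list threaded through
 * mm/swapfile.c's private swap_info[] array.  head is the highest-priority
 * swap type, each area's ->next is the following type, and -1 terminates
 * the list.  A simplified walk (hypothetical; get_swap_page() additionally
 * round-robins between areas of equal priority):
 *
 *	for (type = swap_list.head; type >= 0; type = swap_info[type]->next) {
 *		struct swap_info_struct *si = swap_info[type];
 *
 *		if ((si->flags & SWP_WRITEOK) && si->inuse_pages < si->pages)
 *			break;
 *	}
 */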
/* Swap 50% full? Release swapcache more aggressively.. */
#define vm_swap_full() (nr_swap_pages*2 < total_swap_pages)

/* linux/mm/page_alloc.c */
extern unsigned long totalram_pages;
extern unsigned long totalreserve_pages;
extern unsigned long dirty_balance_reserve;
extern unsigned int nr_free_buffer_pages(void);
extern unsigned int nr_free_pagecache_pages(void);

/* Definition of global_page_state not available yet */
#define nr_free_pages() global_page_state(NR_FREE_PAGES)

/* linux/mm/swap.c */
extern void __lru_cache_add(struct page *, enum lru_list lru);
extern void lru_cache_add_lru(struct page *, enum lru_list lru);
extern void lru_add_page_tail(struct zone *zone,
			      struct page *page, struct page *page_tail);
extern void activate_page(struct page *);
extern void mark_page_accessed(struct page *);
extern void lru_add_drain(void);
extern void lru_add_drain_cpu(int cpu);
extern int lru_add_drain_all(void);
extern void rotate_reclaimable_page(struct page *page);
extern void deactivate_page(struct page *page);
extern void swap_setup(void);

extern void add_page_to_unevictable_list(struct page *page);

/**
 * lru_cache_add_anon: add a page to the inactive anon page list
 * @page: the page to add
 */
static inline void lru_cache_add_anon(struct page *page)
{
	__lru_cache_add(page, LRU_INACTIVE_ANON);
}

static inline void lru_cache_add_file(struct page *page)
{
	__lru_cache_add(page, LRU_INACTIVE_FILE);
}

/* linux/mm/vmscan.c */
extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
					gfp_t gfp_mask, nodemask_t *mask);
extern int __isolate_lru_page(struct page *page, isolate_mode_t mode, int file);
extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem,
						  gfp_t gfp_mask, bool noswap);
extern unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
						gfp_t gfp_mask, bool noswap,
						struct zone *zone,
						unsigned long *nr_scanned);
extern unsigned long shrink_all_memory(unsigned long nr_pages);
extern int vm_swappiness;
extern int remove_mapping(struct address_space *mapping, struct page *page);
extern long vm_total_pages;

#ifdef CONFIG_NUMA
extern int zone_reclaim_mode;
extern int sysctl_min_unmapped_ratio;
extern int sysctl_min_slab_ratio;
extern int zone_reclaim(struct zone *, gfp_t, unsigned int);
#else
#define zone_reclaim_mode 0
static inline int zone_reclaim(struct zone *z, gfp_t mask, unsigned int order)
{
	return 0;
}
#endif

extern int page_evictable(struct page *page, struct vm_area_struct *vma);
extern void check_move_unevictable_pages(struct page **, int nr_pages);

extern unsigned long scan_unevictable_pages;
extern int scan_unevictable_handler(struct ctl_table *, int,
				    void __user *, size_t *, loff_t *);
#ifdef CONFIG_NUMA
extern int scan_unevictable_register_node(struct node *node);
extern void scan_unevictable_unregister_node(struct node *node);
#else
static inline int scan_unevictable_register_node(struct node *node)
{
	return 0;
}
static inline void scan_unevictable_unregister_node(struct node *node)
{
}
#endif

extern int kswapd_run(int nid);
extern void kswapd_stop(int nid);
#ifdef CONFIG_CGROUP_MEM_RES_CTLR
extern int mem_cgroup_swappiness(struct mem_cgroup *mem);
#else
static inline int mem_cgroup_swappiness(struct mem_cgroup *mem)
{
	return vm_swappiness;
}
#endif
#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
extern void mem_cgroup_uncharge_swap(swp_entry_t ent);
#else
static inline void mem_cgroup_uncharge_swap(swp_entry_t ent)
{
}
#endif
#ifdef CONFIG_SWAP
/* linux/mm/page_io.c */
extern int swap_readpage(struct page *);
extern int swap_writepage(struct page *page, struct writeback_control *wbc);
extern void end_swap_bio_read(struct bio *bio, int err);

/* linux/mm/swap_state.c */
extern struct address_space swapper_space;
#define total_swapcache_pages swapper_space.nrpages
extern void show_swap_cache_info(void);
extern int add_to_swap(struct page *);
extern int add_to_swap_cache(struct page *, swp_entry_t, gfp_t);
extern void __delete_from_swap_cache(struct page *);
extern void delete_from_swap_cache(struct page *);
extern void free_page_and_swap_cache(struct page *);
extern void free_pages_and_swap_cache(struct page **, int);
extern struct page *lookup_swap_cache(swp_entry_t);
extern struct page *read_swap_cache_async(swp_entry_t, gfp_t,
			struct vm_area_struct *vma, unsigned long addr);
extern struct page *swapin_readahead(swp_entry_t, gfp_t,
			struct vm_area_struct *vma, unsigned long addr);

/* linux/mm/swapfile.c */
extern long nr_swap_pages;
extern long total_swap_pages;
extern void si_swapinfo(struct sysinfo *);
extern swp_entry_t get_swap_page(void);
extern swp_entry_t get_swap_page_of_type(int);
extern int add_swap_count_continuation(swp_entry_t, gfp_t);
extern void swap_shmem_alloc(swp_entry_t);
extern int swap_duplicate(swp_entry_t);
extern int swapcache_prepare(swp_entry_t);
extern void swap_free(swp_entry_t);
extern void swapcache_free(swp_entry_t, struct page *page);
extern int free_swap_and_cache(swp_entry_t);
extern int swap_type_of(dev_t, sector_t, struct block_device **);
extern unsigned int count_swap_pages(int, int);
extern sector_t map_swap_page(struct page *, struct block_device **);
extern sector_t swapdev_block(int, pgoff_t);
extern int reuse_swap_page(struct page *);
extern int try_to_free_swap(struct page *);
struct backing_dev_info;
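/*
 * Illustrative sketch, not part of this header: how the swapfile.c entry
 * points above pair up.  A fresh entry from get_swap_page() already holds
 * the swap cache reference, so it must either be installed with
 * add_to_swap_cache() or given back with swapcache_free(); this is roughly
 * what add_to_swap() in mm/swap_state.c does.  swap_duplicate() and
 * swap_free() then add/drop one reference per swap pte, and
 * free_swap_and_cache() drops a reference and reclaims the swap cache page
 * when it can.  The gfp mask below is illustrative only:
 *
 *	swp_entry_t entry = get_swap_page();
 *
 *	if (!entry.val)
 *		return 0;
 *	if (add_to_swap_cache(page, entry, GFP_ATOMIC)) {
 *		swapcache_free(entry, NULL);
 *		return 0;
 *	}
 *	SetPageDirty(page);
 *	return 1;
 */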
/* linux/mm/thrash.c */
extern struct mm_struct *swap_token_mm;
extern void grab_swap_token(struct mm_struct *);
extern void __put_swap_token(struct mm_struct *);
extern void disable_swap_token(struct mem_cgroup *memcg);

static inline int has_swap_token(struct mm_struct *mm)
{
	return (mm == swap_token_mm);
}

static inline void put_swap_token(struct mm_struct *mm)
{
	if (has_swap_token(mm))
		__put_swap_token(mm);
}

#ifdef CONFIG_CGROUP_MEM_RES_CTLR
extern void
mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout);
extern int mem_cgroup_count_swap_user(swp_entry_t ent, struct page **pagep);
#else
static inline void
mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout)
{
}
#endif
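/*
 * Illustrative note, not part of this header's API: the swap token
 * (mm/thrash.c) is a simple anti-thrashing mechanism.  At most one
 * mm_struct owns the token at a time (swap_token_mm) and reclaim tries to
 * spare the owner's pages; disable_swap_token() lets reclaim ignore the
 * token when memory pressure is severe.  The swap-in fault path contends
 * for the token before starting the read, roughly:
 *
 *	grab_swap_token(mm);	(contend for the token before read-in)
 *	page = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE, vma, address);
 */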
#else /* CONFIG_SWAP */

#define nr_swap_pages			0L
#define total_swap_pages		0L
#define total_swapcache_pages		0UL

#define si_swapinfo(val) \
	do { (val)->freeswap = (val)->totalswap = 0; } while (0)
/* only sparc cannot include linux/pagemap.h in this file
 * so leave page_cache_release and release_pages undeclared... */
#define free_page_and_swap_cache(page) \
	page_cache_release(page)
#define free_pages_and_swap_cache(pages, nr) \
	release_pages((pages), (nr), 0);

static inline void show_swap_cache_info(void)
{
}

#define free_swap_and_cache(swp)	is_migration_entry(swp)
#define swapcache_prepare(swp)		is_migration_entry(swp)

static inline int add_swap_count_continuation(swp_entry_t swp, gfp_t gfp_mask)
{
	return 0;
}

static inline void swap_shmem_alloc(swp_entry_t swp)
{
}

static inline int swap_duplicate(swp_entry_t swp)
{
	return 0;
}

static inline void swap_free(swp_entry_t swp)
{
}

static inline void swapcache_free(swp_entry_t swp, struct page *page)
{
}

static inline struct page *swapin_readahead(swp_entry_t swp, gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr)
{
	return NULL;
}

static inline int swap_writepage(struct page *p, struct writeback_control *wbc)
{
	return 0;
}

static inline struct page *lookup_swap_cache(swp_entry_t swp)
{
	return NULL;
}

static inline int add_to_swap(struct page *page)
{
	return 0;
}

static inline int add_to_swap_cache(struct page *page, swp_entry_t entry,
					gfp_t gfp_mask)
{
	return -1;
}

static inline void __delete_from_swap_cache(struct page *page)
{
}

static inline void delete_from_swap_cache(struct page *page)
{
}

#define reuse_swap_page(page)	(page_mapcount(page) == 1)

static inline int try_to_free_swap(struct page *page)
{
	return 0;
}

static inline swp_entry_t get_swap_page(void)
{
	swp_entry_t entry;
	entry.val = 0;
	return entry;
}

/* linux/mm/thrash.c */
static inline void put_swap_token(struct mm_struct *mm)
{
}

static inline void grab_swap_token(struct mm_struct *mm)
{
}

static inline int has_swap_token(struct mm_struct *mm)
{
	return 0;
}

static inline void disable_swap_token(struct mem_cgroup *memcg)
{
}

static inline void
mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout)
{
}

#ifdef CONFIG_CGROUP_MEM_RES_CTLR
static inline int
mem_cgroup_count_swap_user(swp_entry_t ent, struct page **pagep)
{
	return 0;
}
#endif

#endif /* CONFIG_SWAP */
#endif /* __KERNEL__ */
#endif /* _LINUX_SWAP_H */