/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MMZONE_H
#define _LINUX_MMZONE_H

#ifndef __ASSEMBLY__
#ifndef __GENERATING_BOUNDS_H

#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/bitops.h>
#include <linux/cache.h>
#include <linux/threads.h>
#include <linux/numa.h>
#include <linux/init.h>
#include <linux/seqlock.h>
#include <linux/nodemask.h>
#include <linux/pageblock-flags.h>
#include <linux/page-flags-layout.h>
#include <linux/atomic.h>
#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <asm/page.h>

/* Free memory management - zoned buddy allocator. */
#ifndef CONFIG_FORCE_MAX_ZONEORDER
#define MAX_ORDER 11
#else
#define MAX_ORDER CONFIG_FORCE_MAX_ZONEORDER
#endif
#define MAX_ORDER_NR_PAGES (1 << (MAX_ORDER - 1))

/*
 * PAGE_ALLOC_COSTLY_ORDER is the order at which allocations are deemed
 * costly to service.  That is between allocation orders which should
 * coalesce naturally under reasonable reclaim pressure and those which
 * will not.
 */
#define PAGE_ALLOC_COSTLY_ORDER 3
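/*
 * Quick reference for the order arithmetic above (illustrative, assuming
 * the default MAX_ORDER of 11 and a 4 KiB PAGE_SIZE): an order-n block is
 * (1 << n) contiguous pages, so the largest buddy block spans
 * MAX_ORDER_NR_PAGES = 1 << 10 = 1024 pages (4 MiB), while the "costly"
 * threshold at PAGE_ALLOC_COSTLY_ORDER covers blocks of 1 << 3 = 8 pages
 * (32 KiB).
 */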
enum migratetype {
	MIGRATE_UNMOVABLE,
	MIGRATE_MOVABLE,
	MIGRATE_RECLAIMABLE,
	MIGRATE_PCPTYPES,	/* the number of types on the pcp lists */
	MIGRATE_HIGHATOMIC = MIGRATE_PCPTYPES,
#ifdef CONFIG_CMA
	/*
	 * MIGRATE_CMA migration type is designed to mimic the way
	 * ZONE_MOVABLE works.  Only movable pages can be allocated
	 * from MIGRATE_CMA pageblocks and the page allocator never
	 * implicitly changes the migration type of a MIGRATE_CMA pageblock.
	 *
	 * The way to use it is to change the migratetype of a range of
	 * pageblocks to MIGRATE_CMA, which can be done by the
	 * __free_pageblock_cma() function.  What is important though is
	 * that a range of pageblocks must be aligned to MAX_ORDER_NR_PAGES
	 * should the biggest page be bigger than a single pageblock.
	 */
	MIGRATE_CMA,
#endif
#ifdef CONFIG_MEMORY_ISOLATION
	MIGRATE_ISOLATE,	/* can't allocate from here */
#endif
	MIGRATE_TYPES
};

/* In mm/page_alloc.c; keep in sync also with show_migration_types() there */
extern const char * const migratetype_names[MIGRATE_TYPES];

#ifdef CONFIG_CMA
#  define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA)
#  define is_migrate_cma_page(_page) (get_pageblock_migratetype(_page) == MIGRATE_CMA)
#else
#  define is_migrate_cma(migratetype) false
#  define is_migrate_cma_page(_page) false
#endif

static inline bool is_migrate_movable(int mt)
{
	return is_migrate_cma(mt) || mt == MIGRATE_MOVABLE;
}

#define for_each_migratetype_order(order, type) \
	for (order = 0; order < MAX_ORDER; order++) \
		for (type = 0; type < MIGRATE_TYPES; type++)

extern int page_group_by_mobility_disabled;

#define MIGRATETYPE_MASK ((1UL << PB_migratetype_bits) - 1)

#define get_pageblock_migratetype(page) \
	get_pfnblock_flags_mask(page, page_to_pfn(page), MIGRATETYPE_MASK)

struct free_area {
	struct list_head	free_list[MIGRATE_TYPES];
	unsigned long		nr_free;
};

static inline struct page *get_page_from_free_area(struct free_area *area,
						   int migratetype)
{
	return list_first_entry_or_null(&area->free_list[migratetype],
					struct page, lru);
}

static inline bool free_area_empty(struct free_area *area, int migratetype)
{
	return list_empty(&area->free_list[migratetype]);
}
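/*
 * Illustrative sketch (not part of this header's API): how the free_area
 * array is typically walked.  Each zone (defined further below) keeps one
 * struct free_area per buddy order, with a free list per migratetype;
 * zone->lock must be held while walking the lists.  The helper name
 * count_free_pages_of_type() is made up for this example.
 *
 *	static unsigned long count_free_pages_of_type(struct zone *zone,
 *						      int migratetype)
 *	{
 *		unsigned long nr_pages = 0;
 *		unsigned int order;
 *		struct page *page;
 *
 *		for (order = 0; order < MAX_ORDER; order++) {
 *			struct free_area *area = &zone->free_area[order];
 *
 *			list_for_each_entry(page, &area->free_list[migratetype], lru)
 *				nr_pages += 1UL << order;
 *		}
 *		return nr_pages;
 *	}
 */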
struct pglist_data;

/*
 * Add a wild amount of padding here to ensure data falls into separate
 * cachelines.  There are very few zone structures in the machine, so space
 * consumption is not a concern here.
 */
#if defined(CONFIG_SMP)
struct zone_padding {
	char x[0];
} ____cacheline_internodealigned_in_smp;
#define ZONE_PADDING(name)	struct zone_padding name;
#else
#define ZONE_PADDING(name)
#endif

#ifdef CONFIG_NUMA
enum numa_stat_item {
	NUMA_HIT,		/* allocated in intended node */
	NUMA_MISS,		/* allocated in non intended node */
	NUMA_FOREIGN,		/* was intended here, hit elsewhere */
	NUMA_INTERLEAVE_HIT,	/* interleaver preferred this zone */
	NUMA_LOCAL,		/* allocation from local node */
	NUMA_OTHER,		/* allocation from other node */
	NR_VM_NUMA_STAT_ITEMS
};
#else
#define NR_VM_NUMA_STAT_ITEMS 0
#endif

enum zone_stat_item {
	/* First 128 byte cacheline (assuming 64 bit words) */
	NR_FREE_PAGES,
	NR_ZONE_LRU_BASE,	/* Used only for compaction and reclaim retry */
	NR_ZONE_INACTIVE_ANON = NR_ZONE_LRU_BASE,
	NR_ZONE_ACTIVE_ANON,
	NR_ZONE_INACTIVE_FILE,
	NR_ZONE_ACTIVE_FILE,
	NR_ZONE_UNEVICTABLE,
	NR_ZONE_WRITE_PENDING,	/* Count of dirty, writeback and unstable pages */
	NR_MLOCK,		/* mlock()ed pages found and moved off LRU */
	/* Second 128 byte cacheline */
	NR_BOUNCE,
#if IS_ENABLED(CONFIG_ZSMALLOC)
	NR_ZSPAGES,		/* allocated in zsmalloc */
#endif
	NR_FREE_CMA_PAGES,
	NR_VM_ZONE_STAT_ITEMS
};

enum node_stat_item {
	NR_LRU_BASE,
	NR_INACTIVE_ANON = NR_LRU_BASE, /* must match order of LRU_[IN]ACTIVE */
	NR_ACTIVE_ANON,		/*  "     "     "   "       "         */
	NR_INACTIVE_FILE,	/*  "     "     "   "       "         */
	NR_ACTIVE_FILE,		/*  "     "     "   "       "         */
	NR_UNEVICTABLE,		/*  "     "     "   "       "         */
	NR_SLAB_RECLAIMABLE_B,
	NR_SLAB_UNRECLAIMABLE_B,
	NR_ISOLATED_ANON,	/* Temporary isolated pages from anon lru */
	NR_ISOLATED_FILE,	/* Temporary isolated pages from file lru */
	WORKINGSET_NODES,
	WORKINGSET_REFAULT_BASE,
	WORKINGSET_REFAULT_ANON = WORKINGSET_REFAULT_BASE,
	WORKINGSET_REFAULT_FILE,
	WORKINGSET_ACTIVATE_BASE,
	WORKINGSET_ACTIVATE_ANON = WORKINGSET_ACTIVATE_BASE,
	WORKINGSET_ACTIVATE_FILE,
	WORKINGSET_RESTORE_BASE,
	WORKINGSET_RESTORE_ANON = WORKINGSET_RESTORE_BASE,
	WORKINGSET_RESTORE_FILE,
	WORKINGSET_NODERECLAIM,
	NR_ANON_MAPPED,		/* Mapped anonymous pages */
	NR_FILE_MAPPED,		/* pagecache pages mapped into pagetables.
				   only modified from process context */
	NR_FILE_PAGES,
	NR_FILE_DIRTY,
	NR_WRITEBACK,
	NR_WRITEBACK_TEMP,	/* Writeback using temporary buffers */
	NR_SHMEM,		/* shmem pages (includes tmpfs/GEM pages) */
	NR_SHMEM_THPS,
	NR_SHMEM_PMDMAPPED,
	NR_FILE_THPS,
	NR_FILE_PMDMAPPED,
	NR_ANON_THPS,
	NR_VMSCAN_WRITE,
	NR_VMSCAN_IMMEDIATE,	/* Prioritise for reclaim when writeback ends */
	NR_DIRTIED,		/* page dirtyings since bootup */
	NR_WRITTEN,		/* page writings since bootup */
	NR_KERNEL_MISC_RECLAIMABLE,	/* reclaimable non-slab kernel pages */
	NR_FOLL_PIN_ACQUIRED,	/* via: pin_user_page(), gup flag: FOLL_PIN */
	NR_FOLL_PIN_RELEASED,	/* pages returned via unpin_user_page() */
	NR_KERNEL_STACK_KB,	/* measured in KiB */
#if IS_ENABLED(CONFIG_SHADOW_CALL_STACK)
	NR_KERNEL_SCS_KB,	/* measured in KiB */
#endif
	NR_PAGETABLE,		/* used for pagetables */
#ifdef CONFIG_SWAP
	NR_SWAPCACHE,
#endif
	NR_VM_NODE_STAT_ITEMS
};
/*
 * Returns true if the item should be printed in THPs (/proc/vmstat
 * currently prints the number of anon, file and shmem THPs, but the item
 * is charged in pages).
 */
static __always_inline bool vmstat_item_print_in_thp(enum node_stat_item item)
{
	if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
		return false;

	return item == NR_ANON_THPS ||
	       item == NR_FILE_THPS ||
	       item == NR_SHMEM_THPS ||
	       item == NR_SHMEM_PMDMAPPED ||
	       item == NR_FILE_PMDMAPPED;
}

/*
 * Returns true if the value is measured in bytes (most vmstat values are
 * measured in pages). This defines the API part, the internal representation
 * might be different.
 */
static __always_inline bool vmstat_item_in_bytes(int idx)
{
	/*
	 * Global and per-node slab counters track slab pages.
	 * It's expected that changes are multiples of PAGE_SIZE.
	 * Internally values are stored in pages.
	 *
	 * Per-memcg and per-lruvec counters track memory, consumed
	 * by individual slab objects. These counters are actually
	 * byte-precise.
	 */
	return (idx == NR_SLAB_RECLAIMABLE_B ||
		idx == NR_SLAB_UNRECLAIMABLE_B);
}

/*
 * We do arithmetic on the LRU lists in various places in the code,
 * so it is important to keep the active lists LRU_ACTIVE higher in
 * the array than the corresponding inactive lists, and to keep
 * the *_FILE lists LRU_FILE higher than the corresponding _ANON lists.
 *
 * This has to be kept in sync with the statistics in zone_stat_item
 * above and the descriptions in vmstat_text in mm/vmstat.c
 */
#define LRU_BASE 0
#define LRU_ACTIVE 1
#define LRU_FILE 2

enum lru_list {
	LRU_INACTIVE_ANON = LRU_BASE,
	LRU_ACTIVE_ANON = LRU_BASE + LRU_ACTIVE,
	LRU_INACTIVE_FILE = LRU_BASE + LRU_FILE,
	LRU_ACTIVE_FILE = LRU_BASE + LRU_FILE + LRU_ACTIVE,
	LRU_UNEVICTABLE,
	NR_LRU_LISTS
};

#define for_each_lru(lru) for (lru = 0; lru < NR_LRU_LISTS; lru++)

#define for_each_evictable_lru(lru) for (lru = 0; lru <= LRU_ACTIVE_FILE; lru++)

static inline bool is_file_lru(enum lru_list lru)
{
	return (lru == LRU_INACTIVE_FILE || lru == LRU_ACTIVE_FILE);
}

static inline bool is_active_lru(enum lru_list lru)
{
	return (lru == LRU_ACTIVE_ANON || lru == LRU_ACTIVE_FILE);
}
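/*
 * Worked example of the LRU arithmetic described above (illustrative only;
 * the real helpers live in <linux/mm_inline.h>): the list index for a page
 * is built by adding LRU_FILE for file-backed pages and LRU_ACTIVE for
 * active pages, so a hypothetical helper could be written as
 *
 *	static inline enum lru_list lru_index(bool file, bool active)
 *	{
 *		enum lru_list lru = file ? LRU_INACTIVE_FILE : LRU_INACTIVE_ANON;
 *
 *		if (active)
 *			lru += LRU_ACTIVE;
 *		return lru;
 *	}
 *
 * e.g. an active file page maps to LRU_BASE + LRU_FILE + LRU_ACTIVE, which
 * is LRU_ACTIVE_FILE.
 */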
#define ANON_AND_FILE 2

enum lruvec_flags {
	LRUVEC_CONGESTED,		/* lruvec has many dirty pages
					 * backed by a congested BDI
					 */
};

struct lruvec {
	struct list_head		lists[NR_LRU_LISTS];
	/* per lruvec lru_lock for memcg */
	spinlock_t			lru_lock;
	/*
	 * These track the cost of reclaiming one LRU - file or anon -
	 * over the other. As the observed cost of reclaiming one LRU
	 * increases, the reclaim scan balance tips toward the other.
	 */
	unsigned long			anon_cost;
	unsigned long			file_cost;
	/* Non-resident age, driven by LRU movement */
	atomic_long_t			nonresident_age;
	/* Refaults at the time of last reclaim cycle */
	unsigned long			refaults[ANON_AND_FILE];
	/* Various lruvec state flags (enum lruvec_flags) */
	unsigned long			flags;
#ifdef CONFIG_MEMCG
	struct pglist_data *pgdat;
#endif
};

/* Isolate unmapped pages */
#define ISOLATE_UNMAPPED	((__force isolate_mode_t)0x2)
/* Isolate for asynchronous migration */
#define ISOLATE_ASYNC_MIGRATE	((__force isolate_mode_t)0x4)
/* Isolate unevictable pages */
#define ISOLATE_UNEVICTABLE	((__force isolate_mode_t)0x8)

/* LRU Isolation modes. */
typedef unsigned __bitwise isolate_mode_t;

enum zone_watermarks {
	WMARK_MIN,
	WMARK_LOW,
	WMARK_HIGH,
	NR_WMARK
};

#define min_wmark_pages(z)	(z->_watermark[WMARK_MIN] + z->watermark_boost)
#define low_wmark_pages(z)	(z->_watermark[WMARK_LOW] + z->watermark_boost)
#define high_wmark_pages(z)	(z->_watermark[WMARK_HIGH] + z->watermark_boost)
#define wmark_pages(z, i)	(z->_watermark[i] + z->watermark_boost)
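/*
 * Illustrative sketch of how the watermark macros are meant to be read
 * (not a definitive allocator implementation): reclaim and allocation code
 * compares a zone's free page count against one of the boosted marks, e.g.
 * kswapd keeps reclaiming until the high watermark is met.  The helper
 * below is made up for this example.
 *
 *	static bool zone_above_high_wmark(struct zone *zone)
 *	{
 *		unsigned long free = zone_page_state(zone, NR_FREE_PAGES);
 *
 *		return free > high_wmark_pages(zone);
 *	}
 *
 * The real checks also factor in lowmem_reserve and per-order free page
 * requirements; see __zone_watermark_ok(), declared later in this file.
 */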
struct per_cpu_pages {
	int count;		/* number of pages in the list */
	int high;		/* high watermark, emptying needed */
	int batch;		/* chunk size for buddy add/remove */

	/* Lists of pages, one per migrate type stored on the pcp-lists */
	struct list_head lists[MIGRATE_PCPTYPES];
};

struct per_cpu_pageset {
	struct per_cpu_pages pcp;
#ifdef CONFIG_NUMA
	s8 expire;
	u16 vm_numa_stat_diff[NR_VM_NUMA_STAT_ITEMS];
#endif
#ifdef CONFIG_SMP
	s8 stat_threshold;
	s8 vm_stat_diff[NR_VM_ZONE_STAT_ITEMS];
#endif
};

struct per_cpu_nodestat {
	s8 stat_threshold;
	s8 vm_node_stat_diff[NR_VM_NODE_STAT_ITEMS];
};

#endif /* !__GENERATING_BOUNDS_H */

enum zone_type {
	/*
	 * ZONE_DMA and ZONE_DMA32 are used when there are peripherals not able
	 * to DMA to all of the addressable memory (ZONE_NORMAL).
	 * On architectures where this area covers the whole 32 bit address
	 * space ZONE_DMA32 is used. ZONE_DMA is left for the ones with smaller
	 * DMA addressing constraints. This distinction is important as a 32bit
	 * DMA mask is assumed when ZONE_DMA32 is defined. Some 64-bit
	 * platforms may need both zones as they support peripherals with
	 * different DMA addressing limitations.
	 */
#ifdef CONFIG_ZONE_DMA
	ZONE_DMA,
#endif
#ifdef CONFIG_ZONE_DMA32
	ZONE_DMA32,
#endif
	/*
	 * Normal addressable memory is in ZONE_NORMAL. DMA operations can be
	 * performed on pages in ZONE_NORMAL if the DMA devices support
	 * transfers to all addressable memory.
	 */
	ZONE_NORMAL,
#ifdef CONFIG_HIGHMEM
	/*
	 * A memory area that is only addressable by the kernel through
	 * mapping portions into its own address space. This is for example
	 * used by i386 to allow the kernel to address the memory beyond
	 * 900MB. The kernel will set up special mappings (page
	 * table entries on i386) for each page that the kernel needs to
	 * access.
	 */
	ZONE_HIGHMEM,
#endif
	/*
	 * ZONE_MOVABLE is similar to ZONE_NORMAL, except that it contains
	 * movable pages with few exceptional cases described below. Main use
	 * cases for ZONE_MOVABLE are to make memory offlining/unplug more
	 * likely to succeed, and to locally limit unmovable allocations - e.g.,
	 * to increase the number of THP/huge pages. Notable special cases are:
	 *
	 * 1. Pinned pages: (long-term) pinning of movable pages might
	 *    essentially turn such pages unmovable. Therefore, we do not allow
	 *    pinning long-term pages in ZONE_MOVABLE. When pages are pinned and
	 *    faulted, they come from the right zone right away. However, it is
	 *    still possible that the address space already has pages in
	 *    ZONE_MOVABLE at the time the pages are pinned (i.e. the user has
	 *    touched that memory before pinning). In such a case we migrate
	 *    them to a different zone; when migration fails, pinning fails.
	 * 2. memblock allocations: kernelcore/movablecore setups might create
	 *    situations where ZONE_MOVABLE contains unmovable allocations
	 *    after boot. Memory offlining and allocations fail early.
	 * 3. Memory holes: kernelcore/movablecore setups might create very rare
	 *    situations where ZONE_MOVABLE contains memory holes after boot,
	 *    for example, if we have sections that are only partially
	 *    populated. Memory offlining and allocations fail early.
	 * 4. PG_hwpoison pages: while poisoned pages can be skipped during
	 *    memory offlining, such pages cannot be allocated.
	 * 5. Unmovable PG_offline pages: in paravirtualized environments,
	 *    hotplugged memory blocks might only partially be managed by the
	 *    buddy (e.g., via XEN-balloon, Hyper-V balloon, virtio-mem). The
	 *    parts not managed by the buddy are unmovable PG_offline pages. In
	 *    some cases (virtio-mem), such pages can be skipped during memory
	 *    offlining; however, they cannot be moved/allocated. These
	 *    techniques might use alloc_contig_range() to hide previously
	 *    exposed pages from the buddy again (e.g., to implement some sort
	 *    of memory unplug in virtio-mem).
	 * 6. ZERO_PAGE(0): kernelcore/movablecore setups might create
	 *    situations where ZERO_PAGE(0), which is allocated differently
	 *    on different platforms, ends up in a movable zone. ZERO_PAGE(0)
	 *    cannot be migrated.
	 * 7. Memory-hotplug: when using memmap_on_memory and onlining the
	 *    memory to the MOVABLE zone, the vmemmap pages are also placed in
	 *    such zone. Such pages cannot really be moved around as they are
	 *    self-stored in the range, but they are treated as movable when
	 *    the range they describe is about to be offlined.
	 *
	 * In general, no unmovable allocations that degrade memory offlining
	 * should end up in ZONE_MOVABLE. Allocators (like alloc_contig_range())
	 * have to expect that migrating pages in ZONE_MOVABLE can fail (even
	 * if has_unmovable_pages() states that there are no unmovable pages,
	 * there can be false negatives).
	 */
	ZONE_MOVABLE,
#ifdef CONFIG_ZONE_DEVICE
	ZONE_DEVICE,
#endif
	__MAX_NR_ZONES

};

#ifndef __GENERATING_BOUNDS_H

#define ASYNC_AND_SYNC 2

struct zone {
	/* Read-mostly fields */

	/* zone watermarks, access with *_wmark_pages(zone) macros */
	unsigned long _watermark[NR_WMARK];
	unsigned long watermark_boost;

	unsigned long nr_reserved_highatomic;

	/*
	 * We don't know if the memory that we're going to allocate will be
	 * freeable and/or will be released eventually, so to avoid totally
	 * wasting several GB of ram we must reserve some of the lower zone
	 * memory (otherwise we risk running OOM on the lower zones despite
	 * there being tons of freeable ram on the higher zones).  This array
	 * is recalculated at runtime if the sysctl_lowmem_reserve_ratio
	 * sysctl changes.
	 */
	long lowmem_reserve[MAX_NR_ZONES];
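	/*
	 * A rough sketch of how lowmem_reserve is applied (assumption: this
	 * mirrors the watermark check in mm/page_alloc.c, simplified here
	 * for illustration): when an allocation that could have been served
	 * from a higher zone falls back to this zone, the reserve for that
	 * higher zone class is added on top of the watermark, e.g.
	 *
	 *	min_free = min_wmark_pages(zone) +
	 *		   zone->lowmem_reserve[highest_zoneidx];
	 *	if (free_pages <= min_free)
	 *		... fall back further or start reclaim ...
	 *
	 * so GFP_KERNEL-style allocations cannot exhaust ZONE_DMA/ZONE_DMA32
	 * memory that DMA-constrained users may later need.
	 */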
#ifdef CONFIG_NUMA
	int node;
#endif
	struct pglist_data	*zone_pgdat;
	struct per_cpu_pageset __percpu *pageset;
	/*
	 * the high and batch values are copied to individual pagesets for
	 * faster access
	 */
	int pageset_high;
	int pageset_batch;

#ifndef CONFIG_SPARSEMEM
	/*
	 * Flags for a pageblock_nr_pages block. See pageblock-flags.h.
	 * In SPARSEMEM, this map is stored in struct mem_section
	 */
	unsigned long		*pageblock_flags;
#endif /* CONFIG_SPARSEMEM */

	/* zone_start_pfn == zone_start_paddr >> PAGE_SHIFT */
	unsigned long		zone_start_pfn;

	/*
	 * spanned_pages is the total pages spanned by the zone, including
	 * holes, which is calculated as:
	 *	spanned_pages = zone_end_pfn - zone_start_pfn;
	 *
	 * present_pages is physical pages existing within the zone, which
	 * is calculated as:
	 *	present_pages = spanned_pages - absent_pages(pages in holes);
	 *
	 * managed_pages is present pages managed by the buddy system, which
	 * is calculated as (reserved_pages includes pages allocated by the
	 * bootmem allocator):
	 *	managed_pages = present_pages - reserved_pages;
	 *
	 * cma pages is present pages that are assigned for CMA use
	 * (MIGRATE_CMA).
	 *
	 * So present_pages may be used by memory hotplug or memory power
	 * management logic to figure out unmanaged pages by checking
	 * (present_pages - managed_pages). And managed_pages should be used
	 * by page allocator and vm scanner to calculate all kinds of watermarks
	 * and thresholds.
	 *
	 * Locking rules:
	 *
	 * zone_start_pfn and spanned_pages are protected by span_seqlock.
	 * It is a seqlock because it has to be read outside of zone->lock,
	 * and it is done in the main allocator path.  But, it is written
	 * quite infrequently.
	 *
	 * The span_seq lock is declared along with zone->lock because it is
	 * frequently read in proximity to zone->lock.  It's good to
	 * give them a chance of being in the same cacheline.
	 *
	 * Write access to present_pages at runtime should be protected by
	 * mem_hotplug_begin/end(). Any reader who can't tolerate drift of
	 * present_pages should use get_online_mems() to get a stable value.
	 */
	atomic_long_t		managed_pages;
	unsigned long		spanned_pages;
	unsigned long		present_pages;
#ifdef CONFIG_CMA
	unsigned long		cma_pages;
#endif
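	/*
	 * Worked example of the accounting above (numbers invented for
	 * illustration): a zone spanning pfns [0x100000, 0x200000) has
	 * spanned_pages = 0x100000.  If 0x1000 of those pfns are holes and
	 * 0x800 pages were handed out by the boot allocator, then
	 * present_pages = 0x100000 - 0x1000 = 0xff000 and
	 * managed_pages = 0xff000 - 0x800 = 0xfe800; only the latter is
	 * relevant for watermark calculations.
	 */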
	const char		*name;

#ifdef CONFIG_MEMORY_ISOLATION
	/*
	 * Number of isolated pageblocks. It is used to solve an incorrect
	 * freepage counting problem due to racy retrieving migratetype
	 * of pageblock. Protected by zone->lock.
	 */
	unsigned long		nr_isolate_pageblock;
#endif

#ifdef CONFIG_MEMORY_HOTPLUG
	/* see spanned/present_pages for more description */
	seqlock_t		span_seqlock;
#endif

	int initialized;

	/* Write-intensive fields used from the page allocator */
	ZONE_PADDING(_pad1_)

	/* free areas of different sizes */
	struct free_area	free_area[MAX_ORDER];

	/* zone flags, see below */
	unsigned long		flags;

	/* Primarily protects free_area */
	spinlock_t		lock;

	/* Write-intensive fields used by compaction and vmstats. */
	ZONE_PADDING(_pad2_)

	/*
	 * When free pages are below this point, additional steps are taken
	 * when reading the number of free pages to avoid per-cpu counter
	 * drift allowing watermarks to be breached
	 */
	unsigned long percpu_drift_mark;

#if defined CONFIG_COMPACTION || defined CONFIG_CMA
	/* pfn where compaction free scanner should start */
	unsigned long		compact_cached_free_pfn;
	/* pfn where compaction migration scanner should start */
	unsigned long		compact_cached_migrate_pfn[ASYNC_AND_SYNC];
	unsigned long		compact_init_migrate_pfn;
	unsigned long		compact_init_free_pfn;
#endif

#ifdef CONFIG_COMPACTION
	/*
	 * On compaction failure, 1<<compact_defer_shift compactions
	 * are skipped before trying again. The number attempted since
	 * last failure is tracked with compact_considered.
	 * compact_order_failed is the minimum compaction failed order.
	 */
	unsigned int		compact_considered;
	unsigned int		compact_defer_shift;
	int			compact_order_failed;
#endif

#if defined CONFIG_COMPACTION || defined CONFIG_CMA
	/* Set to true when the PG_migrate_skip bits should be cleared */
	bool			compact_blockskip_flush;
#endif

	bool			contiguous;

	ZONE_PADDING(_pad3_)
	/* Zone statistics */
	atomic_long_t		vm_stat[NR_VM_ZONE_STAT_ITEMS];
	atomic_long_t		vm_numa_stat[NR_VM_NUMA_STAT_ITEMS];
} ____cacheline_internodealigned_in_smp;

enum pgdat_flags {
	PGDAT_DIRTY,			/* reclaim scanning has recently found
					 * many dirty file pages at the tail
					 * of the LRU.
					 */
	PGDAT_WRITEBACK,		/* reclaim scanning has recently found
					 * many pages under writeback
					 */
	PGDAT_RECLAIM_LOCKED,		/* prevents concurrent reclaim */
};

enum zone_flags {
	ZONE_BOOSTED_WATERMARK,		/* zone recently boosted watermarks.
					 * Cleared when kswapd is woken.
					 */
};

static inline unsigned long zone_managed_pages(struct zone *zone)
{
	return (unsigned long)atomic_long_read(&zone->managed_pages);
}

static inline unsigned long zone_cma_pages(struct zone *zone)
{
#ifdef CONFIG_CMA
	return zone->cma_pages;
#else
	return 0;
#endif
}

static inline unsigned long zone_end_pfn(const struct zone *zone)
{
	return zone->zone_start_pfn + zone->spanned_pages;
}

static inline bool zone_spans_pfn(const struct zone *zone, unsigned long pfn)
{
	return zone->zone_start_pfn <= pfn && pfn < zone_end_pfn(zone);
}

static inline bool zone_is_initialized(struct zone *zone)
{
	return zone->initialized;
}

static inline bool zone_is_empty(struct zone *zone)
{
	return zone->spanned_pages == 0;
}

/*
 * Return true if [start_pfn, start_pfn + nr_pages) range has a non-empty
 * intersection with the given zone
 */
static inline bool zone_intersects(struct zone *zone,
		unsigned long start_pfn, unsigned long nr_pages)
{
	if (zone_is_empty(zone))
		return false;
	if (start_pfn >= zone_end_pfn(zone) ||
	    start_pfn + nr_pages <= zone->zone_start_pfn)
		return false;

	return true;
}
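/*
 * Illustrative use of the span helpers above (hedged sketch, not existing
 * kernel code): checking whether a range [start_pfn, start_pfn + nr_pages)
 * falls entirely inside one zone's span could be written as
 *
 *	static bool range_within_zone(struct zone *zone, unsigned long start_pfn,
 *				      unsigned long nr_pages)
 *	{
 *		return zone_spans_pfn(zone, start_pfn) &&
 *		       zone_spans_pfn(zone, start_pfn + nr_pages - 1);
 *	}
 *
 * whereas zone_intersects() only tells you that at least one pfn of the
 * range lies inside the zone's span.
 */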
/*
 * The "priority" of VM scanning is how much of the queues we will scan in one
 * go. A value of 12 for DEF_PRIORITY implies that we will scan 1/4096th of
 * the queues ("queue_length >> 12") during an aging round.
 */
#define DEF_PRIORITY 12

/* Maximum number of zones on a zonelist */
#define MAX_ZONES_PER_ZONELIST (MAX_NUMNODES * MAX_NR_ZONES)

enum {
	ZONELIST_FALLBACK,	/* zonelist with fallback */
#ifdef CONFIG_NUMA
	/*
	 * The NUMA zonelists are doubled because we need zonelists that
	 * restrict the allocations to a single node for __GFP_THISNODE.
	 */
	ZONELIST_NOFALLBACK,	/* zonelist without fallback (__GFP_THISNODE) */
#endif
	MAX_ZONELISTS
};

/*
 * This struct contains information about a zone in a zonelist. It is stored
 * here to avoid dereferences into large structures and lookups of tables
 */
struct zoneref {
	struct zone *zone;	/* Pointer to actual zone */
	int zone_idx;		/* zone_idx(zoneref->zone) */
};

/*
 * One allocation request operates on a zonelist. A zonelist
 * is a list of zones, the first one is the 'goal' of the
 * allocation, the other zones are fallback zones, in decreasing
 * priority.
 *
 * To speed the reading of the zonelist, the zonerefs contain the zone index
 * of the entry being read. Helper functions to access information given
 * a struct zoneref are
 *
 * zonelist_zone()	- Return the struct zone * for an entry in _zonerefs
 * zonelist_zone_idx()	- Return the index of the zone for an entry
 * zonelist_node_idx()	- Return the index of the node for an entry
 */
struct zonelist {
	struct zoneref _zonerefs[MAX_ZONES_PER_ZONELIST + 1];
};

#ifndef CONFIG_DISCONTIGMEM
/* The array of struct pages - for discontigmem use pgdat->lmem_map */
extern struct page *mem_map;
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
struct deferred_split {
	spinlock_t split_queue_lock;
	struct list_head split_queue;
	unsigned long split_queue_len;
};
#endif

/*
 * On NUMA machines, each NUMA node would have a pg_data_t to describe
 * its memory layout. On UMA machines there is a single pglist_data which
 * describes the whole memory.
 *
 * Memory statistics and page replacement data structures are maintained on a
 * per-zone basis.
 */
typedef struct pglist_data {
	/*
	 * node_zones contains just the zones for THIS node. Not all of the
	 * zones may be populated, but it is the full list. It is referenced by
	 * this node's node_zonelists as well as other nodes' node_zonelists.
	 */
	struct zone node_zones[MAX_NR_ZONES];

	/*
	 * node_zonelists contains references to all zones in all nodes.
	 * Generally the first zones will be references to this node's
	 * node_zones.
	 */
	struct zonelist node_zonelists[MAX_ZONELISTS];

	int nr_zones; /* number of populated zones in this node */
#ifdef CONFIG_FLAT_NODE_MEM_MAP	/* means !SPARSEMEM */
	struct page *node_mem_map;
#ifdef CONFIG_PAGE_EXTENSION
	struct page_ext *node_page_ext;
#endif
#endif
#if defined(CONFIG_MEMORY_HOTPLUG) || defined(CONFIG_DEFERRED_STRUCT_PAGE_INIT)
	/*
	 * Must be held any time you expect node_start_pfn,
	 * node_present_pages, node_spanned_pages or nr_zones to stay constant.
	 * Also synchronizes pgdat->first_deferred_pfn during deferred page
	 * init.
	 *
	 * pgdat_resize_lock() and pgdat_resize_unlock() are provided to
	 * manipulate node_size_lock without checking for CONFIG_MEMORY_HOTPLUG
	 * or CONFIG_DEFERRED_STRUCT_PAGE_INIT.
	 *
	 * Nests above zone->lock and zone->span_seqlock
	 */
	spinlock_t node_size_lock;
#endif
	unsigned long node_start_pfn;
	unsigned long node_present_pages; /* total number of physical pages */
	unsigned long node_spanned_pages; /* total size of physical page
					     range, including holes */
	int node_id;
	wait_queue_head_t kswapd_wait;
	wait_queue_head_t pfmemalloc_wait;
	struct task_struct *kswapd;	/* Protected by
					   mem_hotplug_begin/end() */
	int kswapd_order;
	enum zone_type kswapd_highest_zoneidx;

	int kswapd_failures;		/* Number of 'reclaimed == 0' runs */

#ifdef CONFIG_COMPACTION
	int kcompactd_max_order;
	enum zone_type kcompactd_highest_zoneidx;
	wait_queue_head_t kcompactd_wait;
	struct task_struct *kcompactd;
#endif
	/*
	 * This is a per-node reserve of pages that are not available
	 * to userspace allocations.
	 */
	unsigned long		totalreserve_pages;

#ifdef CONFIG_NUMA
	/*
	 * node reclaim becomes active if more unmapped pages exist.
	 */
	unsigned long		min_unmapped_pages;
	unsigned long		min_slab_pages;
#endif /* CONFIG_NUMA */

	/* Write-intensive fields used by page reclaim */
	ZONE_PADDING(_pad1_)

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
	/*
	 * If memory initialisation on large machines is deferred then this
	 * is the first PFN that needs to be initialised.
	 */
	unsigned long first_deferred_pfn;
#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	struct deferred_split deferred_split_queue;
#endif

	/* Fields commonly accessed by the page reclaim scanner */

	/*
	 * NOTE: THIS IS UNUSED IF MEMCG IS ENABLED.
	 *
	 * Use mem_cgroup_lruvec() to look up lruvecs.
	 */
	struct lruvec		__lruvec;
	unsigned long		flags;

	ZONE_PADDING(_pad2_)

	/* Per-node vmstats */
	struct per_cpu_nodestat __percpu *per_cpu_nodestats;
	atomic_long_t		vm_stat[NR_VM_NODE_STAT_ITEMS];
} pg_data_t;

#define node_present_pages(nid)	(NODE_DATA(nid)->node_present_pages)
#define node_spanned_pages(nid)	(NODE_DATA(nid)->node_spanned_pages)
#ifdef CONFIG_FLAT_NODE_MEM_MAP
#define pgdat_page_nr(pgdat, pagenr)	((pgdat)->node_mem_map + (pagenr))
#else
#define pgdat_page_nr(pgdat, pagenr)	pfn_to_page((pgdat)->node_start_pfn + (pagenr))
#endif
#define nid_page_nr(nid, pagenr) 	pgdat_page_nr(NODE_DATA(nid),(pagenr))

#define node_start_pfn(nid)	(NODE_DATA(nid)->node_start_pfn)
#define node_end_pfn(nid) pgdat_end_pfn(NODE_DATA(nid))

static inline unsigned long pgdat_end_pfn(pg_data_t *pgdat)
{
	return pgdat->node_start_pfn + pgdat->node_spanned_pages;
}

static inline bool pgdat_is_empty(pg_data_t *pgdat)
{
	return !pgdat->node_start_pfn && !pgdat->node_spanned_pages;
}

#include <linux/memory_hotplug.h>

void build_all_zonelists(pg_data_t *pgdat);
void wakeup_kswapd(struct zone *zone, gfp_t gfp_mask, int order,
		   enum zone_type highest_zoneidx);
bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
			 int highest_zoneidx, unsigned int alloc_flags,
			 long free_pages);
bool zone_watermark_ok(struct zone *z, unsigned int order,
		unsigned long mark, int highest_zoneidx,
		unsigned int alloc_flags);
bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
		unsigned long mark, int highest_zoneidx);
/*
 * Memory initialization context, used to differentiate memory added by
 * the platform statically or via the memory hotplug interface.
 */
enum meminit_context {
	MEMINIT_EARLY,
	MEMINIT_HOTPLUG,
};

extern void init_currently_empty_zone(struct zone *zone, unsigned long start_pfn,
				      unsigned long size);

extern void lruvec_init(struct lruvec *lruvec);

static inline struct pglist_data *lruvec_pgdat(struct lruvec *lruvec)
{
#ifdef CONFIG_MEMCG
	return lruvec->pgdat;
#else
	return container_of(lruvec, struct pglist_data, __lruvec);
#endif
}

#ifdef CONFIG_HAVE_MEMORYLESS_NODES
int local_memory_node(int node_id);
#else
static inline int local_memory_node(int node_id) { return node_id; };
#endif

/*
 * zone_idx() returns 0 for the ZONE_DMA zone, 1 for the ZONE_NORMAL zone, etc.
 */
#define zone_idx(zone)		((zone) - (zone)->zone_pgdat->node_zones)

#ifdef CONFIG_ZONE_DEVICE
static inline bool zone_is_zone_device(struct zone *zone)
{
	return zone_idx(zone) == ZONE_DEVICE;
}
#else
static inline bool zone_is_zone_device(struct zone *zone)
{
	return false;
}
#endif

/*
 * Returns true if a zone has pages managed by the buddy allocator.
 * All the reclaim decisions have to use this function rather than
 * populated_zone(). If the whole zone is reserved then we can easily
 * end up with populated_zone() && !managed_zone().
 */
static inline bool managed_zone(struct zone *zone)
{
	return zone_managed_pages(zone);
}

/* Returns true if a zone has memory */
static inline bool populated_zone(struct zone *zone)
{
	return zone->present_pages;
}
#ifdef CONFIG_NUMA
static inline int zone_to_nid(struct zone *zone)
{
	return zone->node;
}

static inline void zone_set_nid(struct zone *zone, int nid)
{
	zone->node = nid;
}
#else
static inline int zone_to_nid(struct zone *zone)
{
	return 0;
}

static inline void zone_set_nid(struct zone *zone, int nid) {}
#endif

extern int movable_zone;

#ifdef CONFIG_HIGHMEM
static inline int zone_movable_is_highmem(void)
{
#ifdef CONFIG_NEED_MULTIPLE_NODES
	return movable_zone == ZONE_HIGHMEM;
#else
	return (ZONE_MOVABLE - 1) == ZONE_HIGHMEM;
#endif
}
#endif

static inline int is_highmem_idx(enum zone_type idx)
{
#ifdef CONFIG_HIGHMEM
	return (idx == ZONE_HIGHMEM ||
		(idx == ZONE_MOVABLE && zone_movable_is_highmem()));
#else
	return 0;
#endif
}

/**
 * is_highmem - helper function to quickly check if a struct zone is a
 *              highmem zone or not.  This is an attempt to keep references
 *              to ZONE_{DMA/NORMAL/HIGHMEM/etc} in general code to a minimum.
 * @zone: pointer to struct zone variable
 * Return: 1 for a highmem zone, 0 otherwise
 */
static inline int is_highmem(struct zone *zone)
{
#ifdef CONFIG_HIGHMEM
	return is_highmem_idx(zone_idx(zone));
#else
	return 0;
#endif
}

/* These two functions are used to setup the per zone pages min values */
struct ctl_table;

int min_free_kbytes_sysctl_handler(struct ctl_table *, int, void *, size_t *,
		loff_t *);
int watermark_scale_factor_sysctl_handler(struct ctl_table *, int, void *,
		size_t *, loff_t *);
extern int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES];
int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int, void *,
		size_t *, loff_t *);
int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *, int,
		void *, size_t *, loff_t *);
int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *, int,
		void *, size_t *, loff_t *);
int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *, int,
		void *, size_t *, loff_t *);
int numa_zonelist_order_handler(struct ctl_table *, int,
		void *, size_t *, loff_t *);
extern int percpu_pagelist_fraction;
extern char numa_zonelist_order[];
#define NUMA_ZONELIST_ORDER_LEN	16

#ifndef CONFIG_NEED_MULTIPLE_NODES

extern struct pglist_data contig_page_data;
#define NODE_DATA(nid)		(&contig_page_data)
#define NODE_MEM_MAP(nid)	mem_map

#else /* CONFIG_NEED_MULTIPLE_NODES */

#include <asm/mmzone.h>

#endif /* !CONFIG_NEED_MULTIPLE_NODES */

extern struct pglist_data *first_online_pgdat(void);
extern struct pglist_data *next_online_pgdat(struct pglist_data *pgdat);
extern struct zone *next_zone(struct zone *zone);

/**
 * for_each_online_pgdat - helper macro to iterate over all online nodes
 * @pgdat: pointer to a pg_data_t variable
 */
#define for_each_online_pgdat(pgdat)			\
	for (pgdat = first_online_pgdat();		\
	     pgdat;					\
	     pgdat = next_online_pgdat(pgdat))
/**
 * for_each_zone - helper macro to iterate over all memory zones
 * @zone: pointer to struct zone variable
 *
 * The user only needs to declare the zone variable, for_each_zone
 * fills it in.
 */
#define for_each_zone(zone)				\
	for (zone = (first_online_pgdat())->node_zones;	\
	     zone;					\
	     zone = next_zone(zone))

#define for_each_populated_zone(zone)			\
	for (zone = (first_online_pgdat())->node_zones;	\
	     zone;					\
	     zone = next_zone(zone))			\
		if (!populated_zone(zone))		\
			; /* do nothing */		\
		else
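/*
 * Illustrative sketch of the iterator above together with managed_zone()
 * (not existing kernel code): walking every zone that has present pages
 * while skipping zones whose pages are all reserved could look like
 *
 *	static unsigned long total_managed_pages(void)
 *	{
 *		struct zone *zone;
 *		unsigned long pages = 0;
 *
 *		for_each_populated_zone(zone) {
 *			if (!managed_zone(zone))
 *				continue;
 *			pages += zone_managed_pages(zone);
 *		}
 *		return pages;
 *	}
 *
 * Reclaim decisions are expected to use managed_zone() in this way, since a
 * populated but fully reserved zone has nothing for the buddy or the
 * scanner to work with.
 */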
static inline struct zone *zonelist_zone(struct zoneref *zoneref)
{
	return zoneref->zone;
}

static inline int zonelist_zone_idx(struct zoneref *zoneref)
{
	return zoneref->zone_idx;
}

static inline int zonelist_node_idx(struct zoneref *zoneref)
{
	return zone_to_nid(zoneref->zone);
}

struct zoneref *__next_zones_zonelist(struct zoneref *z,
					enum zone_type highest_zoneidx,
					nodemask_t *nodes);

/**
 * next_zones_zonelist - Returns the next zone at or below highest_zoneidx within the allowed nodemask using a cursor within a zonelist as a starting point
 * @z: The cursor used as a starting point for the search
 * @highest_zoneidx: The zone index of the highest zone to return
 * @nodes: An optional nodemask to filter the zonelist with
 *
 * This function returns the next zone at or below a given zone index that is
 * within the allowed nodemask using a cursor as the starting point for the
 * search. The zoneref returned is a cursor that represents the current zone
 * being examined. It should be advanced by one before calling
 * next_zones_zonelist again.
 *
 * Return: the next zone at or below highest_zoneidx within the allowed
 * nodemask using a cursor within a zonelist as a starting point
 */
static __always_inline struct zoneref *next_zones_zonelist(struct zoneref *z,
					enum zone_type highest_zoneidx,
					nodemask_t *nodes)
{
	if (likely(!nodes && zonelist_zone_idx(z) <= highest_zoneidx))
		return z;
	return __next_zones_zonelist(z, highest_zoneidx, nodes);
}

/**
 * first_zones_zonelist - Returns the first zone at or below highest_zoneidx within the allowed nodemask in a zonelist
 * @zonelist: The zonelist to search for a suitable zone
 * @highest_zoneidx: The zone index of the highest zone to return
 * @nodes: An optional nodemask to filter the zonelist with
 *
 * This function returns the first zone at or below a given zone index that is
 * within the allowed nodemask. The zoneref returned is a cursor that can be
 * used to iterate the zonelist with next_zones_zonelist by advancing it by
 * one before calling.
 *
 * When no eligible zone is found, zoneref->zone is NULL (zoneref itself is
 * never NULL). This may happen either genuinely, or due to concurrent nodemask
 * update due to cpuset modification.
 *
 * Return: Zoneref pointer for the first suitable zone found
 */
static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
					enum zone_type highest_zoneidx,
					nodemask_t *nodes)
{
	return next_zones_zonelist(zonelist->_zonerefs,
					highest_zoneidx, nodes);
}
/**
 * for_each_zone_zonelist_nodemask - helper macro to iterate over valid zones in a zonelist at or below a given zone index and within a nodemask
 * @zone: The current zone in the iterator
 * @z: The current pointer within zonelist->_zonerefs being iterated
 * @zlist: The zonelist being iterated
 * @highidx: The zone index of the highest zone to return
 * @nodemask: Nodemask allowed by the allocator
 *
 * This iterator iterates through all zones at or below a given zone index and
 * within a given nodemask
 */
#define for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, nodemask) \
	for (z = first_zones_zonelist(zlist, highidx, nodemask), zone = zonelist_zone(z);	\
		zone;							\
		z = next_zones_zonelist(++z, highidx, nodemask),	\
			zone = zonelist_zone(z))

#define for_next_zone_zonelist_nodemask(zone, z, highidx, nodemask) \
	for (zone = z->zone;						\
		zone;							\
		z = next_zones_zonelist(++z, highidx, nodemask),	\
			zone = zonelist_zone(z))


/**
 * for_each_zone_zonelist - helper macro to iterate over valid zones in a zonelist at or below a given zone index
 * @zone: The current zone in the iterator
 * @z: The current pointer within zonelist->zones being iterated
 * @zlist: The zonelist being iterated
 * @highidx: The zone index of the highest zone to return
 *
 * This iterator iterates through all zones at or below a given zone index.
 */
#define for_each_zone_zonelist(zone, z, zlist, highidx) \
	for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, NULL)
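/*
 * Illustrative sketch of a zonelist walk (hedged; the function name and the
 * choice of node_zonelists[ZONELIST_FALLBACK] are for this example only):
 * finding the first zone in a node's fallback list that still sits above
 * its high watermark could be written as
 *
 *	static struct zone *find_zone_above_high_wmark(pg_data_t *pgdat,
 *						       enum zone_type highidx)
 *	{
 *		struct zonelist *zonelist = &pgdat->node_zonelists[ZONELIST_FALLBACK];
 *		struct zoneref *z;
 *		struct zone *zone;
 *
 *		for_each_zone_zonelist(zone, z, zonelist, highidx) {
 *			if (zone_watermark_ok(zone, 0, high_wmark_pages(zone),
 *					      highidx, 0))
 *				return zone;
 *		}
 *		return NULL;
 *	}
 *
 * which mirrors the way the page allocator walks a zonelist in decreasing
 * preference order.
 */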
#ifdef CONFIG_SPARSEMEM
#include <asm/sparsemem.h>
#endif

#ifdef CONFIG_FLATMEM
#define pfn_to_nid(pfn)		(0)
#endif

#ifdef CONFIG_SPARSEMEM

/*
 * SECTION_SHIFT		#bits space required to store a section #
 *
 * PA_SECTION_SHIFT		physical address to/from section number
 * PFN_SECTION_SHIFT		pfn to/from section number
 */
#define PA_SECTION_SHIFT	(SECTION_SIZE_BITS)
#define PFN_SECTION_SHIFT	(SECTION_SIZE_BITS - PAGE_SHIFT)

#define NR_MEM_SECTIONS		(1UL << SECTIONS_SHIFT)

#define PAGES_PER_SECTION	(1UL << PFN_SECTION_SHIFT)
#define PAGE_SECTION_MASK	(~(PAGES_PER_SECTION-1))

#define SECTION_BLOCKFLAGS_BITS \
	((1UL << (PFN_SECTION_SHIFT - pageblock_order)) * NR_PAGEBLOCK_BITS)

#if (MAX_ORDER - 1 + PAGE_SHIFT) > SECTION_SIZE_BITS
#error Allocator MAX_ORDER exceeds SECTION_SIZE
#endif

static inline unsigned long pfn_to_section_nr(unsigned long pfn)
{
	return pfn >> PFN_SECTION_SHIFT;
}
static inline unsigned long section_nr_to_pfn(unsigned long sec)
{
	return sec << PFN_SECTION_SHIFT;
}

#define SECTION_ALIGN_UP(pfn)	(((pfn) + PAGES_PER_SECTION - 1) & PAGE_SECTION_MASK)
#define SECTION_ALIGN_DOWN(pfn)	((pfn) & PAGE_SECTION_MASK)

#define SUBSECTION_SHIFT 21
#define SUBSECTION_SIZE (1UL << SUBSECTION_SHIFT)

#define PFN_SUBSECTION_SHIFT (SUBSECTION_SHIFT - PAGE_SHIFT)
#define PAGES_PER_SUBSECTION (1UL << PFN_SUBSECTION_SHIFT)
#define PAGE_SUBSECTION_MASK (~(PAGES_PER_SUBSECTION-1))

#if SUBSECTION_SHIFT > SECTION_SIZE_BITS
#error Subsection size exceeds section size
#else
#define SUBSECTIONS_PER_SECTION (1UL << (SECTION_SIZE_BITS - SUBSECTION_SHIFT))
#endif

#define SUBSECTION_ALIGN_UP(pfn) ALIGN((pfn), PAGES_PER_SUBSECTION)
#define SUBSECTION_ALIGN_DOWN(pfn) ((pfn) & PAGE_SUBSECTION_MASK)
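/*
 * Worked example of the section arithmetic above (assuming an architecture
 * with SECTION_SIZE_BITS == 27 and PAGE_SHIFT == 12, e.g. a typical x86-64
 * config): PFN_SECTION_SHIFT = 27 - 12 = 15, so PAGES_PER_SECTION = 32768
 * pages = 128 MiB per section, and SUBSECTION_SHIFT of 21 gives
 * PAGES_PER_SUBSECTION = 512 pages = 2 MiB, i.e. 64 subsections per section.
 * A pfn of 0x148000 then belongs to section 0x148000 >> 15 = 0x29.
 */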
struct mem_section_usage {
#ifdef CONFIG_SPARSEMEM_VMEMMAP
	DECLARE_BITMAP(subsection_map, SUBSECTIONS_PER_SECTION);
#endif
	/* See declaration of similar field in struct zone */
	unsigned long pageblock_flags[0];
};

void subsection_map_init(unsigned long pfn, unsigned long nr_pages);

struct page;
struct page_ext;
struct mem_section {
	/*
	 * This is, logically, a pointer to an array of struct
	 * pages.  However, it is stored with some other magic.
	 * (see sparse.c::sparse_init_one_section())
	 *
	 * Additionally during early boot we encode node id of
	 * the location of the section here to guide allocation.
	 * (see sparse.c::memory_present())
	 *
	 * Making it a UL at least makes someone do a cast
	 * before using it wrong.
	 */
	unsigned long section_mem_map;

	struct mem_section_usage *usage;
#ifdef CONFIG_PAGE_EXTENSION
	/*
	 * If SPARSEMEM, pgdat doesn't have page_ext pointer. We use
	 * section. (see page_ext.h about this.)
	 */
	struct page_ext *page_ext;
	unsigned long pad;
#endif
	/*
	 * WARNING: mem_section must be a power-of-2 in size for the
	 * calculation and use of SECTION_ROOT_MASK to make sense.
	 */
};

#ifdef CONFIG_SPARSEMEM_EXTREME
#define SECTIONS_PER_ROOT	(PAGE_SIZE / sizeof (struct mem_section))
#else
#define SECTIONS_PER_ROOT	1
#endif

#define SECTION_NR_TO_ROOT(sec)	((sec) / SECTIONS_PER_ROOT)
#define NR_SECTION_ROOTS	DIV_ROUND_UP(NR_MEM_SECTIONS, SECTIONS_PER_ROOT)
#define SECTION_ROOT_MASK	(SECTIONS_PER_ROOT - 1)

#ifdef CONFIG_SPARSEMEM_EXTREME
extern struct mem_section **mem_section;
#else
extern struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT];
#endif

static inline unsigned long *section_to_usemap(struct mem_section *ms)
{
	return ms->usage->pageblock_flags;
}

static inline struct mem_section *__nr_to_section(unsigned long nr)
{
#ifdef CONFIG_SPARSEMEM_EXTREME
	if (!mem_section)
		return NULL;
#endif
	if (!mem_section[SECTION_NR_TO_ROOT(nr)])
		return NULL;
	return &mem_section[SECTION_NR_TO_ROOT(nr)][nr & SECTION_ROOT_MASK];
}
extern unsigned long __section_nr(struct mem_section *ms);
extern size_t mem_section_usage_size(void);

/*
 * We use the lower bits of the mem_map pointer to store
 * a little bit of information.  The pointer is calculated
 * as mem_map - section_nr_to_pfn(pnum).  The result is
 * aligned to the minimum alignment of the two values:
 *   1. All mem_map arrays are page-aligned.
 *   2. section_nr_to_pfn() always clears PFN_SECTION_SHIFT
 *      lowest bits.  PFN_SECTION_SHIFT is arch-specific
 *      (equal to SECTION_SIZE_BITS - PAGE_SHIFT), and the
 *      worst combination is powerpc with 256k pages,
 *      which results in PFN_SECTION_SHIFT equal 6.
 *      To sum it up, at least 6 bits are available.
 */
#define SECTION_MARKED_PRESENT		(1UL<<0)
#define SECTION_HAS_MEM_MAP		(1UL<<1)
#define SECTION_IS_ONLINE		(1UL<<2)
#define SECTION_IS_EARLY		(1UL<<3)
#define SECTION_TAINT_ZONE_DEVICE	(1UL<<4)
#define SECTION_MAP_LAST_BIT		(1UL<<5)
#define SECTION_MAP_MASK		(~(SECTION_MAP_LAST_BIT-1))
#define SECTION_NID_SHIFT		3

static inline struct page *__section_mem_map_addr(struct mem_section *section)
{
	unsigned long map = section->section_mem_map;
	map &= SECTION_MAP_MASK;
	return (struct page *)map;
}
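/*
 * Illustrative sketch of the encoding described above (not kernel API; the
 * helper name is made up): because the flag bits live below the encoded
 * mem_map value, translating a pfn to its struct page under SPARSEMEM
 * amounts to masking the flags off and indexing with the raw pfn:
 *
 *	static struct page *sparse_pfn_to_page(unsigned long pfn)
 *	{
 *		struct mem_section *ms = __pfn_to_section(pfn);
 *
 *		// section_mem_map stores mem_map - section_nr_to_pfn(pnum),
 *		// so adding the full pfn lands on the right struct page.
 *		return __section_mem_map_addr(ms) + pfn;
 *	}
 *
 * which is essentially what the generic __pfn_to_page() does for SPARSEMEM
 * (see asm-generic/memory_model.h); __pfn_to_section() is defined a little
 * further down in this file.
 */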
static inline int present_section(struct mem_section *section)
{
	return (section && (section->section_mem_map & SECTION_MARKED_PRESENT));
}

static inline int present_section_nr(unsigned long nr)
{
	return present_section(__nr_to_section(nr));
}

static inline int valid_section(struct mem_section *section)
{
	return (section && (section->section_mem_map & SECTION_HAS_MEM_MAP));
}

static inline int early_section(struct mem_section *section)
{
	return (section && (section->section_mem_map & SECTION_IS_EARLY));
}

static inline int valid_section_nr(unsigned long nr)
{
	return valid_section(__nr_to_section(nr));
}

static inline int online_section(struct mem_section *section)
{
	return (section && (section->section_mem_map & SECTION_IS_ONLINE));
}

static inline int online_device_section(struct mem_section *section)
{
	unsigned long flags = SECTION_IS_ONLINE | SECTION_TAINT_ZONE_DEVICE;

	return section && ((section->section_mem_map & flags) == flags);
}

static inline int online_section_nr(unsigned long nr)
{
	return online_section(__nr_to_section(nr));
}

#ifdef CONFIG_MEMORY_HOTPLUG
void online_mem_sections(unsigned long start_pfn, unsigned long end_pfn);
void offline_mem_sections(unsigned long start_pfn, unsigned long end_pfn);
#endif

static inline struct mem_section *__pfn_to_section(unsigned long pfn)
{
	return __nr_to_section(pfn_to_section_nr(pfn));
}

extern unsigned long __highest_present_section_nr;

static inline int subsection_map_index(unsigned long pfn)
{
	return (pfn & ~(PAGE_SECTION_MASK)) / PAGES_PER_SUBSECTION;
}

#ifdef CONFIG_SPARSEMEM_VMEMMAP
static inline int pfn_section_valid(struct mem_section *ms, unsigned long pfn)
{
	int idx = subsection_map_index(pfn);

	return test_bit(idx, ms->usage->subsection_map);
}
#else
static inline int pfn_section_valid(struct mem_section *ms, unsigned long pfn)
{
	return 1;
}
#endif

#ifndef CONFIG_HAVE_ARCH_PFN_VALID
static inline int pfn_valid(unsigned long pfn)
{
	struct mem_section *ms;

	if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
		return 0;
	ms = __nr_to_section(pfn_to_section_nr(pfn));
	if (!valid_section(ms))
		return 0;
	/*
	 * Traditionally early sections always returned pfn_valid() for
	 * the entire section-sized span.
	 */
	return early_section(ms) || pfn_section_valid(ms, pfn);
}
#endif

static inline int pfn_in_present_section(unsigned long pfn)
{
	if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
		return 0;
	return present_section(__nr_to_section(pfn_to_section_nr(pfn)));
}

static inline unsigned long next_present_section_nr(unsigned long section_nr)
{
	while (++section_nr <= __highest_present_section_nr) {
		if (present_section_nr(section_nr))
			return section_nr;
	}

	return -1;
}
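/*
 * Illustrative sketch (made-up helper name; mm/sparse.c has a similar
 * private iterator): because next_present_section_nr() pre-increments its
 * argument and returns -1 once the highest present section has been passed,
 * all present sections can be visited with
 *
 *	unsigned long nr;
 *
 *	for (nr = next_present_section_nr((unsigned long)-1);
 *	     nr != (unsigned long)-1;
 *	     nr = next_present_section_nr(nr))
 *		init_one_section(__nr_to_section(nr));
 *
 * where passing -1 as the starting cursor makes the first call consider
 * section 0, and init_one_section() stands in for whatever per-section work
 * the caller wants to do.
 */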
/*
 * These are _only_ used during initialisation, therefore they
 * can use __initdata ...  They could have names to indicate
 * this restriction.
 */
#ifdef CONFIG_NUMA
#define pfn_to_nid(pfn)							\
({									\
	unsigned long __pfn_to_nid_pfn = (pfn);				\
	page_to_nid(pfn_to_page(__pfn_to_nid_pfn));			\
})
#else
#define pfn_to_nid(pfn)		(0)
#endif

void sparse_init(void);
#else
#define sparse_init()	do {} while (0)
#define sparse_index_init(_sec, _nid)  do {} while (0)
#define pfn_in_present_section pfn_valid
#define subsection_map_init(_pfn, _nr_pages) do {} while (0)
#endif /* CONFIG_SPARSEMEM */

/*
 * If it is possible to have holes within a MAX_ORDER_NR_PAGES, then we
 * need to check pfn validity within that MAX_ORDER_NR_PAGES block.
 * pfn_valid_within() should be used in this case; we optimise this away
 * when we have no holes within a MAX_ORDER_NR_PAGES block.
 */
#ifdef CONFIG_HOLES_IN_ZONE
#define pfn_valid_within(pfn) pfn_valid(pfn)
#else
#define pfn_valid_within(pfn) (1)
#endif

#endif /* !__GENERATING_BOUNDS_H */
#endif /* !__ASSEMBLY__ */
#endif /* _LINUX_MMZONE_H */