/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MMZONE_H
#define _LINUX_MMZONE_H

#ifndef __ASSEMBLY__
#ifndef __GENERATING_BOUNDS_H

#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/bitops.h>
#include <linux/cache.h>
#include <linux/threads.h>
#include <linux/numa.h>
#include <linux/init.h>
#include <linux/seqlock.h>
#include <linux/nodemask.h>
#include <linux/pageblock-flags.h>
#include <linux/page-flags-layout.h>
#include <linux/atomic.h>
#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <linux/local_lock.h>
#include <asm/page.h>

/* Free memory management - zoned buddy allocator. */
#ifndef CONFIG_FORCE_MAX_ZONEORDER
#define MAX_ORDER 11
#else
#define MAX_ORDER CONFIG_FORCE_MAX_ZONEORDER
#endif
#define MAX_ORDER_NR_PAGES (1 << (MAX_ORDER - 1))

/*
 * PAGE_ALLOC_COSTLY_ORDER is the order at which allocations are deemed
 * costly to service. That is between allocation orders which should
 * coalesce naturally under reasonable reclaim pressure and those which
 * will not.
 */
#define PAGE_ALLOC_COSTLY_ORDER 3
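
/*
 * Example (illustrative, not part of the kernel API): with the default
 * MAX_ORDER of 11, the largest buddy allocation spans
 *
 *	MAX_ORDER_NR_PAGES = 1 << (11 - 1) = 1024 pages
 *
 * which is 4 MiB with 4 KiB pages.  Allocations of order greater than
 * PAGE_ALLOC_COSTLY_ORDER (more than 8 contiguous pages here) are
 * treated as costly to satisfy.
 */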

enum migratetype {
	MIGRATE_UNMOVABLE,
	MIGRATE_MOVABLE,
	MIGRATE_RECLAIMABLE,
	MIGRATE_PCPTYPES,	/* the number of types on the pcp lists */
	MIGRATE_HIGHATOMIC = MIGRATE_PCPTYPES,
#ifdef CONFIG_CMA
	/*
	 * MIGRATE_CMA migration type is designed to mimic the way
	 * ZONE_MOVABLE works.  Only movable pages can be allocated
	 * from MIGRATE_CMA pageblocks and the page allocator never
	 * implicitly changes the migration type of a MIGRATE_CMA pageblock.
	 *
	 * The way to use it is to change the migratetype of a range of
	 * pageblocks to MIGRATE_CMA, which can be done by the
	 * __free_pageblock_cma() function.  What is important though is
	 * that a range of pageblocks must be aligned to MAX_ORDER_NR_PAGES
	 * should the biggest page be bigger than a single pageblock.
	 */
	MIGRATE_CMA,
#endif
#ifdef CONFIG_MEMORY_ISOLATION
	MIGRATE_ISOLATE,	/* can't allocate from here */
#endif
	MIGRATE_TYPES
};

/* In mm/page_alloc.c; keep in sync also with show_migration_types() there */
extern const char * const migratetype_names[MIGRATE_TYPES];

#ifdef CONFIG_CMA
# define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA)
# define is_migrate_cma_page(_page) (get_pageblock_migratetype(_page) == MIGRATE_CMA)
#else
# define is_migrate_cma(migratetype) false
# define is_migrate_cma_page(_page) false
#endif

static inline bool is_migrate_movable(int mt)
{
	return is_migrate_cma(mt) || mt == MIGRATE_MOVABLE;
}

#define for_each_migratetype_order(order, type) \
	for (order = 0; order < MAX_ORDER; order++) \
		for (type = 0; type < MIGRATE_TYPES; type++)

extern int page_group_by_mobility_disabled;

#define MIGRATETYPE_MASK ((1UL << PB_migratetype_bits) - 1)

#define get_pageblock_migratetype(page) \
	get_pfnblock_flags_mask(page, page_to_pfn(page), MIGRATETYPE_MASK)

struct free_area {
	struct list_head	free_list[MIGRATE_TYPES];
	unsigned long		nr_free;
};

static inline struct page *get_page_from_free_area(struct free_area *area,
					    int migratetype)
{
	return list_first_entry_or_null(&area->free_list[migratetype],
					struct page, lru);
}

static inline bool free_area_empty(struct free_area *area, int migratetype)
{
	return list_empty(&area->free_list[migratetype]);
}
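
/*
 * Example (illustrative sketch, not kernel code): walking every buddy
 * free list of a zone with the helpers above.  Real users hold
 * zone->lock while touching free_area.
 *
 *	unsigned int order;
 *	int type;
 *	unsigned long nr_nonempty_lists = 0;
 *
 *	for_each_migratetype_order(order, type)
 *		if (!free_area_empty(&zone->free_area[order], type))
 *			nr_nonempty_lists++;
 *
 * The snippet only demonstrates the iteration pattern; nr_free counts
 * all migratetypes of an order, so it is not summed per list here.
 */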

struct pglist_data;

/*
 * Add a wild amount of padding here to ensure data fall into separate
 * cachelines.  There are very few zone structures in the machine, so space
 * consumption is not a concern here.
 */
#if defined(CONFIG_SMP)
struct zone_padding {
	char x[0];
} ____cacheline_internodealigned_in_smp;
#define ZONE_PADDING(name)	struct zone_padding name;
#else
#define ZONE_PADDING(name)
#endif

#ifdef CONFIG_NUMA
enum numa_stat_item {
	NUMA_HIT,		/* allocated in intended node */
	NUMA_MISS,		/* allocated in non intended node */
	NUMA_FOREIGN,		/* was intended here, hit elsewhere */
	NUMA_INTERLEAVE_HIT,	/* interleaver preferred this zone */
	NUMA_LOCAL,		/* allocation from local node */
	NUMA_OTHER,		/* allocation from other node */
	NR_VM_NUMA_EVENT_ITEMS
};
#else
#define NR_VM_NUMA_EVENT_ITEMS 0
#endif

enum zone_stat_item {
	/* First 128 byte cacheline (assuming 64 bit words) */
	NR_FREE_PAGES,
	NR_ZONE_LRU_BASE, /* Used only for compaction and reclaim retry */
	NR_ZONE_INACTIVE_ANON = NR_ZONE_LRU_BASE,
	NR_ZONE_ACTIVE_ANON,
	NR_ZONE_INACTIVE_FILE,
	NR_ZONE_ACTIVE_FILE,
	NR_ZONE_UNEVICTABLE,
	NR_ZONE_WRITE_PENDING,	/* Count of dirty, writeback and unstable pages */
	NR_MLOCK,		/* mlock()ed pages found and moved off LRU */
	/* Second 128 byte cacheline */
	NR_BOUNCE,
#if IS_ENABLED(CONFIG_ZSMALLOC)
	NR_ZSPAGES,		/* allocated in zsmalloc */
#endif
	NR_FREE_CMA_PAGES,
	NR_VM_ZONE_STAT_ITEMS };

enum node_stat_item {
	NR_LRU_BASE,
	NR_INACTIVE_ANON = NR_LRU_BASE, /* must match order of LRU_[IN]ACTIVE */
	NR_ACTIVE_ANON,		/*  "     "     "   "       "         */
	NR_INACTIVE_FILE,	/*  "     "     "   "       "         */
	NR_ACTIVE_FILE,		/*  "     "     "   "       "         */
	NR_UNEVICTABLE,		/*  "     "     "   "       "         */
	NR_SLAB_RECLAIMABLE_B,
	NR_SLAB_UNRECLAIMABLE_B,
	NR_ISOLATED_ANON,	/* Temporary isolated pages from anon lru */
	NR_ISOLATED_FILE,	/* Temporary isolated pages from file lru */
	WORKINGSET_NODES,
	WORKINGSET_REFAULT_BASE,
	WORKINGSET_REFAULT_ANON = WORKINGSET_REFAULT_BASE,
	WORKINGSET_REFAULT_FILE,
	WORKINGSET_ACTIVATE_BASE,
	WORKINGSET_ACTIVATE_ANON = WORKINGSET_ACTIVATE_BASE,
	WORKINGSET_ACTIVATE_FILE,
	WORKINGSET_RESTORE_BASE,
	WORKINGSET_RESTORE_ANON = WORKINGSET_RESTORE_BASE,
	WORKINGSET_RESTORE_FILE,
	WORKINGSET_NODERECLAIM,
	NR_ANON_MAPPED,		/* Mapped anonymous pages */
	NR_FILE_MAPPED,		/* pagecache pages mapped into pagetables.
				   Only modified from process context */
	NR_FILE_PAGES,
	NR_FILE_DIRTY,
	NR_WRITEBACK,
	NR_WRITEBACK_TEMP,	/* Writeback using temporary buffers */
	NR_SHMEM,		/* shmem pages (includes tmpfs/GEM pages) */
	NR_SHMEM_THPS,
	NR_SHMEM_PMDMAPPED,
	NR_FILE_THPS,
	NR_FILE_PMDMAPPED,
	NR_ANON_THPS,
	NR_VMSCAN_WRITE,
	NR_VMSCAN_IMMEDIATE,	/* Prioritise for reclaim when writeback ends */
	NR_DIRTIED,		/* page dirtyings since bootup */
	NR_WRITTEN,		/* page writings since bootup */
	NR_THROTTLED_WRITTEN,	/* NR_WRITTEN while reclaim throttled */
	NR_KERNEL_MISC_RECLAIMABLE,	/* reclaimable non-slab kernel pages */
	NR_FOLL_PIN_ACQUIRED,	/* via: pin_user_page(), gup flag: FOLL_PIN */
	NR_FOLL_PIN_RELEASED,	/* pages returned via unpin_user_page() */
	NR_KERNEL_STACK_KB,	/* measured in KiB */
#if IS_ENABLED(CONFIG_SHADOW_CALL_STACK)
	NR_KERNEL_SCS_KB,	/* measured in KiB */
#endif
	NR_PAGETABLE,		/* used for pagetables */
#ifdef CONFIG_SWAP
	NR_SWAPCACHE,
#endif
	NR_VM_NODE_STAT_ITEMS
};

/*
 * Returns true if the item should be printed in THPs (/proc/vmstat
 * currently prints the number of anon, file and shmem THPs, but the
 * item itself is charged in pages).
 */
static __always_inline bool vmstat_item_print_in_thp(enum node_stat_item item)
{
	if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
		return false;

	return item == NR_ANON_THPS ||
	       item == NR_FILE_THPS ||
	       item == NR_SHMEM_THPS ||
	       item == NR_SHMEM_PMDMAPPED ||
	       item == NR_FILE_PMDMAPPED;
}

/*
 * Returns true if the value is measured in bytes (most vmstat values are
 * measured in pages). This defines the API part, the internal representation
 * might be different.
 */
static __always_inline bool vmstat_item_in_bytes(int idx)
{
	/*
	 * Global and per-node slab counters track slab pages.
	 * It's expected that changes are multiples of PAGE_SIZE.
	 * Internally values are stored in pages.
	 *
	 * Per-memcg and per-lruvec counters track memory, consumed
	 * by individual slab objects. These counters are actually
	 * byte-precise.
	 */
	return (idx == NR_SLAB_RECLAIMABLE_B ||
		idx == NR_SLAB_UNRECLAIMABLE_B);
}

/*
 * We do arithmetic on the LRU lists in various places in the code,
 * so it is important to keep the active lists LRU_ACTIVE higher in
 * the array than the corresponding inactive lists, and to keep
 * the *_FILE lists LRU_FILE higher than the corresponding _ANON lists.
 *
 * This has to be kept in sync with the statistics in zone_stat_item
 * above and the descriptions in vmstat_text in mm/vmstat.c
 */
#define LRU_BASE 0
#define LRU_ACTIVE 1
#define LRU_FILE 2

enum lru_list {
	LRU_INACTIVE_ANON = LRU_BASE,
	LRU_ACTIVE_ANON = LRU_BASE + LRU_ACTIVE,
	LRU_INACTIVE_FILE = LRU_BASE + LRU_FILE,
	LRU_ACTIVE_FILE = LRU_BASE + LRU_FILE + LRU_ACTIVE,
	LRU_UNEVICTABLE,
	NR_LRU_LISTS
};

enum vmscan_throttle_state {
	VMSCAN_THROTTLE_WRITEBACK,
	VMSCAN_THROTTLE_ISOLATED,
	VMSCAN_THROTTLE_NOPROGRESS,
	NR_VMSCAN_THROTTLE,
};

#define for_each_lru(lru) for (lru = 0; lru < NR_LRU_LISTS; lru++)

#define for_each_evictable_lru(lru) for (lru = 0; lru <= LRU_ACTIVE_FILE; lru++)

static inline bool is_file_lru(enum lru_list lru)
{
	return (lru == LRU_INACTIVE_FILE || lru == LRU_ACTIVE_FILE);
}

static inline bool is_active_lru(enum lru_list lru)
{
	return (lru == LRU_ACTIVE_ANON || lru == LRU_ACTIVE_FILE);
}

#define ANON_AND_FILE 2

enum lruvec_flags {
	LRUVEC_CONGESTED,		/* lruvec has many dirty pages
					 * backed by a congested BDI
					 */
};
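
/*
 * Example (illustrative): the LRU_* offsets above make list indices
 * computable rather than looked up, e.g.
 *
 *	LRU_INACTIVE_ANON = 0
 *	LRU_ACTIVE_ANON   = 0 + LRU_ACTIVE            = 1
 *	LRU_INACTIVE_FILE = 0 + LRU_FILE              = 2
 *	LRU_ACTIVE_FILE   = 0 + LRU_FILE + LRU_ACTIVE = 3
 *
 * so code can derive a list from a page's properties, roughly:
 *
 *	enum lru_list lru = LRU_BASE + (file ? LRU_FILE : 0) +
 *			    (active ? LRU_ACTIVE : 0);
 *
 * This is a sketch of the arithmetic the comment above describes, not a
 * quote of any particular helper.
 */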

struct lruvec {
	struct list_head		lists[NR_LRU_LISTS];
	/* per lruvec lru_lock for memcg */
	spinlock_t			lru_lock;
	/*
	 * These track the cost of reclaiming one LRU - file or anon -
	 * over the other. As the observed cost of reclaiming one LRU
	 * increases, the reclaim scan balance tips toward the other.
	 */
	unsigned long			anon_cost;
	unsigned long			file_cost;
	/* Non-resident age, driven by LRU movement */
	atomic_long_t			nonresident_age;
	/* Refaults at the time of last reclaim cycle */
	unsigned long			refaults[ANON_AND_FILE];
	/* Various lruvec state flags (enum lruvec_flags) */
	unsigned long			flags;
#ifdef CONFIG_MEMCG
	struct pglist_data *pgdat;
#endif
};

/* Isolate unmapped pages */
#define ISOLATE_UNMAPPED	((__force isolate_mode_t)0x2)
/* Isolate for asynchronous migration */
#define ISOLATE_ASYNC_MIGRATE	((__force isolate_mode_t)0x4)
/* Isolate unevictable pages */
#define ISOLATE_UNEVICTABLE	((__force isolate_mode_t)0x8)

/* LRU Isolation modes. */
typedef unsigned __bitwise isolate_mode_t;

enum zone_watermarks {
	WMARK_MIN,
	WMARK_LOW,
	WMARK_HIGH,
	NR_WMARK
};

/*
 * For each migratetype, one pcp list per order up to and including
 * PAGE_ALLOC_COSTLY_ORDER, plus one additional list for pageblock-order
 * (THP-sized) pages if THP is configured.
 */
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define NR_PCP_THP 1
#else
#define NR_PCP_THP 0
#endif
#define NR_PCP_LISTS (MIGRATE_PCPTYPES * (PAGE_ALLOC_COSTLY_ORDER + 1 + NR_PCP_THP))

/*
 * Shift to encode migratetype and order in the same integer, with order
 * in the least significant bits.
 */
#define NR_PCP_ORDER_WIDTH 8
#define NR_PCP_ORDER_MASK ((1<<NR_PCP_ORDER_WIDTH) - 1)

#define min_wmark_pages(z) (z->_watermark[WMARK_MIN] + z->watermark_boost)
#define low_wmark_pages(z) (z->_watermark[WMARK_LOW] + z->watermark_boost)
#define high_wmark_pages(z) (z->_watermark[WMARK_HIGH] + z->watermark_boost)
#define wmark_pages(z, i) (z->_watermark[i] + z->watermark_boost)

/* Fields and list protected by pagesets local_lock in page_alloc.c */
struct per_cpu_pages {
	int count;		/* number of pages in the list */
	int high;		/* high watermark, emptying needed */
	int batch;		/* chunk size for buddy add/remove */
	short free_factor;	/* batch scaling factor during free */
#ifdef CONFIG_NUMA
	short expire;		/* When 0, remote pagesets are drained */
#endif

	/* Lists of pages, one per migrate type stored on the pcp-lists */
	struct list_head lists[NR_PCP_LISTS];
};

struct per_cpu_zonestat {
#ifdef CONFIG_SMP
	s8 vm_stat_diff[NR_VM_ZONE_STAT_ITEMS];
	s8 stat_threshold;
#endif
#ifdef CONFIG_NUMA
	/*
	 * Low priority inaccurate counters that are only folded
	 * on demand. Use a large type to avoid the overhead of
	 * folding during refresh_cpu_vm_stats.
	 */
	unsigned long vm_numa_event[NR_VM_NUMA_EVENT_ITEMS];
#endif
};

struct per_cpu_nodestat {
	s8 stat_threshold;
	s8 vm_node_stat_diff[NR_VM_NODE_STAT_ITEMS];
};

#endif /* !__GENERATING_BOUNDS_H */
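
/*
 * Example (illustrative sketch): the _watermark[] array is always read
 * through the helpers above so that watermark_boost is included, e.g.
 *
 *	if (zone_page_state(zone, NR_FREE_PAGES) < low_wmark_pages(zone))
 *		wakeup_kswapd(zone, gfp_mask, order, highest_zoneidx);
 *
 * zone_page_state() lives in <linux/vmstat.h> and wakeup_kswapd() is
 * declared further down in this header; the snippet only sketches the
 * pattern and is not copied from the allocator.
 */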

enum zone_type {
	/*
	 * ZONE_DMA and ZONE_DMA32 are used when there are peripherals not able
	 * to DMA to all of the addressable memory (ZONE_NORMAL).
	 * On architectures where this area covers the whole 32 bit address
	 * space ZONE_DMA32 is used. ZONE_DMA is left for the ones with smaller
	 * DMA addressing constraints. This distinction is important as a 32bit
	 * DMA mask is assumed when ZONE_DMA32 is defined. Some 64-bit
	 * platforms may need both zones as they support peripherals with
	 * different DMA addressing limitations.
	 */
#ifdef CONFIG_ZONE_DMA
	ZONE_DMA,
#endif
#ifdef CONFIG_ZONE_DMA32
	ZONE_DMA32,
#endif
	/*
	 * Normal addressable memory is in ZONE_NORMAL. DMA operations can be
	 * performed on pages in ZONE_NORMAL if the DMA devices support
	 * transfers to all addressable memory.
	 */
	ZONE_NORMAL,
#ifdef CONFIG_HIGHMEM
	/*
	 * A memory area that is only addressable by the kernel through
	 * mapping portions into its own address space. This is for example
	 * used by i386 to allow the kernel to address the memory beyond
	 * 900MB. The kernel will set up special mappings (page
	 * table entries on i386) for each page that the kernel needs to
	 * access.
	 */
	ZONE_HIGHMEM,
#endif
	/*
	 * ZONE_MOVABLE is similar to ZONE_NORMAL, except that it contains
	 * movable pages with few exceptional cases described below. Main use
	 * cases for ZONE_MOVABLE are to make memory offlining/unplug more
	 * likely to succeed, and to locally limit unmovable allocations - e.g.,
	 * to increase the number of THP/huge pages. Notable special cases are:
	 *
	 * 1. Pinned pages: (long-term) pinning of movable pages might
	 *    essentially turn such pages unmovable. Therefore, we do not allow
	 *    pinning long-term pages in ZONE_MOVABLE. When pages are pinned and
	 *    faulted, they come from the right zone right away. However, it is
	 *    still possible that the address space already has pages in
	 *    ZONE_MOVABLE at the time when pages are pinned (i.e. the user has
	 *    touched that memory before pinning). In such case we migrate them
	 *    to a different zone. When migration fails - pinning fails.
	 * 2. memblock allocations: kernelcore/movablecore setups might create
	 *    situations where ZONE_MOVABLE contains unmovable allocations
	 *    after boot. Memory offlining and allocations fail early.
	 * 3. Memory holes: kernelcore/movablecore setups might create very rare
	 *    situations where ZONE_MOVABLE contains memory holes after boot,
	 *    for example, if we have sections that are only partially
	 *    populated. Memory offlining and allocations fail early.
	 * 4. PG_hwpoison pages: while poisoned pages can be skipped during
	 *    memory offlining, such pages cannot be allocated.
	 * 5. Unmovable PG_offline pages: in paravirtualized environments,
	 *    hotplugged memory blocks might only partially be managed by the
	 *    buddy (e.g., via XEN-balloon, Hyper-V balloon, virtio-mem). The
	 *    parts not managed by the buddy are unmovable PG_offline pages. In
	 *    some cases (virtio-mem), such pages can be skipped during
	 *    memory offlining, but they cannot be moved/allocated. These
	 *    techniques might use alloc_contig_range() to hide previously
	 *    exposed pages from the buddy again (e.g., to implement some sort
	 *    of memory unplug in virtio-mem).
	 * 6. ZERO_PAGE(0): kernelcore/movablecore setups might create
	 *    situations where ZERO_PAGE(0), which is allocated differently
	 *    on different platforms, may end up in a movable zone. ZERO_PAGE(0)
	 *    cannot be migrated.
	 * 7. Memory-hotplug: when using memmap_on_memory and onlining the
	 *    memory to the MOVABLE zone, the vmemmap pages are also placed in
	 *    such zone. Such pages cannot really be moved around as they are
	 *    self-stored in the range, but they are treated as movable when
	 *    the range they describe is about to be offlined.
	 *
	 * In general, no unmovable allocations that degrade memory offlining
	 * should end up in ZONE_MOVABLE.
	 * Allocators (like alloc_contig_range()) have to expect that migrating
	 * pages in ZONE_MOVABLE can fail (even if has_unmovable_pages() states
	 * that there are no unmovable pages, there can be false negatives).
	 */
	ZONE_MOVABLE,
#ifdef CONFIG_ZONE_DEVICE
	ZONE_DEVICE,
#endif
	__MAX_NR_ZONES

};

#ifndef __GENERATING_BOUNDS_H

#define ASYNC_AND_SYNC 2

struct zone {
	/* Read-mostly fields */

	/* zone watermarks, access with *_wmark_pages(zone) macros */
	unsigned long _watermark[NR_WMARK];
	unsigned long watermark_boost;

	unsigned long nr_reserved_highatomic;

	/*
	 * We don't know if the memory that we're going to allocate will be
	 * freeable and/or it will be released eventually, so to avoid totally
	 * wasting several GB of ram we must reserve some of the lower zone
	 * memory (otherwise we risk running OOM on the lower zones despite
	 * there being tons of freeable ram on the higher zones). This array is
	 * recalculated at runtime if the sysctl_lowmem_reserve_ratio sysctl
	 * changes.
	 */
	long lowmem_reserve[MAX_NR_ZONES];

#ifdef CONFIG_NUMA
	int node;
#endif
	struct pglist_data	*zone_pgdat;
	struct per_cpu_pages	__percpu *per_cpu_pageset;
	struct per_cpu_zonestat	__percpu *per_cpu_zonestats;
	/*
	 * the high and batch values are copied to individual pagesets for
	 * faster access
	 */
	int pageset_high;
	int pageset_batch;

#ifndef CONFIG_SPARSEMEM
	/*
	 * Flags for a pageblock_nr_pages block. See pageblock-flags.h.
	 * In SPARSEMEM, this map is stored in struct mem_section
	 */
	unsigned long		*pageblock_flags;
#endif /* CONFIG_SPARSEMEM */

	/* zone_start_pfn == zone_start_paddr >> PAGE_SHIFT */
	unsigned long		zone_start_pfn;

	/*
	 * spanned_pages is the total pages spanned by the zone, including
	 * holes, which is calculated as:
	 *	spanned_pages = zone_end_pfn - zone_start_pfn;
	 *
	 * present_pages is physical pages existing within the zone, which
	 * is calculated as:
	 *	present_pages = spanned_pages - absent_pages(pages in holes);
	 *
	 * present_early_pages is present pages existing within the zone
	 * located on memory available since early boot, excluding hotplugged
	 * memory.
	 *
	 * managed_pages is present pages managed by the buddy system, which
	 * is calculated as (reserved_pages includes pages allocated by the
	 * bootmem allocator):
	 *	managed_pages = present_pages - reserved_pages;
	 *
	 * cma_pages is present pages that are assigned for CMA use
	 * (MIGRATE_CMA).
	 *
	 * So present_pages may be used by memory hotplug or memory power
	 * management logic to figure out unmanaged pages by checking
	 * (present_pages - managed_pages). And managed_pages should be used
	 * by page allocator and vm scanner to calculate all kinds of watermarks
	 * and thresholds.
	 *
	 * Locking rules:
	 *
	 * zone_start_pfn and spanned_pages are protected by span_seqlock.
	 * It is a seqlock because it has to be read outside of zone->lock,
	 * and it is done in the main allocator path.  But, it is written
	 * quite infrequently.
	 *
	 * The span_seq lock is declared along with zone->lock because it is
	 * frequently read in proximity to zone->lock.  It's good to
	 * give them a chance of being in the same cacheline.
	 *
	 * Write access to present_pages at runtime should be protected by
	 * mem_hotplug_begin/end(). Any reader who can't tolerate drift of
	 * present_pages should use get_online_mems() to get a stable value.
	 */
	atomic_long_t		managed_pages;
	unsigned long		spanned_pages;
	unsigned long		present_pages;
#if defined(CONFIG_MEMORY_HOTPLUG)
	unsigned long		present_early_pages;
#endif
#ifdef CONFIG_CMA
	unsigned long		cma_pages;
#endif

	const char		*name;

#ifdef CONFIG_MEMORY_ISOLATION
	/*
	 * Number of isolated pageblocks. It is used to solve an incorrect
	 * freepage counting problem due to racy retrieving of the
	 * migratetype of a pageblock. Protected by zone->lock.
	 */
	unsigned long		nr_isolate_pageblock;
#endif

#ifdef CONFIG_MEMORY_HOTPLUG
	/* see spanned/present_pages for more description */
	seqlock_t		span_seqlock;
#endif

	int initialized;

	/* Write-intensive fields used from the page allocator */
	ZONE_PADDING(_pad1_)

	/* free areas of different sizes */
	struct free_area	free_area[MAX_ORDER];

	/* zone flags, see below */
	unsigned long		flags;

	/* Primarily protects free_area */
	spinlock_t		lock;

	/* Write-intensive fields used by compaction and vmstats. */
	ZONE_PADDING(_pad2_)

	/*
	 * When free pages are below this point, additional steps are taken
	 * when reading the number of free pages to avoid per-cpu counter
	 * drift allowing watermarks to be breached
	 */
	unsigned long percpu_drift_mark;

#if defined CONFIG_COMPACTION || defined CONFIG_CMA
	/* pfn where compaction free scanner should start */
	unsigned long		compact_cached_free_pfn;
	/* pfn where compaction migration scanner should start */
	unsigned long		compact_cached_migrate_pfn[ASYNC_AND_SYNC];
	unsigned long		compact_init_migrate_pfn;
	unsigned long		compact_init_free_pfn;
#endif

#ifdef CONFIG_COMPACTION
	/*
	 * On compaction failure, 1<<compact_defer_shift compactions
	 * are skipped before trying again. The number attempted since
	 * last failure is tracked with compact_considered.
	 * compact_order_failed is the minimum compaction failed order.
	 */
	unsigned int		compact_considered;
	unsigned int		compact_defer_shift;
	int			compact_order_failed;
#endif

#if defined CONFIG_COMPACTION || defined CONFIG_CMA
	/* Set to true when the PG_migrate_skip bits should be cleared */
	bool			compact_blockskip_flush;
#endif

	bool			contiguous;

	ZONE_PADDING(_pad3_)
	/* Zone statistics */
	atomic_long_t		vm_stat[NR_VM_ZONE_STAT_ITEMS];
	atomic_long_t		vm_numa_event[NR_VM_NUMA_EVENT_ITEMS];
} ____cacheline_internodealigned_in_smp;

enum pgdat_flags {
	PGDAT_DIRTY,			/* reclaim scanning has recently found
					 * many dirty file pages at the tail
					 * of the LRU.
					 */
	PGDAT_WRITEBACK,		/* reclaim scanning has recently found
					 * many pages under writeback
					 */
	PGDAT_RECLAIM_LOCKED,		/* prevents concurrent reclaim */
};

enum zone_flags {
	ZONE_BOOSTED_WATERMARK,		/* zone recently boosted watermarks.
					 * Cleared when kswapd is woken.
					 */
	ZONE_RECLAIM_ACTIVE,		/* kswapd may be scanning the zone. */
};
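
/*
 * Example (illustrative): zone->flags is manipulated with the usual bit
 * operations, e.g. the allocator clears the boost flag roughly like
 *
 *	if (test_bit(ZONE_BOOSTED_WATERMARK, &zone->flags))
 *		clear_bit(ZONE_BOOSTED_WATERMARK, &zone->flags);
 *
 * This is a sketch of the pattern used in mm/page_alloc.c, not a
 * verbatim quote.
 */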

static inline unsigned long zone_managed_pages(struct zone *zone)
{
	return (unsigned long)atomic_long_read(&zone->managed_pages);
}

static inline unsigned long zone_cma_pages(struct zone *zone)
{
#ifdef CONFIG_CMA
	return zone->cma_pages;
#else
	return 0;
#endif
}

static inline unsigned long zone_end_pfn(const struct zone *zone)
{
	return zone->zone_start_pfn + zone->spanned_pages;
}

static inline bool zone_spans_pfn(const struct zone *zone, unsigned long pfn)
{
	return zone->zone_start_pfn <= pfn && pfn < zone_end_pfn(zone);
}

static inline bool zone_is_initialized(struct zone *zone)
{
	return zone->initialized;
}

static inline bool zone_is_empty(struct zone *zone)
{
	return zone->spanned_pages == 0;
}

/*
 * Return true if [start_pfn, start_pfn + nr_pages) range has a non-empty
 * intersection with the given zone
 */
static inline bool zone_intersects(struct zone *zone,
		unsigned long start_pfn, unsigned long nr_pages)
{
	if (zone_is_empty(zone))
		return false;
	if (start_pfn >= zone_end_pfn(zone) ||
	    start_pfn + nr_pages <= zone->zone_start_pfn)
		return false;

	return true;
}
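
/*
 * Example (illustrative): a zone spanning PFNs [0x1000, 0x2000) has
 * zone_start_pfn == 0x1000 and spanned_pages == 0x1000, so
 * zone_end_pfn() returns 0x2000 (exclusive).  zone_spans_pfn(zone, 0x1fff)
 * is true, zone_spans_pfn(zone, 0x2000) is false, and
 * zone_intersects(zone, 0x1f00, 0x200) is true because the ranges overlap
 * in [0x1f00, 0x2000).
 */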

/*
 * The "priority" of VM scanning is how much of the queues we will scan in one
 * go. A value of 12 for DEF_PRIORITY implies that we will scan 1/4096th of the
 * queues ("queue_length >> 12") during an aging round.
 */
#define DEF_PRIORITY 12

/* Maximum number of zones on a zonelist */
#define MAX_ZONES_PER_ZONELIST (MAX_NUMNODES * MAX_NR_ZONES)

enum {
	ZONELIST_FALLBACK,	/* zonelist with fallback */
#ifdef CONFIG_NUMA
	/*
	 * The NUMA zonelists are doubled because we need zonelists that
	 * restrict the allocations to a single node for __GFP_THISNODE.
	 */
	ZONELIST_NOFALLBACK,	/* zonelist without fallback (__GFP_THISNODE) */
#endif
	MAX_ZONELISTS
};

/*
 * This struct contains information about a zone in a zonelist. It is stored
 * here to avoid dereferences into large structures and lookups of tables
 */
struct zoneref {
	struct zone *zone;	/* Pointer to actual zone */
	int zone_idx;		/* zone_idx(zoneref->zone) */
};

/*
 * One allocation request operates on a zonelist. A zonelist
 * is a list of zones, the first one is the 'goal' of the
 * allocation, the other zones are fallback zones, in decreasing
 * priority.
 *
 * To speed the reading of the zonelist, the zonerefs contain the zone index
 * of the entry being read. Helper functions to access information given
 * a struct zoneref are
 *
 * zonelist_zone()	- Return the struct zone * for an entry in _zonerefs
 * zonelist_zone_idx()	- Return the index of the zone for an entry
 * zonelist_node_idx()	- Return the index of the node for an entry
 */
struct zonelist {
	struct zoneref _zonerefs[MAX_ZONES_PER_ZONELIST + 1];
};

/*
 * The array of struct pages for flatmem.
 * It must be declared for SPARSEMEM as well because there are configurations
 * that rely on that.
 */
extern struct page *mem_map;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
struct deferred_split {
	spinlock_t split_queue_lock;
	struct list_head split_queue;
	unsigned long split_queue_len;
};
#endif

/*
 * On NUMA machines, each NUMA node would have a pg_data_t to describe
 * its memory layout. On UMA machines there is a single pglist_data which
 * describes the whole memory.
 *
 * Memory statistics and page replacement data structures are maintained on a
 * per-zone basis.
 */
typedef struct pglist_data {
	/*
	 * node_zones contains just the zones for THIS node. Not all of the
	 * zones may be populated, but it is the full list. It is referenced by
	 * this node's node_zonelists as well as other nodes' node_zonelists.
	 */
	struct zone node_zones[MAX_NR_ZONES];

	/*
	 * node_zonelists contains references to all zones in all nodes.
	 * Generally the first zones will be references to this node's
	 * node_zones.
	 */
	struct zonelist node_zonelists[MAX_ZONELISTS];

	int nr_zones;			/* number of populated zones in this node */
#ifdef CONFIG_FLATMEM	/* means !SPARSEMEM */
	struct page *node_mem_map;
#ifdef CONFIG_PAGE_EXTENSION
	struct page_ext *node_page_ext;
#endif
#endif
#if defined(CONFIG_MEMORY_HOTPLUG) || defined(CONFIG_DEFERRED_STRUCT_PAGE_INIT)
	/*
	 * Must be held any time you expect node_start_pfn,
	 * node_present_pages, node_spanned_pages or nr_zones to stay constant.
	 * Also synchronizes pgdat->first_deferred_pfn during deferred page
	 * init.
	 *
	 * pgdat_resize_lock() and pgdat_resize_unlock() are provided to
	 * manipulate node_size_lock without checking for CONFIG_MEMORY_HOTPLUG
	 * or CONFIG_DEFERRED_STRUCT_PAGE_INIT.
	 *
	 * Nests above zone->lock and zone->span_seqlock
	 */
	spinlock_t node_size_lock;
#endif
	unsigned long node_start_pfn;
	unsigned long node_present_pages; /* total number of physical pages */
	unsigned long node_spanned_pages; /* total size of physical page
					     range, including holes */
	int node_id;
	wait_queue_head_t kswapd_wait;
	wait_queue_head_t pfmemalloc_wait;

	/* wait queues for throttling reclaim for different reasons. */
	wait_queue_head_t reclaim_wait[NR_VMSCAN_THROTTLE];

	atomic_t nr_writeback_throttled;/* nr of writeback-throttled tasks */
	unsigned long nr_reclaim_start;	/* nr pages written while throttled
					 * when throttling started. */
	struct task_struct *kswapd;	/* Protected by
					   mem_hotplug_begin/end() */
	int kswapd_order;
	enum zone_type kswapd_highest_zoneidx;

	int kswapd_failures;		/* Number of 'reclaimed == 0' runs */

#ifdef CONFIG_COMPACTION
	int kcompactd_max_order;
	enum zone_type kcompactd_highest_zoneidx;
	wait_queue_head_t kcompactd_wait;
	struct task_struct *kcompactd;
	bool proactive_compact_trigger;
#endif
	/*
	 * This is a per-node reserve of pages that are not available
	 * to userspace allocations.
	 */
	unsigned long		totalreserve_pages;

#ifdef CONFIG_NUMA
	/*
	 * node reclaim becomes active if more unmapped pages exist.
	 */
	unsigned long		min_unmapped_pages;
	unsigned long		min_slab_pages;
#endif /* CONFIG_NUMA */

	/* Write-intensive fields used by page reclaim */
	ZONE_PADDING(_pad1_)

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
	/*
	 * If memory initialisation on large machines is deferred then this
	 * is the first PFN that needs to be initialised.
	 */
	unsigned long first_deferred_pfn;
#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	struct deferred_split deferred_split_queue;
#endif

	/* Fields commonly accessed by the page reclaim scanner */

	/*
	 * NOTE: THIS IS UNUSED IF MEMCG IS ENABLED.
	 *
	 * Use mem_cgroup_lruvec() to look up lruvecs.
	 */
	struct lruvec		__lruvec;

	unsigned long		flags;

	ZONE_PADDING(_pad2_)

	/* Per-node vmstats */
	struct per_cpu_nodestat __percpu *per_cpu_nodestats;
	atomic_long_t		vm_stat[NR_VM_NODE_STAT_ITEMS];
} pg_data_t;

#define node_present_pages(nid)	(NODE_DATA(nid)->node_present_pages)
#define node_spanned_pages(nid)	(NODE_DATA(nid)->node_spanned_pages)
#ifdef CONFIG_FLATMEM
#define pgdat_page_nr(pgdat, pagenr)	((pgdat)->node_mem_map + (pagenr))
#else
#define pgdat_page_nr(pgdat, pagenr)	pfn_to_page((pgdat)->node_start_pfn + (pagenr))
#endif
#define nid_page_nr(nid, pagenr)	pgdat_page_nr(NODE_DATA(nid),(pagenr))

#define node_start_pfn(nid)	(NODE_DATA(nid)->node_start_pfn)
#define node_end_pfn(nid) pgdat_end_pfn(NODE_DATA(nid))

static inline unsigned long pgdat_end_pfn(pg_data_t *pgdat)
{
	return pgdat->node_start_pfn + pgdat->node_spanned_pages;
}

static inline bool pgdat_is_empty(pg_data_t *pgdat)
{
	return !pgdat->node_start_pfn && !pgdat->node_spanned_pages;
}

#include <linux/memory_hotplug.h>

void build_all_zonelists(pg_data_t *pgdat);
void wakeup_kswapd(struct zone *zone, gfp_t gfp_mask, int order,
		   enum zone_type highest_zoneidx);
bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
			 int highest_zoneidx, unsigned int alloc_flags,
			 long free_pages);
bool zone_watermark_ok(struct zone *z, unsigned int order,
		unsigned long mark, int highest_zoneidx,
		unsigned int alloc_flags);
bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
		unsigned long mark, int highest_zoneidx);
/*
 * Memory initialization context, used to differentiate memory added by
 * the platform statically or via the memory hotplug interface.
 */
enum meminit_context {
	MEMINIT_EARLY,
	MEMINIT_HOTPLUG,
};

extern void init_currently_empty_zone(struct zone *zone, unsigned long start_pfn,
				     unsigned long size);

extern void lruvec_init(struct lruvec *lruvec);

static inline struct pglist_data *lruvec_pgdat(struct lruvec *lruvec)
{
#ifdef CONFIG_MEMCG
	return lruvec->pgdat;
#else
	return container_of(lruvec, struct pglist_data, __lruvec);
#endif
}

#ifdef CONFIG_HAVE_MEMORYLESS_NODES
int local_memory_node(int node_id);
#else
static inline int local_memory_node(int node_id) { return node_id; }
#endif
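
/*
 * Example (illustrative sketch): a caller checking whether a zone can
 * satisfy an order-0 allocation without dipping below the minimum
 * watermark might do something like
 *
 *	unsigned long mark = min_wmark_pages(zone);
 *
 *	if (!zone_watermark_ok(zone, 0, mark, highest_zoneidx, 0))
 *		... fall back to another zone or wake kswapd ...
 *
 * The real fast path in mm/page_alloc.c passes alloc_flags derived from
 * the gfp mask; this is only a simplified outline.
 */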

/*
 * zone_idx() returns 0 for the ZONE_DMA zone, 1 for the ZONE_NORMAL zone, etc.
 */
#define zone_idx(zone)		((zone) - (zone)->zone_pgdat->node_zones)

#ifdef CONFIG_ZONE_DEVICE
static inline bool zone_is_zone_device(struct zone *zone)
{
	return zone_idx(zone) == ZONE_DEVICE;
}
#else
static inline bool zone_is_zone_device(struct zone *zone)
{
	return false;
}
#endif

/*
 * Returns true if a zone has pages managed by the buddy allocator.
 * All the reclaim decisions have to use this function rather than
 * populated_zone(). If the whole zone is reserved then we can easily
 * end up with populated_zone() && !managed_zone().
 */
static inline bool managed_zone(struct zone *zone)
{
	return zone_managed_pages(zone);
}

/* Returns true if a zone has memory */
static inline bool populated_zone(struct zone *zone)
{
	return zone->present_pages;
}

#ifdef CONFIG_NUMA
static inline int zone_to_nid(struct zone *zone)
{
	return zone->node;
}

static inline void zone_set_nid(struct zone *zone, int nid)
{
	zone->node = nid;
}
#else
static inline int zone_to_nid(struct zone *zone)
{
	return 0;
}

static inline void zone_set_nid(struct zone *zone, int nid) {}
#endif

extern int movable_zone;

static inline int is_highmem_idx(enum zone_type idx)
{
#ifdef CONFIG_HIGHMEM
	return (idx == ZONE_HIGHMEM ||
		(idx == ZONE_MOVABLE && movable_zone == ZONE_HIGHMEM));
#else
	return 0;
#endif
}
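
/*
 * Example (illustrative): a zone whose pages are all reserved at boot has
 * present_pages > 0 but zone_managed_pages() == 0, so populated_zone() is
 * true while managed_zone() is false.  Reclaim-style loops therefore
 * filter on managed_zone(), roughly:
 *
 *	for (i = 0; i < MAX_NR_ZONES; i++) {
 *		struct zone *zone = &pgdat->node_zones[i];
 *
 *		if (!managed_zone(zone))
 *			continue;
 *		...
 *	}
 *
 * This is a sketch of the pattern, not a quote from mm/vmscan.c.
 */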

/**
 * is_highmem - helper function to quickly check if a struct zone is a
 *              highmem zone or not.  This is an attempt to keep references
 *              to ZONE_{DMA/NORMAL/HIGHMEM/etc} in general code to a minimum.
 * @zone: pointer to struct zone variable
 * Return: 1 for a highmem zone, 0 otherwise
 */
static inline int is_highmem(struct zone *zone)
{
#ifdef CONFIG_HIGHMEM
	return is_highmem_idx(zone_idx(zone));
#else
	return 0;
#endif
}

/* These two functions are used to setup the per zone pages min values */
struct ctl_table;

int min_free_kbytes_sysctl_handler(struct ctl_table *, int, void *, size_t *,
		loff_t *);
int watermark_scale_factor_sysctl_handler(struct ctl_table *, int, void *,
		size_t *, loff_t *);
extern int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES];
int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int, void *,
		size_t *, loff_t *);
int percpu_pagelist_high_fraction_sysctl_handler(struct ctl_table *, int,
		void *, size_t *, loff_t *);
int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *, int,
		void *, size_t *, loff_t *);
int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *, int,
		void *, size_t *, loff_t *);
int numa_zonelist_order_handler(struct ctl_table *, int,
		void *, size_t *, loff_t *);
extern int percpu_pagelist_high_fraction;
extern char numa_zonelist_order[];
#define NUMA_ZONELIST_ORDER_LEN	16

#ifndef CONFIG_NUMA

extern struct pglist_data contig_page_data;
static inline struct pglist_data *NODE_DATA(int nid)
{
	return &contig_page_data;
}
#define NODE_MEM_MAP(nid)	mem_map

#else /* CONFIG_NUMA */

#include <asm/mmzone.h>

#endif /* !CONFIG_NUMA */

extern struct pglist_data *first_online_pgdat(void);
extern struct pglist_data *next_online_pgdat(struct pglist_data *pgdat);
extern struct zone *next_zone(struct zone *zone);

/**
 * for_each_online_pgdat - helper macro to iterate over all online nodes
 * @pgdat: pointer to a pg_data_t variable
 */
#define for_each_online_pgdat(pgdat)			\
	for (pgdat = first_online_pgdat();		\
	     pgdat;					\
	     pgdat = next_online_pgdat(pgdat))
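
/*
 * Example (illustrative): summing present pages across all online nodes
 * with the iterator above.
 *
 *	pg_data_t *pgdat;
 *	unsigned long present = 0;
 *
 *	for_each_online_pgdat(pgdat)
 *		present += pgdat->node_present_pages;
 *
 * A sketch of the iteration pattern only, not taken from kernel code.
 */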

/**
 * for_each_zone - helper macro to iterate over all memory zones
 * @zone: pointer to struct zone variable
 *
 * The user only needs to declare the zone variable, for_each_zone
 * fills it in.
 */
#define for_each_zone(zone)				\
	for (zone = (first_online_pgdat())->node_zones;	\
	     zone;					\
	     zone = next_zone(zone))

#define for_each_populated_zone(zone)			\
	for (zone = (first_online_pgdat())->node_zones;	\
	     zone;					\
	     zone = next_zone(zone))			\
		if (!populated_zone(zone))		\
			; /* do nothing */		\
		else

static inline struct zone *zonelist_zone(struct zoneref *zoneref)
{
	return zoneref->zone;
}

static inline int zonelist_zone_idx(struct zoneref *zoneref)
{
	return zoneref->zone_idx;
}

static inline int zonelist_node_idx(struct zoneref *zoneref)
{
	return zone_to_nid(zoneref->zone);
}

struct zoneref *__next_zones_zonelist(struct zoneref *z,
					enum zone_type highest_zoneidx,
					nodemask_t *nodes);

/**
 * next_zones_zonelist - Returns the next zone at or below highest_zoneidx within the allowed nodemask using a cursor within a zonelist as a starting point
 * @z: The cursor used as a starting point for the search
 * @highest_zoneidx: The zone index of the highest zone to return
 * @nodes: An optional nodemask to filter the zonelist with
 *
 * This function returns the next zone at or below a given zone index that is
 * within the allowed nodemask using a cursor as the starting point for the
 * search. The zoneref returned is a cursor that represents the current zone
 * being examined. It should be advanced by one before calling
 * next_zones_zonelist again.
 *
 * Return: the next zone at or below highest_zoneidx within the allowed
 * nodemask using a cursor within a zonelist as a starting point
 */
static __always_inline struct zoneref *next_zones_zonelist(struct zoneref *z,
					enum zone_type highest_zoneidx,
					nodemask_t *nodes)
{
	if (likely(!nodes && zonelist_zone_idx(z) <= highest_zoneidx))
		return z;
	return __next_zones_zonelist(z, highest_zoneidx, nodes);
}
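
/*
 * Example (illustrative): summing managed pages over every populated zone
 * in the system using the iterators above.
 *
 *	struct zone *zone;
 *	unsigned long managed = 0;
 *
 *	for_each_populated_zone(zone)
 *		managed += zone_managed_pages(zone);
 *
 * A sketch of the iteration pattern only.
 */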

/**
 * first_zones_zonelist - Returns the first zone at or below highest_zoneidx within the allowed nodemask in a zonelist
 * @zonelist: The zonelist to search for a suitable zone
 * @highest_zoneidx: The zone index of the highest zone to return
 * @nodes: An optional nodemask to filter the zonelist with
 *
 * This function returns the first zone at or below a given zone index that is
 * within the allowed nodemask. The zoneref returned is a cursor that can be
 * used to iterate the zonelist with next_zones_zonelist by advancing it by
 * one before calling.
 *
 * When no eligible zone is found, zoneref->zone is NULL (zoneref itself is
 * never NULL). This may happen either genuinely, or due to a concurrent
 * nodemask update caused by a cpuset modification.
 *
 * Return: Zoneref pointer for the first suitable zone found
 */
static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
					enum zone_type highest_zoneidx,
					nodemask_t *nodes)
{
	return next_zones_zonelist(zonelist->_zonerefs,
							highest_zoneidx, nodes);
}

/**
 * for_each_zone_zonelist_nodemask - helper macro to iterate over valid zones in a zonelist at or below a given zone index and within a nodemask
 * @zone: The current zone in the iterator
 * @z: The current pointer within zonelist->_zonerefs being iterated
 * @zlist: The zonelist being iterated
 * @highidx: The zone index of the highest zone to return
 * @nodemask: Nodemask allowed by the allocator
 *
 * This iterator iterates through all zones at or below a given zone index and
 * within a given nodemask
 */
#define for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, nodemask) \
	for (z = first_zones_zonelist(zlist, highidx, nodemask), zone = zonelist_zone(z);	\
		zone;							\
		z = next_zones_zonelist(++z, highidx, nodemask),	\
			zone = zonelist_zone(z))

#define for_next_zone_zonelist_nodemask(zone, z, highidx, nodemask) \
	for (zone = z->zone;						\
		zone;							\
		z = next_zones_zonelist(++z, highidx, nodemask),	\
			zone = zonelist_zone(z))


/**
 * for_each_zone_zonelist - helper macro to iterate over valid zones in a zonelist at or below a given zone index
 * @zone: The current zone in the iterator
 * @z: The current pointer within zonelist->zones being iterated
 * @zlist: The zonelist being iterated
 * @highidx: The zone index of the highest zone to return
 *
 * This iterator iterates through all zones at or below a given zone index.
 */
#define for_each_zone_zonelist(zone, z, zlist, highidx) \
	for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, NULL)

/* Whether the 'nodes' are all movable nodes */
static inline bool movable_only_nodes(nodemask_t *nodes)
{
	struct zonelist *zonelist;
	struct zoneref *z;
	int nid;

	if (nodes_empty(*nodes))
		return false;

	/*
	 * We can choose an arbitrary node from the nodemask to get a
	 * zonelist as they are interlinked. We just need to find
	 * at least one zone that can satisfy kernel allocations.
	 */
	nid = first_node(*nodes);
	zonelist = &NODE_DATA(nid)->node_zonelists[ZONELIST_FALLBACK];
	z = first_zones_zonelist(zonelist, ZONE_NORMAL, nodes);
	return !z->zone;
}
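
/*
 * Example (illustrative sketch): walking the candidate zones for an
 * allocation the way the page allocator does, highest eligible zone
 * first.  node_zonelist() and gfp_zone() are declared in <linux/gfp.h>.
 *
 *	struct zonelist *zonelist = node_zonelist(nid, gfp_mask);
 *	struct zoneref *z;
 *	struct zone *zone;
 *
 *	for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) {
 *		if (zone_watermark_ok(zone, order, low_wmark_pages(zone),
 *				      gfp_zone(gfp_mask), 0))
 *			break;
 *	}
 *
 * This is a simplified outline, not the actual fast path in
 * mm/page_alloc.c.
 */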

#ifdef CONFIG_SPARSEMEM
#include <asm/sparsemem.h>
#endif

#ifdef CONFIG_FLATMEM
#define pfn_to_nid(pfn)		(0)
#endif

#ifdef CONFIG_SPARSEMEM

/*
 * PA_SECTION_SHIFT		physical address to/from section number
 * PFN_SECTION_SHIFT		pfn to/from section number
 */
#define PA_SECTION_SHIFT	(SECTION_SIZE_BITS)
#define PFN_SECTION_SHIFT	(SECTION_SIZE_BITS - PAGE_SHIFT)

#define NR_MEM_SECTIONS		(1UL << SECTIONS_SHIFT)

#define PAGES_PER_SECTION	(1UL << PFN_SECTION_SHIFT)
#define PAGE_SECTION_MASK	(~(PAGES_PER_SECTION-1))

#define SECTION_BLOCKFLAGS_BITS \
	((1UL << (PFN_SECTION_SHIFT - pageblock_order)) * NR_PAGEBLOCK_BITS)

#if (MAX_ORDER - 1 + PAGE_SHIFT) > SECTION_SIZE_BITS
#error Allocator MAX_ORDER exceeds SECTION_SIZE
#endif

static inline unsigned long pfn_to_section_nr(unsigned long pfn)
{
	return pfn >> PFN_SECTION_SHIFT;
}
static inline unsigned long section_nr_to_pfn(unsigned long sec)
{
	return sec << PFN_SECTION_SHIFT;
}

#define SECTION_ALIGN_UP(pfn)	(((pfn) + PAGES_PER_SECTION - 1) & PAGE_SECTION_MASK)
#define SECTION_ALIGN_DOWN(pfn)	((pfn) & PAGE_SECTION_MASK)

#define SUBSECTION_SHIFT 21
#define SUBSECTION_SIZE (1UL << SUBSECTION_SHIFT)

#define PFN_SUBSECTION_SHIFT (SUBSECTION_SHIFT - PAGE_SHIFT)
#define PAGES_PER_SUBSECTION (1UL << PFN_SUBSECTION_SHIFT)
#define PAGE_SUBSECTION_MASK (~(PAGES_PER_SUBSECTION-1))

#if SUBSECTION_SHIFT > SECTION_SIZE_BITS
#error Subsection size exceeds section size
#else
#define SUBSECTIONS_PER_SECTION (1UL << (SECTION_SIZE_BITS - SUBSECTION_SHIFT))
#endif

#define SUBSECTION_ALIGN_UP(pfn) ALIGN((pfn), PAGES_PER_SUBSECTION)
#define SUBSECTION_ALIGN_DOWN(pfn) ((pfn) & PAGE_SUBSECTION_MASK)

struct mem_section_usage {
#ifdef CONFIG_SPARSEMEM_VMEMMAP
	DECLARE_BITMAP(subsection_map, SUBSECTIONS_PER_SECTION);
#endif
	/* See declaration of similar field in struct zone */
	unsigned long pageblock_flags[0];
};

void subsection_map_init(unsigned long pfn, unsigned long nr_pages);

struct page;
struct page_ext;
struct mem_section {
	/*
	 * This is, logically, a pointer to an array of struct
	 * pages.  However, it is stored with some other magic.
	 * (see sparse.c::sparse_init_one_section())
	 *
	 * Additionally during early boot we encode node id of
	 * the location of the section here to guide allocation.
	 * (see sparse.c::memory_present())
	 *
	 * Making it a UL at least makes someone do a cast
	 * before using it wrong.
	 */
	unsigned long section_mem_map;

	struct mem_section_usage *usage;
#ifdef CONFIG_PAGE_EXTENSION
	/*
	 * With SPARSEMEM, pgdat doesn't have a page_ext pointer; we use
	 * the section instead (see page_ext.h about this).
	 */
	struct page_ext *page_ext;
	unsigned long pad;
#endif
	/*
	 * WARNING: mem_section must be a power-of-2 in size for the
	 * calculation and use of SECTION_ROOT_MASK to make sense.
	 */
};
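
/*
 * Example (illustrative): SECTION_SIZE_BITS is arch-specific.  With
 * SECTION_SIZE_BITS == 27 and 4 KiB pages (PAGE_SHIFT == 12), as on
 * x86-64 for instance,
 *
 *	PFN_SECTION_SHIFT = 27 - 12 = 15
 *	PAGES_PER_SECTION = 1 << 15 = 32768 pages   (128 MiB per section)
 *	pfn_to_section_nr(0x12345) = 0x12345 >> 15 = 2
 *
 * and a subsection (SUBSECTION_SHIFT == 21) covers 2 MiB, i.e. 512 pages.
 */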

#ifdef CONFIG_SPARSEMEM_EXTREME
#define SECTIONS_PER_ROOT	(PAGE_SIZE / sizeof (struct mem_section))
#else
#define SECTIONS_PER_ROOT	1
#endif

#define SECTION_NR_TO_ROOT(sec)	((sec) / SECTIONS_PER_ROOT)
#define NR_SECTION_ROOTS	DIV_ROUND_UP(NR_MEM_SECTIONS, SECTIONS_PER_ROOT)
#define SECTION_ROOT_MASK	(SECTIONS_PER_ROOT - 1)

#ifdef CONFIG_SPARSEMEM_EXTREME
extern struct mem_section **mem_section;
#else
extern struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT];
#endif

static inline unsigned long *section_to_usemap(struct mem_section *ms)
{
	return ms->usage->pageblock_flags;
}

static inline struct mem_section *__nr_to_section(unsigned long nr)
{
#ifdef CONFIG_SPARSEMEM_EXTREME
	if (!mem_section)
		return NULL;
#endif
	if (!mem_section[SECTION_NR_TO_ROOT(nr)])
		return NULL;
	return &mem_section[SECTION_NR_TO_ROOT(nr)][nr & SECTION_ROOT_MASK];
}
extern size_t mem_section_usage_size(void);

/*
 * We use the lower bits of the mem_map pointer to store
 * a little bit of information.  The pointer is calculated
 * as mem_map - section_nr_to_pfn(pnum).  The result is
 * aligned to the minimum alignment of the two values:
 *   1. All mem_map arrays are page-aligned.
 *   2. section_nr_to_pfn() always clears PFN_SECTION_SHIFT
 *      lowest bits.  PFN_SECTION_SHIFT is arch-specific
 *      (equal to SECTION_SIZE_BITS - PAGE_SHIFT), and the
 *      worst combination is powerpc with 256k pages,
 *      which results in PFN_SECTION_SHIFT equal to 6.
 * To sum it up, at least 6 bits are available.
 */
#define SECTION_MARKED_PRESENT		(1UL<<0)
#define SECTION_HAS_MEM_MAP		(1UL<<1)
#define SECTION_IS_ONLINE		(1UL<<2)
#define SECTION_IS_EARLY		(1UL<<3)
#define SECTION_TAINT_ZONE_DEVICE	(1UL<<4)
#define SECTION_MAP_LAST_BIT		(1UL<<5)
#define SECTION_MAP_MASK		(~(SECTION_MAP_LAST_BIT-1))
#define SECTION_NID_SHIFT		6

static inline struct page *__section_mem_map_addr(struct mem_section *section)
{
	unsigned long map = section->section_mem_map;
	map &= SECTION_MAP_MASK;
	return (struct page *)map;
}

static inline int present_section(struct mem_section *section)
{
	return (section && (section->section_mem_map & SECTION_MARKED_PRESENT));
}

static inline int present_section_nr(unsigned long nr)
{
	return present_section(__nr_to_section(nr));
}

static inline int valid_section(struct mem_section *section)
{
	return (section && (section->section_mem_map & SECTION_HAS_MEM_MAP));
}

static inline int early_section(struct mem_section *section)
{
	return (section && (section->section_mem_map & SECTION_IS_EARLY));
}

static inline int valid_section_nr(unsigned long nr)
{
	return valid_section(__nr_to_section(nr));
}

static inline int online_section(struct mem_section *section)
{
	return (section && (section->section_mem_map & SECTION_IS_ONLINE));
}

static inline int online_device_section(struct mem_section *section)
{
	unsigned long flags = SECTION_IS_ONLINE | SECTION_TAINT_ZONE_DEVICE;

	return section && ((section->section_mem_map & flags) == flags);
}

static inline int online_section_nr(unsigned long nr)
{
	return online_section(__nr_to_section(nr));
}
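
/*
 * Example (illustrative): because section_mem_map stores
 * "mem_map - section_nr_to_pfn(pnum)" with flags in the low bits, the
 * struct page for a pfn can be found by masking the flags off and
 * indexing with the pfn itself, roughly:
 *
 *	struct mem_section *ms = __pfn_to_section(pfn);
 *	struct page *page = __section_mem_map_addr(ms) + pfn;
 *
 * This mirrors what the classic SPARSEMEM __pfn_to_page() does (see
 * asm-generic/memory_model.h); SPARSEMEM_VMEMMAP maps pages linearly
 * instead.  Shown here only as a sketch.
 */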

#ifdef CONFIG_MEMORY_HOTPLUG
void online_mem_sections(unsigned long start_pfn, unsigned long end_pfn);
void offline_mem_sections(unsigned long start_pfn, unsigned long end_pfn);
#endif

static inline struct mem_section *__pfn_to_section(unsigned long pfn)
{
	return __nr_to_section(pfn_to_section_nr(pfn));
}

extern unsigned long __highest_present_section_nr;

static inline int subsection_map_index(unsigned long pfn)
{
	return (pfn & ~(PAGE_SECTION_MASK)) / PAGES_PER_SUBSECTION;
}

#ifdef CONFIG_SPARSEMEM_VMEMMAP
static inline int pfn_section_valid(struct mem_section *ms, unsigned long pfn)
{
	int idx = subsection_map_index(pfn);

	return test_bit(idx, ms->usage->subsection_map);
}
#else
static inline int pfn_section_valid(struct mem_section *ms, unsigned long pfn)
{
	return 1;
}
#endif

#ifndef CONFIG_HAVE_ARCH_PFN_VALID
/**
 * pfn_valid - check if there is a valid memory map entry for a PFN
 * @pfn: the page frame number to check
 *
 * Check if there is a valid memory map entry aka struct page for the @pfn.
 * Note that availability of the memory map entry does not imply that
 * there is actual usable memory at that @pfn. The struct page may
 * represent a hole or an unusable page frame.
 *
 * Return: 1 for PFNs that have memory map entries and 0 otherwise
 */
static inline int pfn_valid(unsigned long pfn)
{
	struct mem_section *ms;

	/*
	 * Ensure the upper PAGE_SHIFT bits are clear in the
	 * pfn. Else it might lead to false positives when
	 * some of the upper bits are set, but the lower bits
	 * match a valid pfn.
	 */
	if (PHYS_PFN(PFN_PHYS(pfn)) != pfn)
		return 0;

	if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
		return 0;
	ms = __pfn_to_section(pfn);
	if (!valid_section(ms))
		return 0;
	/*
	 * Traditionally early sections always returned pfn_valid() for
	 * the entire section-sized span.
	 */
	return early_section(ms) || pfn_section_valid(ms, pfn);
}
#endif

static inline int pfn_in_present_section(unsigned long pfn)
{
	if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
		return 0;
	return present_section(__pfn_to_section(pfn));
}

static inline unsigned long next_present_section_nr(unsigned long section_nr)
{
	while (++section_nr <= __highest_present_section_nr) {
		if (present_section_nr(section_nr))
			return section_nr;
	}

	return -1;
}

/*
 * These are _only_ used during initialisation, therefore they
 * can use __initdata ...  They could have names to indicate
 * this restriction.
 */
#ifdef CONFIG_NUMA
#define pfn_to_nid(pfn)							\
({									\
	unsigned long __pfn_to_nid_pfn = (pfn);				\
	page_to_nid(pfn_to_page(__pfn_to_nid_pfn));			\
})
#else
#define pfn_to_nid(pfn)		(0)
#endif

void sparse_init(void);
#else
#define sparse_init()	do {} while (0)
#define sparse_index_init(_sec, _nid)  do {} while (0)
#define pfn_in_present_section pfn_valid
#define subsection_map_init(_pfn, _nr_pages) do {} while (0)
#endif /* CONFIG_SPARSEMEM */

#endif /* !__GENERATING_BOUNDS_H */
#endif /* !__ASSEMBLY__ */
#endif /* _LINUX_MMZONE_H */