/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MMZONE_H
#define _LINUX_MMZONE_H

#ifndef __ASSEMBLY__
#ifndef __GENERATING_BOUNDS_H

#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/bitops.h>
#include <linux/cache.h>
#include <linux/threads.h>
#include <linux/numa.h>
#include <linux/init.h>
#include <linux/seqlock.h>
#include <linux/nodemask.h>
#include <linux/pageblock-flags.h>
#include <linux/page-flags-layout.h>
#include <linux/atomic.h>
#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <asm/page.h>

/* Free memory management - zoned buddy allocator. */
#ifndef CONFIG_FORCE_MAX_ZONEORDER
#define MAX_ORDER 11
#else
#define MAX_ORDER CONFIG_FORCE_MAX_ZONEORDER
#endif
#define MAX_ORDER_NR_PAGES (1 << (MAX_ORDER - 1))

/*
 * PAGE_ALLOC_COSTLY_ORDER is the order at which allocations are deemed
 * costly to service. That is between allocation orders which should
 * coalesce naturally under reasonable reclaim pressure and those which
 * will not.
 */
#define PAGE_ALLOC_COSTLY_ORDER 3
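
/*
 * Illustrative arithmetic (assuming 4 KiB pages, which this header does not
 * mandate): with the default MAX_ORDER of 11 the largest buddy allocation is
 * order 10, i.e. MAX_ORDER_NR_PAGES = 1 << 10 = 1024 pages = 4 MiB, while
 * PAGE_ALLOC_COSTLY_ORDER = 3 marks allocations above 8 contiguous pages
 * (32 KiB) as "costly".
 */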

enum migratetype {
	MIGRATE_UNMOVABLE,
	MIGRATE_MOVABLE,
	MIGRATE_RECLAIMABLE,
	MIGRATE_PCPTYPES,	/* the number of types on the pcp lists */
	MIGRATE_HIGHATOMIC = MIGRATE_PCPTYPES,
#ifdef CONFIG_CMA
	/*
	 * MIGRATE_CMA migration type is designed to mimic the way
	 * ZONE_MOVABLE works. Only movable pages can be allocated
	 * from MIGRATE_CMA pageblocks and the page allocator never
	 * implicitly changes the migration type of a MIGRATE_CMA pageblock.
	 *
	 * The way to use it is to change the migratetype of a range of
	 * pageblocks to MIGRATE_CMA, which can be done by the
	 * __free_pageblock_cma() function. What is important though
	 * is that a range of pageblocks must be aligned to
	 * MAX_ORDER_NR_PAGES should the biggest page be bigger than
	 * a single pageblock.
	 */
	MIGRATE_CMA,
#endif
#ifdef CONFIG_MEMORY_ISOLATION
	MIGRATE_ISOLATE,	/* can't allocate from here */
#endif
	MIGRATE_TYPES
};

/* In mm/page_alloc.c; keep in sync also with show_migration_types() there */
extern const char * const migratetype_names[MIGRATE_TYPES];

#ifdef CONFIG_CMA
# define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA)
# define is_migrate_cma_page(_page) (get_pageblock_migratetype(_page) == MIGRATE_CMA)
#else
# define is_migrate_cma(migratetype) false
# define is_migrate_cma_page(_page) false
#endif

static inline bool is_migrate_movable(int mt)
{
	return is_migrate_cma(mt) || mt == MIGRATE_MOVABLE;
}

#define for_each_migratetype_order(order, type) \
	for (order = 0; order < MAX_ORDER; order++) \
		for (type = 0; type < MIGRATE_TYPES; type++)

extern int page_group_by_mobility_disabled;

#define MIGRATETYPE_MASK ((1UL << PB_migratetype_bits) - 1)

#define get_pageblock_migratetype(page)					\
	get_pfnblock_flags_mask(page, page_to_pfn(page), MIGRATETYPE_MASK)

struct free_area {
	struct list_head	free_list[MIGRATE_TYPES];
	unsigned long		nr_free;
};

static inline struct page *get_page_from_free_area(struct free_area *area,
					    int migratetype)
{
	return list_first_entry_or_null(&area->free_list[migratetype],
					struct page, lru);
}

static inline bool free_area_empty(struct free_area *area, int migratetype)
{
	return list_empty(&area->free_list[migratetype]);
}
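
/*
 * Illustrative sketch (not part of this header's API): a caller holding
 * zone->lock can walk every (order, migratetype) free list of a zone's
 * free_area[] array (declared in struct zone below), e.g.:
 *
 *	unsigned int order;
 *	int type;
 *
 *	for_each_migratetype_order(order, type) {
 *		struct free_area *area = &zone->free_area[order];
 *
 *		if (!free_area_empty(area, type))
 *			pr_info("order %u, type %d has free blocks\n",
 *				order, type);
 *	}
 */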

struct pglist_data;

/*
 * zone->lock and the zone lru_lock are two of the hottest locks in the kernel.
 * So add a wild amount of padding here to ensure that they fall into separate
 * cachelines. There are very few zone structures in the machine, so space
 * consumption is not a concern here.
 */
#if defined(CONFIG_SMP)
struct zone_padding {
	char x[0];
} ____cacheline_internodealigned_in_smp;
#define ZONE_PADDING(name)	struct zone_padding name;
#else
#define ZONE_PADDING(name)
#endif

#ifdef CONFIG_NUMA
enum numa_stat_item {
	NUMA_HIT,		/* allocated in intended node */
	NUMA_MISS,		/* allocated in non-intended node */
	NUMA_FOREIGN,		/* was intended here, hit elsewhere */
	NUMA_INTERLEAVE_HIT,	/* interleaver preferred this zone */
	NUMA_LOCAL,		/* allocation from local node */
	NUMA_OTHER,		/* allocation from other node */
	NR_VM_NUMA_STAT_ITEMS
};
#else
#define NR_VM_NUMA_STAT_ITEMS 0
#endif

enum zone_stat_item {
	/* First 128 byte cacheline (assuming 64 bit words) */
	NR_FREE_PAGES,
	NR_ZONE_LRU_BASE,	/* Used only for compaction and reclaim retry */
	NR_ZONE_INACTIVE_ANON = NR_ZONE_LRU_BASE,
	NR_ZONE_ACTIVE_ANON,
	NR_ZONE_INACTIVE_FILE,
	NR_ZONE_ACTIVE_FILE,
	NR_ZONE_UNEVICTABLE,
	NR_ZONE_WRITE_PENDING,	/* Count of dirty, writeback and unstable pages */
	NR_MLOCK,		/* mlock()ed pages found and moved off LRU */
	NR_PAGETABLE,		/* used for pagetables */
	/* Second 128 byte cacheline */
	NR_BOUNCE,
#if IS_ENABLED(CONFIG_ZSMALLOC)
	NR_ZSPAGES,		/* allocated in zsmalloc */
#endif
	NR_FREE_CMA_PAGES,
	NR_VM_ZONE_STAT_ITEMS };

enum node_stat_item {
	NR_LRU_BASE,
	NR_INACTIVE_ANON = NR_LRU_BASE,	/* must match order of LRU_[IN]ACTIVE */
	NR_ACTIVE_ANON,		/*  "     "     "   "       "         */
	NR_INACTIVE_FILE,	/*  "     "     "   "       "         */
	NR_ACTIVE_FILE,		/*  "     "     "   "       "         */
	NR_UNEVICTABLE,		/*  "     "     "   "       "         */
	NR_SLAB_RECLAIMABLE_B,
	NR_SLAB_UNRECLAIMABLE_B,
	NR_ISOLATED_ANON,	/* Temporary isolated pages from anon lru */
	NR_ISOLATED_FILE,	/* Temporary isolated pages from file lru */
	WORKINGSET_NODES,
	WORKINGSET_REFAULT_BASE,
	WORKINGSET_REFAULT_ANON = WORKINGSET_REFAULT_BASE,
	WORKINGSET_REFAULT_FILE,
	WORKINGSET_ACTIVATE_BASE,
	WORKINGSET_ACTIVATE_ANON = WORKINGSET_ACTIVATE_BASE,
	WORKINGSET_ACTIVATE_FILE,
	WORKINGSET_RESTORE_BASE,
	WORKINGSET_RESTORE_ANON = WORKINGSET_RESTORE_BASE,
	WORKINGSET_RESTORE_FILE,
	WORKINGSET_NODERECLAIM,
	NR_ANON_MAPPED,		/* Mapped anonymous pages */
	NR_FILE_MAPPED,		/* pagecache pages mapped into pagetables.
				   only modified from process context */
	NR_FILE_PAGES,
	NR_FILE_DIRTY,
	NR_WRITEBACK,
	NR_WRITEBACK_TEMP,	/* Writeback using temporary buffers */
	NR_SHMEM,		/* shmem pages (includes tmpfs/GEM pages) */
	NR_SHMEM_THPS,
	NR_SHMEM_PMDMAPPED,
	NR_FILE_THPS,
	NR_FILE_PMDMAPPED,
	NR_ANON_THPS,
	NR_VMSCAN_WRITE,
	NR_VMSCAN_IMMEDIATE,	/* Prioritise for reclaim when writeback ends */
	NR_DIRTIED,		/* page dirtyings since bootup */
	NR_WRITTEN,		/* page writings since bootup */
	NR_KERNEL_MISC_RECLAIMABLE,	/* reclaimable non-slab kernel pages */
	NR_FOLL_PIN_ACQUIRED,	/* via: pin_user_page(), gup flag: FOLL_PIN */
	NR_FOLL_PIN_RELEASED,	/* pages returned via unpin_user_page() */
	NR_KERNEL_STACK_KB,	/* measured in KiB */
#if IS_ENABLED(CONFIG_SHADOW_CALL_STACK)
	NR_KERNEL_SCS_KB,	/* measured in KiB */
#endif
	NR_VM_NODE_STAT_ITEMS
};

/*
 * Returns true if the value is measured in bytes (most vmstat values are
 * measured in pages). This defines the API part, the internal representation
 * might be different.
 */
static __always_inline bool vmstat_item_in_bytes(int idx)
{
	/*
	 * Global and per-node slab counters track slab pages.
	 * It's expected that changes are multiples of PAGE_SIZE.
	 * Internally values are stored in pages.
	 *
	 * Per-memcg and per-lruvec counters track memory, consumed
	 * by individual slab objects. These counters are actually
	 * byte-precise.
	 */
	return (idx == NR_SLAB_RECLAIMABLE_B ||
		idx == NR_SLAB_UNRECLAIMABLE_B);
}

/*
 * We do arithmetic on the LRU lists in various places in the code,
 * so it is important to keep the active lists LRU_ACTIVE higher in
 * the array than the corresponding inactive lists, and to keep
 * the *_FILE lists LRU_FILE higher than the corresponding _ANON lists.
 *
 * This has to be kept in sync with the statistics in zone_stat_item
 * above and the descriptions in vmstat_text in mm/vmstat.c
 */
#define LRU_BASE 0
#define LRU_ACTIVE 1
#define LRU_FILE 2

enum lru_list {
	LRU_INACTIVE_ANON = LRU_BASE,
	LRU_ACTIVE_ANON = LRU_BASE + LRU_ACTIVE,
	LRU_INACTIVE_FILE = LRU_BASE + LRU_FILE,
	LRU_ACTIVE_FILE = LRU_BASE + LRU_FILE + LRU_ACTIVE,
	LRU_UNEVICTABLE,
	NR_LRU_LISTS
};
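
/*
 * Illustrative sketch of the arithmetic the comment above relies on: an LRU
 * index can be composed from its parts instead of being spelled out, e.g.
 *
 *	static inline enum lru_list lru_index(bool file, bool active)
 *	{
 *		return LRU_BASE + (file ? LRU_FILE : 0) +
 *		       (active ? LRU_ACTIVE : 0);
 *	}
 *
 * so lru_index(true, true) == LRU_ACTIVE_FILE. The real helpers doing this
 * kind of composition (e.g. page_lru()) live in include/linux/mm_inline.h,
 * not in this header.
 */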

#define for_each_lru(lru) for (lru = 0; lru < NR_LRU_LISTS; lru++)

#define for_each_evictable_lru(lru) for (lru = 0; lru <= LRU_ACTIVE_FILE; lru++)

static inline bool is_file_lru(enum lru_list lru)
{
	return (lru == LRU_INACTIVE_FILE || lru == LRU_ACTIVE_FILE);
}

static inline bool is_active_lru(enum lru_list lru)
{
	return (lru == LRU_ACTIVE_ANON || lru == LRU_ACTIVE_FILE);
}

#define ANON_AND_FILE 2

enum lruvec_flags {
	LRUVEC_CONGESTED,		/* lruvec has many dirty pages
					 * backed by a congested BDI
					 */
};

struct lruvec {
	struct list_head		lists[NR_LRU_LISTS];
	/*
	 * These track the cost of reclaiming one LRU - file or anon -
	 * over the other. As the observed cost of reclaiming one LRU
	 * increases, the reclaim scan balance tips toward the other.
	 */
	unsigned long			anon_cost;
	unsigned long			file_cost;
	/* Non-resident age, driven by LRU movement */
	atomic_long_t			nonresident_age;
	/* Refaults at the time of last reclaim cycle */
	unsigned long			refaults[ANON_AND_FILE];
	/* Various lruvec state flags (enum lruvec_flags) */
	unsigned long			flags;
#ifdef CONFIG_MEMCG
	struct pglist_data *pgdat;
#endif
};

/* Isolate unmapped pages */
#define ISOLATE_UNMAPPED	((__force isolate_mode_t)0x2)
/* Isolate for asynchronous migration */
#define ISOLATE_ASYNC_MIGRATE	((__force isolate_mode_t)0x4)
/* Isolate unevictable pages */
#define ISOLATE_UNEVICTABLE	((__force isolate_mode_t)0x8)

/* LRU Isolation modes. */
typedef unsigned __bitwise isolate_mode_t;

enum zone_watermarks {
	WMARK_MIN,
	WMARK_LOW,
	WMARK_HIGH,
	NR_WMARK
};

#define min_wmark_pages(z) (z->_watermark[WMARK_MIN] + z->watermark_boost)
#define low_wmark_pages(z) (z->_watermark[WMARK_LOW] + z->watermark_boost)
#define high_wmark_pages(z) (z->_watermark[WMARK_HIGH] + z->watermark_boost)
#define wmark_pages(z, i) (z->_watermark[i] + z->watermark_boost)
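
/*
 * Illustrative sketch (assumed caller, not from this header): watermarks are
 * always read through the macros above so that any temporary watermark_boost
 * is included. A reclaim-side check might look like:
 *
 *	if (zone_page_state(zone, NR_FREE_PAGES) < low_wmark_pages(zone))
 *		wakeup_kswapd(zone, gfp_mask, order, highest_zoneidx);
 *
 * zone_page_state() is declared in include/linux/vmstat.h and wakeup_kswapd()
 * further down in this header; the snippet only shows how the
 * *_wmark_pages() macros are meant to be consumed.
 */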

struct per_cpu_pages {
	int count;		/* number of pages in the list */
	int high;		/* high watermark, emptying needed */
	int batch;		/* chunk size for buddy add/remove */

	/* Lists of pages, one per migrate type stored on the pcp-lists */
	struct list_head lists[MIGRATE_PCPTYPES];
};

struct per_cpu_pageset {
	struct per_cpu_pages pcp;
#ifdef CONFIG_NUMA
	s8 expire;
	u16 vm_numa_stat_diff[NR_VM_NUMA_STAT_ITEMS];
#endif
#ifdef CONFIG_SMP
	s8 stat_threshold;
	s8 vm_stat_diff[NR_VM_ZONE_STAT_ITEMS];
#endif
};

struct per_cpu_nodestat {
	s8 stat_threshold;
	s8 vm_node_stat_diff[NR_VM_NODE_STAT_ITEMS];
};

#endif /* !__GENERATING_BOUNDS_H */

enum zone_type {
	/*
	 * ZONE_DMA and ZONE_DMA32 are used when there are peripherals not able
	 * to DMA to all of the addressable memory (ZONE_NORMAL).
	 * On architectures where this area covers the whole 32 bit address
	 * space ZONE_DMA32 is used. ZONE_DMA is left for the ones with smaller
	 * DMA addressing constraints. This distinction is important as a 32bit
	 * DMA mask is assumed when ZONE_DMA32 is defined. Some 64-bit
	 * platforms may need both zones as they support peripherals with
	 * different DMA addressing limitations.
	 *
	 * Some examples:
	 *
	 *  - i386 and x86_64 have a fixed 16M ZONE_DMA and ZONE_DMA32 for the
	 *    rest of the lower 4G.
	 *
	 *  - arm only uses ZONE_DMA, the size, up to 4G, may vary depending on
	 *    the specific device.
	 *
	 *  - arm64 has a fixed 1G ZONE_DMA and ZONE_DMA32 for the rest of the
	 *    lower 4G.
	 *
	 *  - powerpc only uses ZONE_DMA, the size, up to 2G, may vary
	 *    depending on the specific device.
	 *
	 *  - s390 uses ZONE_DMA fixed to the lower 2G.
	 *
	 *  - ia64 and riscv only use ZONE_DMA32.
	 *
	 *  - parisc uses neither.
	 */
#ifdef CONFIG_ZONE_DMA
	ZONE_DMA,
#endif
#ifdef CONFIG_ZONE_DMA32
	ZONE_DMA32,
#endif
	/*
	 * Normal addressable memory is in ZONE_NORMAL. DMA operations can be
	 * performed on pages in ZONE_NORMAL if the DMA devices support
	 * transfers to all addressable memory.
	 */
	ZONE_NORMAL,
#ifdef CONFIG_HIGHMEM
	/*
	 * A memory area that is only addressable by the kernel through
	 * mapping portions into its own address space. This is for example
	 * used by i386 to allow the kernel to address the memory beyond
	 * 900MB. The kernel will set up special mappings (page
	 * table entries on i386) for each page that the kernel needs to
	 * access.
	 */
	ZONE_HIGHMEM,
#endif
	/*
	 * ZONE_MOVABLE is similar to ZONE_NORMAL, except that it contains
	 * movable pages with few exceptional cases described below. Main use
	 * cases for ZONE_MOVABLE are to make memory offlining/unplug more
	 * likely to succeed, and to locally limit unmovable allocations - e.g.,
	 * to increase the number of THP/huge pages. Notable special cases are:
	 *
	 * 1. Pinned pages: (long-term) pinning of movable pages might
	 *    essentially turn such pages unmovable. Memory offlining might
	 *    retry a long time.
	 * 2. memblock allocations: kernelcore/movablecore setups might create
	 *    situations where ZONE_MOVABLE contains unmovable allocations
	 *    after boot. Memory offlining and allocations fail early.
	 * 3. Memory holes: kernelcore/movablecore setups might create very rare
	 *    situations where ZONE_MOVABLE contains memory holes after boot,
	 *    for example, if we have sections that are only partially
	 *    populated. Memory offlining and allocations fail early.
	 * 4. PG_hwpoison pages: while poisoned pages can be skipped during
	 *    memory offlining, such pages cannot be allocated.
	 * 5. Unmovable PG_offline pages: in paravirtualized environments,
	 *    hotplugged memory blocks might only partially be managed by the
	 *    buddy (e.g., via XEN-balloon, Hyper-V balloon, virtio-mem). The
	 *    parts not managed by the buddy are unmovable PG_offline pages. In
	 *    some cases (virtio-mem), such pages can be skipped during
	 *    memory offlining, however, cannot be moved/allocated. These
	 *    techniques might use alloc_contig_range() to hide previously
	 *    exposed pages from the buddy again (e.g., to implement some sort
	 *    of memory unplug in virtio-mem).
	 *
	 * In general, no unmovable allocations that degrade memory offlining
	 * should end up in ZONE_MOVABLE. Allocators (like alloc_contig_range())
	 * have to expect that migrating pages in ZONE_MOVABLE can fail (even
	 * if has_unmovable_pages() states that there are no unmovable pages,
	 * there can be false negatives).
	 */
	ZONE_MOVABLE,
#ifdef CONFIG_ZONE_DEVICE
	ZONE_DEVICE,
#endif
	__MAX_NR_ZONES

};

#ifndef __GENERATING_BOUNDS_H

#define ASYNC_AND_SYNC 2

struct zone {
	/* Read-mostly fields */

	/* zone watermarks, access with *_wmark_pages(zone) macros */
	unsigned long _watermark[NR_WMARK];
	unsigned long watermark_boost;

	unsigned long nr_reserved_highatomic;

	/*
	 * We don't know if the memory that we're going to allocate will be
	 * freeable or/and it will be released eventually, so to avoid totally
	 * wasting several GB of ram we must reserve some of the lower zone
	 * memory (otherwise we risk running OOM on the lower zones despite
	 * there being tons of freeable ram on the higher zones). This array is
	 * recalculated at runtime if the sysctl_lowmem_reserve_ratio sysctl
	 * changes.
	 */
	long lowmem_reserve[MAX_NR_ZONES];

#ifdef CONFIG_NUMA
	int node;
#endif
	struct pglist_data	*zone_pgdat;
	struct per_cpu_pageset __percpu *pageset;

#ifndef CONFIG_SPARSEMEM
	/*
	 * Flags for a pageblock_nr_pages block. See pageblock-flags.h.
	 * In SPARSEMEM, this map is stored in struct mem_section
	 */
	unsigned long		*pageblock_flags;
#endif /* CONFIG_SPARSEMEM */

	/* zone_start_pfn == zone_start_paddr >> PAGE_SHIFT */
	unsigned long		zone_start_pfn;

	/*
	 * spanned_pages is the total pages spanned by the zone, including
	 * holes, which is calculated as:
	 *	spanned_pages = zone_end_pfn - zone_start_pfn;
	 *
	 * present_pages is physical pages existing within the zone, which
	 * is calculated as:
	 *	present_pages = spanned_pages - absent_pages(pages in holes);
	 *
	 * managed_pages is present pages managed by the buddy system, which
	 * is calculated as (reserved_pages includes pages allocated by the
	 * bootmem allocator):
	 *	managed_pages = present_pages - reserved_pages;
	 *
	 * So present_pages may be used by memory hotplug or memory power
	 * management logic to figure out unmanaged pages by checking
	 * (present_pages - managed_pages). And managed_pages should be used
	 * by page allocator and vm scanner to calculate all kinds of watermarks
	 * and thresholds.
	 *
	 * Locking rules:
	 *
	 * zone_start_pfn and spanned_pages are protected by span_seqlock.
	 * It is a seqlock because it has to be read outside of zone->lock,
	 * and it is done in the main allocator path. But, it is written
	 * quite infrequently.
	 *
	 * The span_seqlock is declared along with zone->lock because it is
	 * frequently read in proximity to zone->lock. It's good to
	 * give them a chance of being in the same cacheline.
	 *
	 * Write access to present_pages at runtime should be protected by
	 * mem_hotplug_begin/end(). Any reader who can't tolerate drift of
	 * present_pages should use get_online_mems() to get a stable value.
	 */
	atomic_long_t		managed_pages;
	unsigned long		spanned_pages;
	unsigned long		present_pages;

	const char		*name;

#ifdef CONFIG_MEMORY_ISOLATION
	/*
	 * Number of isolated pageblocks. It is used to solve an incorrect
	 * freepage counting problem due to racy retrieving migratetype
	 * of pageblock. Protected by zone->lock.
	 */
	unsigned long		nr_isolate_pageblock;
#endif

#ifdef CONFIG_MEMORY_HOTPLUG
	/* see spanned/present_pages for more description */
	seqlock_t		span_seqlock;
#endif

	int initialized;

	/* Write-intensive fields used from the page allocator */
	ZONE_PADDING(_pad1_)

	/* free areas of different sizes */
	struct free_area	free_area[MAX_ORDER];

	/* zone flags, see below */
	unsigned long		flags;

	/* Primarily protects free_area */
	spinlock_t		lock;

	/* Write-intensive fields used by compaction and vmstats. */
	ZONE_PADDING(_pad2_)

	/*
	 * When free pages are below this point, additional steps are taken
	 * when reading the number of free pages to avoid per-cpu counter
	 * drift allowing watermarks to be breached
	 */
	unsigned long percpu_drift_mark;

#if defined CONFIG_COMPACTION || defined CONFIG_CMA
	/* pfn where compaction free scanner should start */
	unsigned long		compact_cached_free_pfn;
	/* pfn where compaction migration scanner should start */
	unsigned long		compact_cached_migrate_pfn[ASYNC_AND_SYNC];
	unsigned long		compact_init_migrate_pfn;
	unsigned long		compact_init_free_pfn;
#endif

#ifdef CONFIG_COMPACTION
	/*
	 * On compaction failure, 1<<compact_defer_shift compactions
	 * are skipped before trying again. The number attempted since
	 * last failure is tracked with compact_considered.
	 * compact_order_failed is the minimum compaction failed order.
	 */
	unsigned int		compact_considered;
	unsigned int		compact_defer_shift;
	int			compact_order_failed;
#endif

#if defined CONFIG_COMPACTION || defined CONFIG_CMA
	/* Set to true when the PG_migrate_skip bits should be cleared */
	bool			compact_blockskip_flush;
#endif

	bool			contiguous;

	ZONE_PADDING(_pad3_)
	/* Zone statistics */
	atomic_long_t		vm_stat[NR_VM_ZONE_STAT_ITEMS];
	atomic_long_t		vm_numa_stat[NR_VM_NUMA_STAT_ITEMS];
} ____cacheline_internodealigned_in_smp;

enum pgdat_flags {
	PGDAT_DIRTY,			/* reclaim scanning has recently found
					 * many dirty file pages at the tail
					 * of the LRU.
					 */
	PGDAT_WRITEBACK,		/* reclaim scanning has recently found
					 * many pages under writeback
					 */
	PGDAT_RECLAIM_LOCKED,		/* prevents concurrent reclaim */
};

enum zone_flags {
	ZONE_BOOSTED_WATERMARK,		/* zone recently boosted watermarks.
					 * Cleared when kswapd is woken.
					 */
};

static inline unsigned long zone_managed_pages(struct zone *zone)
{
	return (unsigned long)atomic_long_read(&zone->managed_pages);
}

static inline unsigned long zone_end_pfn(const struct zone *zone)
{
	return zone->zone_start_pfn + zone->spanned_pages;
}

static inline bool zone_spans_pfn(const struct zone *zone, unsigned long pfn)
{
	return zone->zone_start_pfn <= pfn && pfn < zone_end_pfn(zone);
}

static inline bool zone_is_initialized(struct zone *zone)
{
	return zone->initialized;
}

static inline bool zone_is_empty(struct zone *zone)
{
	return zone->spanned_pages == 0;
}

/*
 * Return true if [start_pfn, start_pfn + nr_pages) range has a non-empty
 * intersection with the given zone
 */
static inline bool zone_intersects(struct zone *zone,
		unsigned long start_pfn, unsigned long nr_pages)
{
	if (zone_is_empty(zone))
		return false;
	if (start_pfn >= zone_end_pfn(zone) ||
	    start_pfn + nr_pages <= zone->zone_start_pfn)
		return false;

	return true;
}
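
/*
 * Worked example of the spanned/present/managed relationship documented in
 * struct zone above (numbers invented for illustration): a zone whose span is
 * PFNs [0x100000, 0x140000) has spanned_pages = 0x40000; if 0x2000 of those
 * PFNs fall into holes, present_pages = 0x3e000; if the boot allocator
 * reserved another 0x1000 pages, managed_pages = 0x3d000. Note that
 * zone_spans_pfn() only checks the span, so it returns true even for a PFN
 * that sits in one of the holes.
 */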

/*
 * The "priority" of VM scanning is how much of the queues we will scan in one
 * go. A value of 12 for DEF_PRIORITY implies that we will scan 1/4096th of the
 * queues ("queue_length >> 12") during an aging round.
 */
#define DEF_PRIORITY 12

/* Maximum number of zones on a zonelist */
#define MAX_ZONES_PER_ZONELIST (MAX_NUMNODES * MAX_NR_ZONES)

enum {
	ZONELIST_FALLBACK,	/* zonelist with fallback */
#ifdef CONFIG_NUMA
	/*
	 * The NUMA zonelists are doubled because we need zonelists that
	 * restrict the allocations to a single node for __GFP_THISNODE.
	 */
	ZONELIST_NOFALLBACK,	/* zonelist without fallback (__GFP_THISNODE) */
#endif
	MAX_ZONELISTS
};

/*
 * This struct contains information about a zone in a zonelist. It is stored
 * here to avoid dereferences into large structures and lookups of tables
 */
struct zoneref {
	struct zone *zone;	/* Pointer to actual zone */
	int zone_idx;		/* zone_idx(zoneref->zone) */
};

/*
 * One allocation request operates on a zonelist. A zonelist
 * is a list of zones, the first one is the 'goal' of the
 * allocation, the other zones are fallback zones, in decreasing
 * priority.
 *
 * To speed the reading of the zonelist, the zonerefs contain the zone index
 * of the entry being read. Helper functions to access information given
 * a struct zoneref are
 *
 * zonelist_zone()	- Return the struct zone * for an entry in _zonerefs
 * zonelist_zone_idx()	- Return the index of the zone for an entry
 * zonelist_node_idx()	- Return the index of the node for an entry
 */
struct zonelist {
	struct zoneref _zonerefs[MAX_ZONES_PER_ZONELIST + 1];
};

#ifndef CONFIG_DISCONTIGMEM
/* The array of struct pages - for discontigmem use pgdat->lmem_map */
extern struct page *mem_map;
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
struct deferred_split {
	spinlock_t split_queue_lock;
	struct list_head split_queue;
	unsigned long split_queue_len;
};
#endif

/*
 * On NUMA machines, each NUMA node would have a pg_data_t to describe
 * its memory layout. On UMA machines there is a single pglist_data which
 * describes the whole memory.
 *
 * Memory statistics and page replacement data structures are maintained on a
 * per-zone basis.
 */
typedef struct pglist_data {
	/*
	 * node_zones contains just the zones for THIS node. Not all of the
	 * zones may be populated, but it is the full list. It is referenced by
	 * this node's node_zonelists as well as other nodes' node_zonelists.
	 */
	struct zone node_zones[MAX_NR_ZONES];

	/*
	 * node_zonelists contains references to all zones in all nodes.
	 * Generally the first zones will be references to this node's
	 * node_zones.
	 */
	struct zonelist node_zonelists[MAX_ZONELISTS];

	int nr_zones; /* number of populated zones in this node */
#ifdef CONFIG_FLAT_NODE_MEM_MAP	/* means !SPARSEMEM */
	struct page *node_mem_map;
#ifdef CONFIG_PAGE_EXTENSION
	struct page_ext *node_page_ext;
#endif
#endif
#if defined(CONFIG_MEMORY_HOTPLUG) || defined(CONFIG_DEFERRED_STRUCT_PAGE_INIT)
	/*
	 * Must be held any time you expect node_start_pfn,
	 * node_present_pages, node_spanned_pages or nr_zones to stay constant.
	 * Also synchronizes pgdat->first_deferred_pfn during deferred page
	 * init.
	 *
	 * pgdat_resize_lock() and pgdat_resize_unlock() are provided to
	 * manipulate node_size_lock without checking for CONFIG_MEMORY_HOTPLUG
	 * or CONFIG_DEFERRED_STRUCT_PAGE_INIT.
	 *
	 * Nests above zone->lock and zone->span_seqlock
	 */
	spinlock_t node_size_lock;
#endif
	unsigned long node_start_pfn;
	unsigned long node_present_pages; /* total number of physical pages */
	unsigned long node_spanned_pages; /* total size of physical page
					     range, including holes */
	int node_id;
	wait_queue_head_t kswapd_wait;
	wait_queue_head_t pfmemalloc_wait;
	struct task_struct *kswapd;	/* Protected by
					   mem_hotplug_begin/end() */
	int kswapd_order;
	enum zone_type kswapd_highest_zoneidx;

	int kswapd_failures;		/* Number of 'reclaimed == 0' runs */

#ifdef CONFIG_COMPACTION
	int kcompactd_max_order;
	enum zone_type kcompactd_highest_zoneidx;
	wait_queue_head_t kcompactd_wait;
	struct task_struct *kcompactd;
#endif
	/*
	 * This is a per-node reserve of pages that are not available
	 * to userspace allocations.
	 */
	unsigned long		totalreserve_pages;

#ifdef CONFIG_NUMA
	/*
	 * node reclaim becomes active if more unmapped pages exist.
	 */
	unsigned long		min_unmapped_pages;
	unsigned long		min_slab_pages;
#endif /* CONFIG_NUMA */

	/* Write-intensive fields used by page reclaim */
	ZONE_PADDING(_pad1_)
	spinlock_t		lru_lock;

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
	/*
	 * If memory initialisation on large machines is deferred then this
	 * is the first PFN that needs to be initialised.
	 */
	unsigned long first_deferred_pfn;
#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	struct deferred_split deferred_split_queue;
#endif

	/* Fields commonly accessed by the page reclaim scanner */

	/*
	 * NOTE: THIS IS UNUSED IF MEMCG IS ENABLED.
	 *
	 * Use mem_cgroup_lruvec() to look up lruvecs.
	 */
	struct lruvec		__lruvec;

	unsigned long		flags;

	ZONE_PADDING(_pad2_)

	/* Per-node vmstats */
	struct per_cpu_nodestat __percpu *per_cpu_nodestats;
	atomic_long_t		vm_stat[NR_VM_NODE_STAT_ITEMS];
} pg_data_t;

#define node_present_pages(nid)	(NODE_DATA(nid)->node_present_pages)
#define node_spanned_pages(nid)	(NODE_DATA(nid)->node_spanned_pages)
#ifdef CONFIG_FLAT_NODE_MEM_MAP
#define pgdat_page_nr(pgdat, pagenr)	((pgdat)->node_mem_map + (pagenr))
#else
#define pgdat_page_nr(pgdat, pagenr)	pfn_to_page((pgdat)->node_start_pfn + (pagenr))
#endif
#define nid_page_nr(nid, pagenr)	pgdat_page_nr(NODE_DATA(nid),(pagenr))

#define node_start_pfn(nid)	(NODE_DATA(nid)->node_start_pfn)
#define node_end_pfn(nid) pgdat_end_pfn(NODE_DATA(nid))

static inline unsigned long pgdat_end_pfn(pg_data_t *pgdat)
{
	return pgdat->node_start_pfn + pgdat->node_spanned_pages;
}

static inline bool pgdat_is_empty(pg_data_t *pgdat)
{
	return !pgdat->node_start_pfn && !pgdat->node_spanned_pages;
}
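
/*
 * Illustrative sketch (assumed caller, not from this header): NODE_DATA(),
 * defined below for !CONFIG_NEED_MULTIPLE_NODES and by <asm/mmzone.h>
 * otherwise, is the usual way to reach a node's pg_data_t, e.g. to report
 * the PFN range a node spans:
 *
 *	pg_data_t *pgdat = NODE_DATA(nid);
 *
 *	pr_info("node %d: pfns [%lx, %lx), %lu pages present\n", nid,
 *		pgdat->node_start_pfn, pgdat_end_pfn(pgdat),
 *		pgdat->node_present_pages);
 */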

#include <linux/memory_hotplug.h>

void build_all_zonelists(pg_data_t *pgdat);
void wakeup_kswapd(struct zone *zone, gfp_t gfp_mask, int order,
		   enum zone_type highest_zoneidx);
bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
			 int highest_zoneidx, unsigned int alloc_flags,
			 long free_pages);
bool zone_watermark_ok(struct zone *z, unsigned int order,
		unsigned long mark, int highest_zoneidx,
		unsigned int alloc_flags);
bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
		unsigned long mark, int highest_zoneidx);
/*
 * Memory initialization context, used to differentiate memory added by
 * the platform statically or via the memory hotplug interface.
 */
enum meminit_context {
	MEMINIT_EARLY,
	MEMINIT_HOTPLUG,
};

extern void init_currently_empty_zone(struct zone *zone, unsigned long start_pfn,
				     unsigned long size);

extern void lruvec_init(struct lruvec *lruvec);

static inline struct pglist_data *lruvec_pgdat(struct lruvec *lruvec)
{
#ifdef CONFIG_MEMCG
	return lruvec->pgdat;
#else
	return container_of(lruvec, struct pglist_data, __lruvec);
#endif
}

extern unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru, int zone_idx);

#ifdef CONFIG_HAVE_MEMORYLESS_NODES
int local_memory_node(int node_id);
#else
static inline int local_memory_node(int node_id) { return node_id; };
#endif

/*
 * zone_idx() returns 0 for the ZONE_DMA zone, 1 for the ZONE_NORMAL zone, etc.
 */
#define zone_idx(zone)		((zone) - (zone)->zone_pgdat->node_zones)

/*
 * Returns true if a zone has pages managed by the buddy allocator.
 * All the reclaim decisions have to use this function rather than
 * populated_zone(). If the whole zone is reserved then we can easily
 * end up with populated_zone() && !managed_zone().
 */
static inline bool managed_zone(struct zone *zone)
{
	return zone_managed_pages(zone);
}

/* Returns true if a zone has memory */
static inline bool populated_zone(struct zone *zone)
{
	return zone->present_pages;
}

#ifdef CONFIG_NUMA
static inline int zone_to_nid(struct zone *zone)
{
	return zone->node;
}

static inline void zone_set_nid(struct zone *zone, int nid)
{
	zone->node = nid;
}
#else
static inline int zone_to_nid(struct zone *zone)
{
	return 0;
}

static inline void zone_set_nid(struct zone *zone, int nid) {}
#endif

extern int movable_zone;

#ifdef CONFIG_HIGHMEM
static inline int zone_movable_is_highmem(void)
{
#ifdef CONFIG_NEED_MULTIPLE_NODES
	return movable_zone == ZONE_HIGHMEM;
#else
	return (ZONE_MOVABLE - 1) == ZONE_HIGHMEM;
#endif
}
#endif

static inline int is_highmem_idx(enum zone_type idx)
{
#ifdef CONFIG_HIGHMEM
	return (idx == ZONE_HIGHMEM ||
		(idx == ZONE_MOVABLE && zone_movable_is_highmem()));
#else
	return 0;
#endif
}

/**
 * is_highmem - helper function to quickly check if a struct zone is a
 *              highmem zone or not. This is an attempt to keep references
 *              to ZONE_{DMA/NORMAL/HIGHMEM/etc} in general code to a minimum.
 * @zone - pointer to struct zone variable
 */
static inline int is_highmem(struct zone *zone)
{
#ifdef CONFIG_HIGHMEM
	return is_highmem_idx(zone_idx(zone));
#else
	return 0;
#endif
}

/* These two functions are used to setup the per zone pages min values */
struct ctl_table;

int min_free_kbytes_sysctl_handler(struct ctl_table *, int, void *, size_t *,
		loff_t *);
int watermark_scale_factor_sysctl_handler(struct ctl_table *, int, void *,
		size_t *, loff_t *);
extern int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES];
int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int, void *,
		size_t *, loff_t *);
int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *, int,
		void *, size_t *, loff_t *);
int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *, int,
		void *, size_t *, loff_t *);
int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *, int,
		void *, size_t *, loff_t *);
int numa_zonelist_order_handler(struct ctl_table *, int,
		void *, size_t *, loff_t *);
extern int percpu_pagelist_fraction;
extern char numa_zonelist_order[];
#define NUMA_ZONELIST_ORDER_LEN	16

#ifndef CONFIG_NEED_MULTIPLE_NODES

extern struct pglist_data contig_page_data;
#define NODE_DATA(nid)		(&contig_page_data)
#define NODE_MEM_MAP(nid)	mem_map

#else /* CONFIG_NEED_MULTIPLE_NODES */

#include <asm/mmzone.h>

#endif /* !CONFIG_NEED_MULTIPLE_NODES */

extern struct pglist_data *first_online_pgdat(void);
extern struct pglist_data *next_online_pgdat(struct pglist_data *pgdat);
extern struct zone *next_zone(struct zone *zone);

/**
 * for_each_online_pgdat - helper macro to iterate over all online nodes
 * @pgdat - pointer to a pg_data_t variable
 */
#define for_each_online_pgdat(pgdat)			\
	for (pgdat = first_online_pgdat();		\
	     pgdat;					\
	     pgdat = next_online_pgdat(pgdat))
/**
 * for_each_zone - helper macro to iterate over all memory zones
 * @zone - pointer to struct zone variable
 *
 * The user only needs to declare the zone variable, for_each_zone
 * fills it in.
 */
#define for_each_zone(zone)				\
	for (zone = (first_online_pgdat())->node_zones; \
	     zone;					\
	     zone = next_zone(zone))

#define for_each_populated_zone(zone)			\
	for (zone = (first_online_pgdat())->node_zones; \
	     zone;					\
	     zone = next_zone(zone))			\
		if (!populated_zone(zone))		\
			; /* do nothing */		\
		else
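
/*
 * Illustrative sketch (not from this header): walking every populated zone
 * while honouring the managed_zone()/populated_zone() distinction described
 * above, e.g. to log how much of each zone the buddy allocator manages:
 *
 *	struct zone *zone;
 *
 *	for_each_populated_zone(zone) {
 *		if (!managed_zone(zone))
 *			continue;
 *		pr_info("%s: %lu of %lu present pages managed\n",
 *			zone->name, zone_managed_pages(zone),
 *			zone->present_pages);
 *	}
 */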

static inline struct zone *zonelist_zone(struct zoneref *zoneref)
{
	return zoneref->zone;
}

static inline int zonelist_zone_idx(struct zoneref *zoneref)
{
	return zoneref->zone_idx;
}

static inline int zonelist_node_idx(struct zoneref *zoneref)
{
	return zone_to_nid(zoneref->zone);
}

struct zoneref *__next_zones_zonelist(struct zoneref *z,
					enum zone_type highest_zoneidx,
					nodemask_t *nodes);

/**
 * next_zones_zonelist - Returns the next zone at or below highest_zoneidx within the allowed nodemask using a cursor within a zonelist as a starting point
 * @z - The cursor used as a starting point for the search
 * @highest_zoneidx - The zone index of the highest zone to return
 * @nodes - An optional nodemask to filter the zonelist with
 *
 * This function returns the next zone at or below a given zone index that is
 * within the allowed nodemask using a cursor as the starting point for the
 * search. The zoneref returned is a cursor that represents the current zone
 * being examined. It should be advanced by one before calling
 * next_zones_zonelist again.
 */
static __always_inline struct zoneref *next_zones_zonelist(struct zoneref *z,
					enum zone_type highest_zoneidx,
					nodemask_t *nodes)
{
	if (likely(!nodes && zonelist_zone_idx(z) <= highest_zoneidx))
		return z;
	return __next_zones_zonelist(z, highest_zoneidx, nodes);
}

/**
 * first_zones_zonelist - Returns the first zone at or below highest_zoneidx within the allowed nodemask in a zonelist
 * @zonelist - The zonelist to search for a suitable zone
 * @highest_zoneidx - The zone index of the highest zone to return
 * @nodes - An optional nodemask to filter the zonelist with
 * @return - Zoneref pointer for the first suitable zone found (see below)
 *
 * This function returns the first zone at or below a given zone index that is
 * within the allowed nodemask. The zoneref returned is a cursor that can be
 * used to iterate the zonelist with next_zones_zonelist by advancing it by
 * one before calling.
 *
 * When no eligible zone is found, zoneref->zone is NULL (zoneref itself is
 * never NULL). This may happen either genuinely, or due to concurrent nodemask
 * update due to cpuset modification.
 */
static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
					enum zone_type highest_zoneidx,
					nodemask_t *nodes)
{
	return next_zones_zonelist(zonelist->_zonerefs,
							highest_zoneidx, nodes);
}

/**
 * for_each_zone_zonelist_nodemask - helper macro to iterate over valid zones in a zonelist at or below a given zone index and within a nodemask
 * @zone - The current zone in the iterator
 * @z - The current pointer within zonelist->_zonerefs being iterated
 * @zlist - The zonelist being iterated
 * @highidx - The zone index of the highest zone to return
 * @nodemask - Nodemask allowed by the allocator
 *
 * This iterator iterates through all zones at or below a given zone index and
 * within a given nodemask
 */
#define for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, nodemask) \
	for (z = first_zones_zonelist(zlist, highidx, nodemask), zone = zonelist_zone(z);	\
		zone;							\
		z = next_zones_zonelist(++z, highidx, nodemask),	\
			zone = zonelist_zone(z))

#define for_next_zone_zonelist_nodemask(zone, z, highidx, nodemask) \
	for (zone = z->zone;	\
		zone;							\
		z = next_zones_zonelist(++z, highidx, nodemask),	\
			zone = zonelist_zone(z))


/**
 * for_each_zone_zonelist - helper macro to iterate over valid zones in a zonelist at or below a given zone index
 * @zone - The current zone in the iterator
 * @z - The current pointer within zonelist->zones being iterated
 * @zlist - The zonelist being iterated
 * @highidx - The zone index of the highest zone to return
 *
 * This iterator iterates through all zones at or below a given zone index.
 */
#define for_each_zone_zonelist(zone, z, zlist, highidx) \
	for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, NULL)
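
/*
 * Illustrative sketch (assumed caller, not from this header): this is the
 * pattern the page allocator's fallback walk follows - pick the zonelist for
 * the preferred node, then visit each eligible zone in priority order:
 *
 *	struct zonelist *zonelist = node_zonelist(nid, gfp_mask);
 *	struct zoneref *z;
 *	struct zone *zone;
 *
 *	for_each_zone_zonelist_nodemask(zone, z, zonelist, highidx, nodemask) {
 *		if (!zone_watermark_ok(zone, order, low_wmark_pages(zone),
 *				       highidx, alloc_flags))
 *			continue;
 *		... try to allocate from this zone ...
 *	}
 *
 * node_zonelist() and gfp_zone() (to derive highidx from the gfp mask) are
 * declared in include/linux/gfp.h.
 */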

#ifdef CONFIG_SPARSEMEM
#include <asm/sparsemem.h>
#endif

#ifdef CONFIG_FLATMEM
#define pfn_to_nid(pfn)		(0)
#endif

#ifdef CONFIG_SPARSEMEM

/*
 * SECTION_SHIFT		#bits space required to store a section #
 *
 * PA_SECTION_SHIFT		physical address to/from section number
 * PFN_SECTION_SHIFT		pfn to/from section number
 */
#define PA_SECTION_SHIFT	(SECTION_SIZE_BITS)
#define PFN_SECTION_SHIFT	(SECTION_SIZE_BITS - PAGE_SHIFT)

#define NR_MEM_SECTIONS		(1UL << SECTIONS_SHIFT)

#define PAGES_PER_SECTION	(1UL << PFN_SECTION_SHIFT)
#define PAGE_SECTION_MASK	(~(PAGES_PER_SECTION-1))

#define SECTION_BLOCKFLAGS_BITS \
	((1UL << (PFN_SECTION_SHIFT - pageblock_order)) * NR_PAGEBLOCK_BITS)

#if (MAX_ORDER - 1 + PAGE_SHIFT) > SECTION_SIZE_BITS
#error Allocator MAX_ORDER exceeds SECTION_SIZE
#endif

static inline unsigned long pfn_to_section_nr(unsigned long pfn)
{
	return pfn >> PFN_SECTION_SHIFT;
}
static inline unsigned long section_nr_to_pfn(unsigned long sec)
{
	return sec << PFN_SECTION_SHIFT;
}

#define SECTION_ALIGN_UP(pfn)	(((pfn) + PAGES_PER_SECTION - 1) & PAGE_SECTION_MASK)
#define SECTION_ALIGN_DOWN(pfn)	((pfn) & PAGE_SECTION_MASK)
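
/*
 * Worked example of the section arithmetic above, assuming the common x86_64
 * values SECTION_SIZE_BITS = 27 and PAGE_SHIFT = 12 (other architectures
 * differ): PFN_SECTION_SHIFT = 15, so each section covers PAGES_PER_SECTION =
 * 32768 pages = 128 MiB, pfn_to_section_nr(0x12345) is 0x2, and
 * SECTION_ALIGN_DOWN(0x12345) is 0x10000.
 */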

#define SUBSECTION_SHIFT 21
#define SUBSECTION_SIZE (1UL << SUBSECTION_SHIFT)

#define PFN_SUBSECTION_SHIFT (SUBSECTION_SHIFT - PAGE_SHIFT)
#define PAGES_PER_SUBSECTION (1UL << PFN_SUBSECTION_SHIFT)
#define PAGE_SUBSECTION_MASK (~(PAGES_PER_SUBSECTION-1))

#if SUBSECTION_SHIFT > SECTION_SIZE_BITS
#error Subsection size exceeds section size
#else
#define SUBSECTIONS_PER_SECTION (1UL << (SECTION_SIZE_BITS - SUBSECTION_SHIFT))
#endif

#define SUBSECTION_ALIGN_UP(pfn) ALIGN((pfn), PAGES_PER_SUBSECTION)
#define SUBSECTION_ALIGN_DOWN(pfn) ((pfn) & PAGE_SUBSECTION_MASK)

struct mem_section_usage {
#ifdef CONFIG_SPARSEMEM_VMEMMAP
	DECLARE_BITMAP(subsection_map, SUBSECTIONS_PER_SECTION);
#endif
	/* See declaration of similar field in struct zone */
	unsigned long pageblock_flags[0];
};

void subsection_map_init(unsigned long pfn, unsigned long nr_pages);

struct page;
struct page_ext;
struct mem_section {
	/*
	 * This is, logically, a pointer to an array of struct
	 * pages. However, it is stored with some other magic.
	 * (see sparse.c::sparse_init_one_section())
	 *
	 * Additionally during early boot we encode node id of
	 * the location of the section here to guide allocation.
	 * (see sparse.c::memory_present())
	 *
	 * Making it a UL at least makes someone do a cast
	 * before using it wrong.
	 */
	unsigned long section_mem_map;

	struct mem_section_usage *usage;
#ifdef CONFIG_PAGE_EXTENSION
	/*
	 * If SPARSEMEM, pgdat doesn't have page_ext pointer. We use
	 * section. (see page_ext.h about this.)
	 */
	struct page_ext *page_ext;
	unsigned long pad;
#endif
	/*
	 * WARNING: mem_section must be a power-of-2 in size for the
	 * calculation and use of SECTION_ROOT_MASK to make sense.
	 */
};

#ifdef CONFIG_SPARSEMEM_EXTREME
#define SECTIONS_PER_ROOT	(PAGE_SIZE / sizeof (struct mem_section))
#else
#define SECTIONS_PER_ROOT	1
#endif

#define SECTION_NR_TO_ROOT(sec)	((sec) / SECTIONS_PER_ROOT)
#define NR_SECTION_ROOTS	DIV_ROUND_UP(NR_MEM_SECTIONS, SECTIONS_PER_ROOT)
#define SECTION_ROOT_MASK	(SECTIONS_PER_ROOT - 1)

#ifdef CONFIG_SPARSEMEM_EXTREME
extern struct mem_section **mem_section;
#else
extern struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT];
#endif

static inline unsigned long *section_to_usemap(struct mem_section *ms)
{
	return ms->usage->pageblock_flags;
}

static inline struct mem_section *__nr_to_section(unsigned long nr)
{
#ifdef CONFIG_SPARSEMEM_EXTREME
	if (!mem_section)
		return NULL;
#endif
	if (!mem_section[SECTION_NR_TO_ROOT(nr)])
		return NULL;
	return &mem_section[SECTION_NR_TO_ROOT(nr)][nr & SECTION_ROOT_MASK];
}
extern unsigned long __section_nr(struct mem_section *ms);
extern size_t mem_section_usage_size(void);

/*
 * We use the lower bits of the mem_map pointer to store
 * a little bit of information. The pointer is calculated
 * as mem_map - section_nr_to_pfn(pnum). The result is
 * aligned to the minimum alignment of the two values:
 *   1. All mem_map arrays are page-aligned.
 *   2. section_nr_to_pfn() always clears PFN_SECTION_SHIFT
 *      lowest bits. PFN_SECTION_SHIFT is arch-specific
 *      (equal SECTION_SIZE_BITS - PAGE_SHIFT), and the
 *      worst combination is powerpc with 256k pages,
 *      which results in PFN_SECTION_SHIFT equal 6.
 *      To sum it up, at least 6 bits are available.
 */
#define SECTION_MARKED_PRESENT	(1UL<<0)
#define SECTION_HAS_MEM_MAP	(1UL<<1)
#define SECTION_IS_ONLINE	(1UL<<2)
#define SECTION_IS_EARLY	(1UL<<3)
#define SECTION_MAP_LAST_BIT	(1UL<<4)
#define SECTION_MAP_MASK	(~(SECTION_MAP_LAST_BIT-1))
#define SECTION_NID_SHIFT	3
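
/*
 * Illustrative sketch of the encoding described above (the real helpers live
 * in mm/sparse.c and just below in this header): the stored value is the
 * mem_map pointer biased by the section's first PFN, with the flag bits above
 * OR'ed into the low bits that the alignment guarantees to be free, roughly:
 *
 *	ms->section_mem_map = (unsigned long)(mem_map - section_nr_to_pfn(pnum))
 *			      | SECTION_MARKED_PRESENT | SECTION_HAS_MEM_MAP;
 *
 * Masking with SECTION_MAP_MASK and indexing by the absolute PFN again (as
 * __section_mem_map_addr() plus a PFN does) recovers &mem_map[pfn].
 */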

static inline struct page *__section_mem_map_addr(struct mem_section *section)
{
	unsigned long map = section->section_mem_map;
	map &= SECTION_MAP_MASK;
	return (struct page *)map;
}

static inline int present_section(struct mem_section *section)
{
	return (section && (section->section_mem_map & SECTION_MARKED_PRESENT));
}

static inline int present_section_nr(unsigned long nr)
{
	return present_section(__nr_to_section(nr));
}

static inline int valid_section(struct mem_section *section)
{
	return (section && (section->section_mem_map & SECTION_HAS_MEM_MAP));
}

static inline int early_section(struct mem_section *section)
{
	return (section && (section->section_mem_map & SECTION_IS_EARLY));
}

static inline int valid_section_nr(unsigned long nr)
{
	return valid_section(__nr_to_section(nr));
}

static inline int online_section(struct mem_section *section)
{
	return (section && (section->section_mem_map & SECTION_IS_ONLINE));
}

static inline int online_section_nr(unsigned long nr)
{
	return online_section(__nr_to_section(nr));
}

#ifdef CONFIG_MEMORY_HOTPLUG
void online_mem_sections(unsigned long start_pfn, unsigned long end_pfn);
#ifdef CONFIG_MEMORY_HOTREMOVE
void offline_mem_sections(unsigned long start_pfn, unsigned long end_pfn);
#endif
#endif

static inline struct mem_section *__pfn_to_section(unsigned long pfn)
{
	return __nr_to_section(pfn_to_section_nr(pfn));
}

extern unsigned long __highest_present_section_nr;

static inline int subsection_map_index(unsigned long pfn)
{
	return (pfn & ~(PAGE_SECTION_MASK)) / PAGES_PER_SUBSECTION;
}

#ifdef CONFIG_SPARSEMEM_VMEMMAP
static inline int pfn_section_valid(struct mem_section *ms, unsigned long pfn)
{
	int idx = subsection_map_index(pfn);

	return test_bit(idx, ms->usage->subsection_map);
}
#else
static inline int pfn_section_valid(struct mem_section *ms, unsigned long pfn)
{
	return 1;
}
#endif

#ifndef CONFIG_HAVE_ARCH_PFN_VALID
static inline int pfn_valid(unsigned long pfn)
{
	struct mem_section *ms;

	if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
		return 0;
	ms = __nr_to_section(pfn_to_section_nr(pfn));
	if (!valid_section(ms))
		return 0;
	/*
	 * Traditionally early sections always returned pfn_valid() for
	 * the entire section-sized span.
	 */
	return early_section(ms) || pfn_section_valid(ms, pfn);
}
#endif
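
/*
 * Illustrative sketch (assumed caller, not from this header): pfn_valid()
 * only says that a struct page exists for the PFN, so the usual pattern
 * before touching the memmap is
 *
 *	if (!pfn_valid(pfn))
 *		continue;
 *	page = pfn_to_page(pfn);
 *
 * Callers that additionally need the page to be initialised and online would
 * use pfn_to_online_page() from include/linux/memory_hotplug.h instead.
 */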

static inline int pfn_in_present_section(unsigned long pfn)
{
	if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
		return 0;
	return present_section(__nr_to_section(pfn_to_section_nr(pfn)));
}

static inline unsigned long next_present_section_nr(unsigned long section_nr)
{
	while (++section_nr <= __highest_present_section_nr) {
		if (present_section_nr(section_nr))
			return section_nr;
	}

	return -1;
}

/*
 * These are _only_ used during initialisation, therefore they
 * can use __initdata ... They could have names to indicate
 * this restriction.
 */
#ifdef CONFIG_NUMA
#define pfn_to_nid(pfn)							\
({									\
	unsigned long __pfn_to_nid_pfn = (pfn);				\
	page_to_nid(pfn_to_page(__pfn_to_nid_pfn));			\
})
#else
#define pfn_to_nid(pfn)		(0)
#endif

void sparse_init(void);
#else
#define sparse_init()	do {} while (0)
#define sparse_index_init(_sec, _nid)  do {} while (0)
#define pfn_in_present_section pfn_valid
#define subsection_map_init(_pfn, _nr_pages) do {} while (0)
#endif /* CONFIG_SPARSEMEM */

/*
 * During memory init memblocks map pfns to nids. The search is expensive and
 * this caches recent lookups. The implementation of __early_pfn_to_nid
 * may treat start/end as pfns or sections.
 */
struct mminit_pfnnid_cache {
	unsigned long last_start;
	unsigned long last_end;
	int last_nid;
};

/*
 * If it is possible to have holes within a MAX_ORDER_NR_PAGES block, then we
 * need to check pfn validity within that MAX_ORDER_NR_PAGES block.
 * pfn_valid_within() should be used in this case; we optimise this away
 * when we have no holes within a MAX_ORDER_NR_PAGES block.
 */
#ifdef CONFIG_HOLES_IN_ZONE
#define pfn_valid_within(pfn) pfn_valid(pfn)
#else
#define pfn_valid_within(pfn) (1)
#endif
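
/*
 * Illustrative sketch (assumed caller, not from this header): a walker that
 * scans every page of a MAX_ORDER-sized block guards each PFN with
 * pfn_valid_within(), which compiles away on configurations that cannot have
 * holes inside such blocks:
 *
 *	for (pfn = block_start_pfn; pfn < block_end_pfn; pfn++) {
 *		if (!pfn_valid_within(pfn))
 *			continue;
 *		page = pfn_to_page(pfn);
 *		... inspect page ...
 *	}
 */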

#ifdef CONFIG_ARCH_HAS_HOLES_MEMORYMODEL
/*
 * pfn_valid() is meant to be able to tell if a given PFN has valid memmap
 * associated with it or not. This means that a struct page exists for this
 * pfn. The caller cannot assume the page is fully initialized in general.
 * Hotpluggable pages might not have been onlined yet. pfn_to_online_page()
 * will ensure the struct page is fully online and initialized. Special pages
 * (e.g. ZONE_DEVICE) are never onlined and should be treated accordingly.
 *
 * In FLATMEM, it is expected that holes always have valid memmap as long as
 * there are valid PFNs on either side of the hole. In SPARSEMEM, it is assumed
 * that a valid section has a memmap for the entire section.
 *
 * However, ARM, and maybe other embedded architectures in the future, free
 * the memmap backing holes to save memory on the assumption the memmap is
 * never used. The page_zone linkages are then broken even though pfn_valid()
 * returns true. A walker of the full memmap must then do this additional
 * check to ensure the memmap they are looking at is sane by making sure
 * the zone and PFN linkages are still valid. This is expensive, but walkers
 * of the full memmap are extremely rare.
 */
bool memmap_valid_within(unsigned long pfn,
			 struct page *page, struct zone *zone);
#else
static inline bool memmap_valid_within(unsigned long pfn,
					struct page *page, struct zone *zone)
{
	return true;
}
#endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */

#endif /* !__GENERATING_BOUNDS_H */
#endif /* !__ASSEMBLY__ */
#endif /* _LINUX_MMZONE_H */