/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MMZONE_H
#define _LINUX_MMZONE_H

#ifndef __ASSEMBLY__
#ifndef __GENERATING_BOUNDS_H

#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/bitops.h>
#include <linux/cache.h>
#include <linux/threads.h>
#include <linux/numa.h>
#include <linux/init.h>
#include <linux/seqlock.h>
#include <linux/nodemask.h>
#include <linux/pageblock-flags.h>
#include <linux/page-flags-layout.h>
#include <linux/atomic.h>
#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <asm/page.h>

/* Free memory management - zoned buddy allocator. */
#ifndef CONFIG_FORCE_MAX_ZONEORDER
#define MAX_ORDER 11
#else
#define MAX_ORDER CONFIG_FORCE_MAX_ZONEORDER
#endif
#define MAX_ORDER_NR_PAGES (1 << (MAX_ORDER - 1))

/*
 * PAGE_ALLOC_COSTLY_ORDER is the order at which allocations are deemed
 * costly to service.  That is between allocation orders which should
 * coalesce naturally under reasonable reclaim pressure and those which
 * will not.
 */
#define PAGE_ALLOC_COSTLY_ORDER 3
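/*
 * Worked example, illustrative only, assuming the default MAX_ORDER of 11
 * and a 4 KiB PAGE_SIZE: the largest buddy block covers
 * MAX_ORDER_NR_PAGES = 1 << (11 - 1) = 1024 pages (4 MiB), while an
 * order-PAGE_ALLOC_COSTLY_ORDER allocation covers 1 << 3 = 8 pages (32 KiB).
 */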
enum migratetype {
        MIGRATE_UNMOVABLE,
        MIGRATE_MOVABLE,
        MIGRATE_RECLAIMABLE,
        MIGRATE_PCPTYPES,       /* the number of types on the pcp lists */
        MIGRATE_HIGHATOMIC = MIGRATE_PCPTYPES,
#ifdef CONFIG_CMA
        /*
         * MIGRATE_CMA migration type is designed to mimic the way
         * ZONE_MOVABLE works.  Only movable pages can be allocated
         * from MIGRATE_CMA pageblocks and the page allocator never
         * implicitly changes the migration type of a MIGRATE_CMA pageblock.
         *
         * The way to use it is to change the migratetype of a range of
         * pageblocks to MIGRATE_CMA, which can be done with the
         * __free_pageblock_cma() function.  What is important though
         * is that a range of pageblocks must be aligned to
         * MAX_ORDER_NR_PAGES should the biggest page be bigger than
         * a single pageblock.
         */
        MIGRATE_CMA,
#endif
#ifdef CONFIG_MEMORY_ISOLATION
        MIGRATE_ISOLATE,        /* can't allocate from here */
#endif
        MIGRATE_TYPES
};

/* In mm/page_alloc.c; keep in sync also with show_migration_types() there */
extern const char * const migratetype_names[MIGRATE_TYPES];

#ifdef CONFIG_CMA
# define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA)
# define is_migrate_cma_page(_page) (get_pageblock_migratetype(_page) == MIGRATE_CMA)
#else
# define is_migrate_cma(migratetype) false
# define is_migrate_cma_page(_page) false
#endif

static inline bool is_migrate_movable(int mt)
{
        return is_migrate_cma(mt) || mt == MIGRATE_MOVABLE;
}

#define for_each_migratetype_order(order, type) \
        for (order = 0; order < MAX_ORDER; order++) \
                for (type = 0; type < MIGRATE_TYPES; type++)

extern int page_group_by_mobility_disabled;

#define NR_MIGRATETYPE_BITS (PB_migrate_end - PB_migrate + 1)
#define MIGRATETYPE_MASK ((1UL << NR_MIGRATETYPE_BITS) - 1)

#define get_pageblock_migratetype(page)                                 \
        get_pfnblock_flags_mask(page, page_to_pfn(page),                \
                        PB_migrate_end, MIGRATETYPE_MASK)

struct free_area {
        struct list_head        free_list[MIGRATE_TYPES];
        unsigned long           nr_free;
};

/* Used for pages not on another list */
static inline void add_to_free_area(struct page *page, struct free_area *area,
                             int migratetype)
{
        list_add(&page->lru, &area->free_list[migratetype]);
        area->nr_free++;
}

/* Used for pages not on another list */
static inline void add_to_free_area_tail(struct page *page, struct free_area *area,
                                  int migratetype)
{
        list_add_tail(&page->lru, &area->free_list[migratetype]);
        area->nr_free++;
}

#ifdef CONFIG_SHUFFLE_PAGE_ALLOCATOR
/* Used to preserve page allocation order entropy */
void add_to_free_area_random(struct page *page, struct free_area *area,
                int migratetype);
#else
static inline void add_to_free_area_random(struct page *page,
                struct free_area *area, int migratetype)
{
        add_to_free_area(page, area, migratetype);
}
#endif

/* Used for pages which are on another list */
static inline void move_to_free_area(struct page *page, struct free_area *area,
                             int migratetype)
{
        list_move(&page->lru, &area->free_list[migratetype]);
}

static inline struct page *get_page_from_free_area(struct free_area *area,
                                            int migratetype)
{
        return list_first_entry_or_null(&area->free_list[migratetype],
                                        struct page, lru);
}

static inline void del_page_from_free_area(struct page *page,
                struct free_area *area)
{
        list_del(&page->lru);
        __ClearPageBuddy(page);
        set_page_private(page, 0);
        area->nr_free--;
}

static inline bool free_area_empty(struct free_area *area, int migratetype)
{
        return list_empty(&area->free_list[migratetype]);
}
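/*
 * Illustrative sketch, not part of this header: walking the buddy free
 * lists of one zone with the helpers above.  The zone pointer and
 * zone->lock handling are assumed to be supplied by the caller;
 * zone->free_area[] is defined further down in this file.
 *
 *        unsigned long nr_pages = 0, nr_lists = 0;
 *        unsigned int order;
 *        int type;
 *
 *        for (order = 0; order < MAX_ORDER; order++)
 *                nr_pages += zone->free_area[order].nr_free << order;
 *
 *        for_each_migratetype_order(order, type)
 *                if (!free_area_empty(&zone->free_area[order], type))
 *                        nr_lists++;
 */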
struct pglist_data;

/*
 * zone->lock and the zone lru_lock are two of the hottest locks in the kernel.
 * So add a wild amount of padding here to ensure that they fall into separate
 * cachelines.  There are very few zone structures in the machine, so space
 * consumption is not a concern here.
 */
#if defined(CONFIG_SMP)
struct zone_padding {
        char x[0];
} ____cacheline_internodealigned_in_smp;
#define ZONE_PADDING(name)      struct zone_padding name;
#else
#define ZONE_PADDING(name)
#endif

#ifdef CONFIG_NUMA
enum numa_stat_item {
        NUMA_HIT,               /* allocated in intended node */
        NUMA_MISS,              /* allocated in non-intended node */
        NUMA_FOREIGN,           /* was intended here, hit elsewhere */
        NUMA_INTERLEAVE_HIT,    /* interleaver preferred this zone */
        NUMA_LOCAL,             /* allocation from local node */
        NUMA_OTHER,             /* allocation from other node */
        NR_VM_NUMA_STAT_ITEMS
};
#else
#define NR_VM_NUMA_STAT_ITEMS 0
#endif

enum zone_stat_item {
        /* First 128 byte cacheline (assuming 64 bit words) */
        NR_FREE_PAGES,
        NR_ZONE_LRU_BASE, /* Used only for compaction and reclaim retry */
        NR_ZONE_INACTIVE_ANON = NR_ZONE_LRU_BASE,
        NR_ZONE_ACTIVE_ANON,
        NR_ZONE_INACTIVE_FILE,
        NR_ZONE_ACTIVE_FILE,
        NR_ZONE_UNEVICTABLE,
        NR_ZONE_WRITE_PENDING,  /* Count of dirty, writeback and unstable pages */
        NR_MLOCK,               /* mlock()ed pages found and moved off LRU */
        NR_PAGETABLE,           /* used for pagetables */
        NR_KERNEL_STACK_KB,     /* measured in KiB */
        /* Second 128 byte cacheline */
        NR_BOUNCE,
#if IS_ENABLED(CONFIG_ZSMALLOC)
        NR_ZSPAGES,             /* allocated in zsmalloc */
#endif
        NR_FREE_CMA_PAGES,
        NR_VM_ZONE_STAT_ITEMS };

enum node_stat_item {
        NR_LRU_BASE,
        NR_INACTIVE_ANON = NR_LRU_BASE, /* must match order of LRU_[IN]ACTIVE */
        NR_ACTIVE_ANON,         /*  "     "     "   "       "         */
        NR_INACTIVE_FILE,       /*  "     "     "   "       "         */
        NR_ACTIVE_FILE,         /*  "     "     "   "       "         */
        NR_UNEVICTABLE,         /*  "     "     "   "       "         */
        NR_SLAB_RECLAIMABLE,
        NR_SLAB_UNRECLAIMABLE,
        NR_ISOLATED_ANON,       /* Temporary isolated pages from anon lru */
        NR_ISOLATED_FILE,       /* Temporary isolated pages from file lru */
        WORKINGSET_NODES,
        WORKINGSET_REFAULT,
        WORKINGSET_ACTIVATE,
        WORKINGSET_RESTORE,
        WORKINGSET_NODERECLAIM,
        NR_ANON_MAPPED,         /* Mapped anonymous pages */
        NR_FILE_MAPPED,         /* pagecache pages mapped into pagetables.
                                   only modified from process context */
        NR_FILE_PAGES,
        NR_FILE_DIRTY,
        NR_WRITEBACK,
        NR_WRITEBACK_TEMP,      /* Writeback using temporary buffers */
        NR_SHMEM,               /* shmem pages (includes tmpfs/GEM pages) */
        NR_SHMEM_THPS,
        NR_SHMEM_PMDMAPPED,
        NR_ANON_THPS,
        NR_UNSTABLE_NFS,        /* NFS unstable pages */
        NR_VMSCAN_WRITE,
        NR_VMSCAN_IMMEDIATE,    /* Prioritise for reclaim when writeback ends */
        NR_DIRTIED,             /* page dirtyings since bootup */
        NR_WRITTEN,             /* page writings since bootup */
        NR_KERNEL_MISC_RECLAIMABLE,     /* reclaimable non-slab kernel pages */
        NR_VM_NODE_STAT_ITEMS
};
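/*
 * Illustrative sketch, not part of this header: the counters above are
 * normally read through the helpers declared in <linux/vmstat.h>; the
 * helper names below are assumptions based on that header rather than
 * anything defined here.
 *
 *        unsigned long free  = global_zone_page_state(NR_FREE_PAGES);
 *        unsigned long dirty = global_node_page_state(NR_FILE_DIRTY);
 */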
/*
 * We do arithmetic on the LRU lists in various places in the code,
 * so it is important to keep the active lists LRU_ACTIVE higher in
 * the array than the corresponding inactive lists, and to keep
 * the *_FILE lists LRU_FILE higher than the corresponding _ANON lists.
 *
 * This has to be kept in sync with the statistics in zone_stat_item
 * above and the descriptions in vmstat_text in mm/vmstat.c
 */
#define LRU_BASE 0
#define LRU_ACTIVE 1
#define LRU_FILE 2

enum lru_list {
        LRU_INACTIVE_ANON = LRU_BASE,
        LRU_ACTIVE_ANON = LRU_BASE + LRU_ACTIVE,
        LRU_INACTIVE_FILE = LRU_BASE + LRU_FILE,
        LRU_ACTIVE_FILE = LRU_BASE + LRU_FILE + LRU_ACTIVE,
        LRU_UNEVICTABLE,
        NR_LRU_LISTS
};

#define for_each_lru(lru) for (lru = 0; lru < NR_LRU_LISTS; lru++)

#define for_each_evictable_lru(lru) for (lru = 0; lru <= LRU_ACTIVE_FILE; lru++)

static inline int is_file_lru(enum lru_list lru)
{
        return (lru == LRU_INACTIVE_FILE || lru == LRU_ACTIVE_FILE);
}

static inline int is_active_lru(enum lru_list lru)
{
        return (lru == LRU_ACTIVE_ANON || lru == LRU_ACTIVE_FILE);
}

struct zone_reclaim_stat {
        /*
         * The pageout code in vmscan.c keeps track of how many of the
         * mem/swap backed and file backed pages are referenced.
         * The higher the rotated/scanned ratio, the more valuable
         * that cache is.
         *
         * The anon LRU stats live in [0], file LRU stats in [1]
         */
        unsigned long           recent_rotated[2];
        unsigned long           recent_scanned[2];
};

struct lruvec {
        struct list_head                lists[NR_LRU_LISTS];
        struct zone_reclaim_stat        reclaim_stat;
        /* Evictions & activations on the inactive file list */
        atomic_long_t                   inactive_age;
        /* Refaults at the time of last reclaim cycle */
        unsigned long                   refaults;
#ifdef CONFIG_MEMCG
        struct pglist_data *pgdat;
#endif
};

/* Isolate unmapped file */
#define ISOLATE_UNMAPPED        ((__force isolate_mode_t)0x2)
/* Isolate for asynchronous migration */
#define ISOLATE_ASYNC_MIGRATE   ((__force isolate_mode_t)0x4)
/* Isolate unevictable pages */
#define ISOLATE_UNEVICTABLE     ((__force isolate_mode_t)0x8)

/* LRU Isolation modes. */
typedef unsigned __bitwise isolate_mode_t;

enum zone_watermarks {
        WMARK_MIN,
        WMARK_LOW,
        WMARK_HIGH,
        NR_WMARK
};

#define min_wmark_pages(z) (z->_watermark[WMARK_MIN] + z->watermark_boost)
#define low_wmark_pages(z) (z->_watermark[WMARK_LOW] + z->watermark_boost)
#define high_wmark_pages(z) (z->_watermark[WMARK_HIGH] + z->watermark_boost)
#define wmark_pages(z, i) (z->_watermark[i] + z->watermark_boost)
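/*
 * Illustrative sketch, not part of this header: a caller holding a zone
 * pointer can compare its free pages against the (boosted) low watermark;
 * zone_watermark_ok(), declared further down, additionally honours the
 * lowmem reserves and allocation flags.  zone_page_state() lives in
 * <linux/vmstat.h> and is only assumed here; gfp_mask and order come from
 * the caller.
 *
 *        if (zone_page_state(zone, NR_FREE_PAGES) < low_wmark_pages(zone))
 *                wakeup_kswapd(zone, gfp_mask, order, zone_idx(zone));
 */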
struct per_cpu_pages {
        int count;              /* number of pages in the list */
        int high;               /* high watermark, emptying needed */
        int batch;              /* chunk size for buddy add/remove */

        /* Lists of pages, one per migrate type stored on the pcp-lists */
        struct list_head lists[MIGRATE_PCPTYPES];
};

struct per_cpu_pageset {
        struct per_cpu_pages pcp;
#ifdef CONFIG_NUMA
        s8 expire;
        u16 vm_numa_stat_diff[NR_VM_NUMA_STAT_ITEMS];
#endif
#ifdef CONFIG_SMP
        s8 stat_threshold;
        s8 vm_stat_diff[NR_VM_ZONE_STAT_ITEMS];
#endif
};

struct per_cpu_nodestat {
        s8 stat_threshold;
        s8 vm_node_stat_diff[NR_VM_NODE_STAT_ITEMS];
};

#endif /* !__GENERATING_BOUNDS.H */

enum zone_type {
#ifdef CONFIG_ZONE_DMA
        /*
         * ZONE_DMA is used when there are devices that are not able
         * to do DMA to all of addressable memory (ZONE_NORMAL). Then we
         * carve out the portion of memory that is needed for these devices.
         * The range is arch specific.
         *
         * Some examples
         *
         * Architecture         Limit
         * ---------------------------
         * parisc, ia64, sparc  <4G
         * s390, powerpc        <2G
         * arm                  Various
         * alpha                Unlimited or 0-16MB.
         *
         * i386, x86_64 and multiple other arches
         *                      <16M.
         */
        ZONE_DMA,
#endif
#ifdef CONFIG_ZONE_DMA32
        /*
         * x86_64 needs two ZONE_DMAs because it supports devices that are
         * only able to do DMA to the lower 16M but also 32 bit devices that
         * can only do DMA to areas below 4G.
         */
        ZONE_DMA32,
#endif
        /*
         * Normal addressable memory is in ZONE_NORMAL. DMA operations can be
         * performed on pages in ZONE_NORMAL if the DMA devices support
         * transfers to all addressable memory.
         */
        ZONE_NORMAL,
#ifdef CONFIG_HIGHMEM
        /*
         * A memory area that is only addressable by the kernel through
         * mapping portions into its own address space. This is for example
         * used by i386 to allow the kernel to address the memory beyond
         * 900MB. The kernel will set up special mappings (page
         * table entries on i386) for each page that the kernel needs to
         * access.
         */
        ZONE_HIGHMEM,
#endif
        ZONE_MOVABLE,
#ifdef CONFIG_ZONE_DEVICE
        ZONE_DEVICE,
#endif
        __MAX_NR_ZONES

};

#ifndef __GENERATING_BOUNDS_H

struct zone {
        /* Read-mostly fields */

        /* zone watermarks, access with *_wmark_pages(zone) macros */
        unsigned long _watermark[NR_WMARK];
        unsigned long watermark_boost;

        unsigned long nr_reserved_highatomic;

        /*
         * We don't know if the memory that we're going to allocate will be
         * freeable and/or released eventually, so to avoid totally
         * wasting several GB of ram we must reserve some of the lower zone
         * memory (otherwise we risk running OOM on the lower zones despite
         * there being tons of freeable ram on the higher zones).  This array
         * is recalculated at runtime if the sysctl_lowmem_reserve_ratio
         * sysctl changes.
         */
        long lowmem_reserve[MAX_NR_ZONES];

#ifdef CONFIG_NUMA
        int node;
#endif
        struct pglist_data      *zone_pgdat;
        struct per_cpu_pageset __percpu *pageset;

#ifndef CONFIG_SPARSEMEM
        /*
         * Flags for a pageblock_nr_pages block. See pageblock-flags.h.
         * In SPARSEMEM, this map is stored in struct mem_section
         */
        unsigned long           *pageblock_flags;
#endif /* CONFIG_SPARSEMEM */

        /* zone_start_pfn == zone_start_paddr >> PAGE_SHIFT */
        unsigned long           zone_start_pfn;

        /*
         * spanned_pages is the total pages spanned by the zone, including
         * holes, which is calculated as:
         *      spanned_pages = zone_end_pfn - zone_start_pfn;
         *
         * present_pages is physical pages existing within the zone, which
         * is calculated as:
         *      present_pages = spanned_pages - absent_pages(pages in holes);
         *
         * managed_pages is present pages managed by the buddy system, which
         * is calculated as (reserved_pages includes pages allocated by the
         * bootmem allocator):
         *      managed_pages = present_pages - reserved_pages;
         *
         * So present_pages may be used by memory hotplug or memory power
         * management logic to figure out unmanaged pages by checking
         * (present_pages - managed_pages). And managed_pages should be used
         * by the page allocator and vm scanner to calculate all kinds of
         * watermarks and thresholds.
         *
         * Locking rules:
         *
         * zone_start_pfn and spanned_pages are protected by span_seqlock.
         * It is a seqlock because it has to be read outside of zone->lock,
         * and it is done in the main allocator path.  But, it is written
         * quite infrequently.
         *
         * The span_seq lock is declared along with zone->lock because it is
         * frequently read in proximity to zone->lock.  It's good to
         * give them a chance of being in the same cacheline.
         *
         * Write access to present_pages at runtime should be protected by
         * mem_hotplug_begin/end(). Any reader who can't tolerate drift of
         * present_pages should use get_online_mems() to get a stable value.
         */
        atomic_long_t           managed_pages;
        unsigned long           spanned_pages;
        unsigned long           present_pages;

        const char              *name;

#ifdef CONFIG_MEMORY_ISOLATION
        /*
         * Number of isolated pageblocks. It is used to solve an incorrect
         * freepage counting problem due to racy retrieving of the
         * migratetype of a pageblock. Protected by zone->lock.
         */
        unsigned long           nr_isolate_pageblock;
#endif

#ifdef CONFIG_MEMORY_HOTPLUG
        /* see spanned/present_pages for more description */
        seqlock_t               span_seqlock;
#endif

        int initialized;

        /* Write-intensive fields used from the page allocator */
        ZONE_PADDING(_pad1_)

        /* free areas of different sizes */
        struct free_area        free_area[MAX_ORDER];

        /* zone flags, see below */
        unsigned long           flags;

        /* Primarily protects free_area */
        spinlock_t              lock;

        /* Write-intensive fields used by compaction and vmstats. */
        ZONE_PADDING(_pad2_)

        /*
         * When free pages are below this point, additional steps are taken
         * when reading the number of free pages to avoid per-cpu counter
         * drift allowing watermarks to be breached
         */
        unsigned long percpu_drift_mark;

#if defined CONFIG_COMPACTION || defined CONFIG_CMA
        /* pfn where compaction free scanner should start */
        unsigned long           compact_cached_free_pfn;
        /* pfn where async and sync compaction migration scanner should start */
        unsigned long           compact_cached_migrate_pfn[2];
        unsigned long           compact_init_migrate_pfn;
        unsigned long           compact_init_free_pfn;
#endif

#ifdef CONFIG_COMPACTION
        /*
         * On compaction failure, 1<<compact_defer_shift compactions
         * are skipped before trying again. The number attempted since
         * last failure is tracked with compact_considered.
         */
        unsigned int            compact_considered;
        unsigned int            compact_defer_shift;
        int                     compact_order_failed;
#endif

#if defined CONFIG_COMPACTION || defined CONFIG_CMA
        /* Set to true when the PG_migrate_skip bits should be cleared */
        bool                    compact_blockskip_flush;
#endif

        bool                    contiguous;

        ZONE_PADDING(_pad3_)
        /* Zone statistics */
        atomic_long_t           vm_stat[NR_VM_ZONE_STAT_ITEMS];
        atomic_long_t           vm_numa_stat[NR_VM_NUMA_STAT_ITEMS];
} ____cacheline_internodealigned_in_smp;
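/*
 * Worked example for the page accounting above (illustrative numbers only):
 * a zone spanning PFNs [0x100000, 0x140000) has spanned_pages = 0x40000;
 * if 0x2000 of those PFNs fall into holes, present_pages = 0x3e000; and if
 * the early allocator permanently reserved 0x800 pages, managed_pages is
 * 0x3d800.  present_pages - managed_pages therefore gives the pages that
 * exist but are never handed to the buddy allocator.
 */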
enum pgdat_flags {
        PGDAT_CONGESTED,        /* pgdat has many dirty pages backed by
                                 * a congested BDI
                                 */
        PGDAT_DIRTY,            /* reclaim scanning has recently found
                                 * many dirty file pages at the tail
                                 * of the LRU.
                                 */
        PGDAT_WRITEBACK,        /* reclaim scanning has recently found
                                 * many pages under writeback
                                 */
        PGDAT_RECLAIM_LOCKED,   /* prevents concurrent reclaim */
};

enum zone_flags {
        ZONE_BOOSTED_WATERMARK, /* zone recently boosted watermarks.
                                 * Cleared when kswapd is woken.
                                 */
};

static inline unsigned long zone_managed_pages(struct zone *zone)
{
        return (unsigned long)atomic_long_read(&zone->managed_pages);
}

static inline unsigned long zone_end_pfn(const struct zone *zone)
{
        return zone->zone_start_pfn + zone->spanned_pages;
}

static inline bool zone_spans_pfn(const struct zone *zone, unsigned long pfn)
{
        return zone->zone_start_pfn <= pfn && pfn < zone_end_pfn(zone);
}

static inline bool zone_is_initialized(struct zone *zone)
{
        return zone->initialized;
}

static inline bool zone_is_empty(struct zone *zone)
{
        return zone->spanned_pages == 0;
}

/*
 * Return true if [start_pfn, start_pfn + nr_pages) range has a non-empty
 * intersection with the given zone
 */
static inline bool zone_intersects(struct zone *zone,
                unsigned long start_pfn, unsigned long nr_pages)
{
        if (zone_is_empty(zone))
                return false;
        if (start_pfn >= zone_end_pfn(zone) ||
            start_pfn + nr_pages <= zone->zone_start_pfn)
                return false;

        return true;
}
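/*
 * Illustrative sketch, not part of this header: a hotplug-style caller can
 * use the span helpers above to test a PFN against a zone it already holds
 * a pointer to.
 *
 *        if (zone_spans_pfn(zone, pfn))
 *                pr_debug("pfn %lx lies in %s [%lx, %lx)\n", pfn, zone->name,
 *                         zone->zone_start_pfn, zone_end_pfn(zone));
 */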
/*
 * The "priority" of VM scanning is how much of the queues we will scan in one
 * go. A value of 12 for DEF_PRIORITY implies that we will scan 1/4096th of
 * the queues ("queue_length >> 12") during an aging round.
 */
#define DEF_PRIORITY 12

/* Maximum number of zones on a zonelist */
#define MAX_ZONES_PER_ZONELIST (MAX_NUMNODES * MAX_NR_ZONES)

enum {
        ZONELIST_FALLBACK,      /* zonelist with fallback */
#ifdef CONFIG_NUMA
        /*
         * The NUMA zonelists are doubled because we need zonelists that
         * restrict the allocations to a single node for __GFP_THISNODE.
         */
        ZONELIST_NOFALLBACK,    /* zonelist without fallback (__GFP_THISNODE) */
#endif
        MAX_ZONELISTS
};

/*
 * This struct contains information about a zone in a zonelist. It is stored
 * here to avoid dereferences into large structures and lookups of tables
 */
struct zoneref {
        struct zone *zone;      /* Pointer to actual zone */
        int zone_idx;           /* zone_idx(zoneref->zone) */
};

/*
 * One allocation request operates on a zonelist. A zonelist
 * is a list of zones, the first one is the 'goal' of the
 * allocation, the other zones are fallback zones, in decreasing
 * priority.
 *
 * To speed the reading of the zonelist, the zonerefs contain the zone index
 * of the entry being read. Helper functions to access information given
 * a struct zoneref are
 *
 * zonelist_zone()      - Return the struct zone * for an entry in _zonerefs
 * zonelist_zone_idx()  - Return the index of the zone for an entry
 * zonelist_node_idx()  - Return the index of the node for an entry
 */
struct zonelist {
        struct zoneref _zonerefs[MAX_ZONES_PER_ZONELIST + 1];
};

#ifndef CONFIG_DISCONTIGMEM
/* The array of struct pages - for discontigmem use pgdat->lmem_map */
extern struct page *mem_map;
#endif

/*
 * On NUMA machines, each NUMA node has a pg_data_t to describe
 * its memory layout. On UMA machines there is a single pglist_data which
 * describes the whole memory.
 *
 * Memory statistics and page replacement data structures are maintained on a
 * per-zone basis.
 */
struct bootmem_data;
typedef struct pglist_data {
        struct zone node_zones[MAX_NR_ZONES];
        struct zonelist node_zonelists[MAX_ZONELISTS];
        int nr_zones;
#ifdef CONFIG_FLAT_NODE_MEM_MAP /* means !SPARSEMEM */
        struct page *node_mem_map;
#ifdef CONFIG_PAGE_EXTENSION
        struct page_ext *node_page_ext;
#endif
#endif
#if defined(CONFIG_MEMORY_HOTPLUG) || defined(CONFIG_DEFERRED_STRUCT_PAGE_INIT)
        /*
         * Must be held any time you expect node_start_pfn,
         * node_present_pages, node_spanned_pages or nr_zones to stay constant.
         *
         * pgdat_resize_lock() and pgdat_resize_unlock() are provided to
         * manipulate node_size_lock without checking for CONFIG_MEMORY_HOTPLUG
         * or CONFIG_DEFERRED_STRUCT_PAGE_INIT.
         *
         * Nests above zone->lock and zone->span_seqlock
         */
        spinlock_t node_size_lock;
#endif
        unsigned long node_start_pfn;
        unsigned long node_present_pages; /* total number of physical pages */
        unsigned long node_spanned_pages; /* total size of physical page
                                             range, including holes */
        int node_id;
        wait_queue_head_t kswapd_wait;
        wait_queue_head_t pfmemalloc_wait;
        struct task_struct *kswapd;     /* Protected by
                                           mem_hotplug_begin/end() */
        int kswapd_order;
        enum zone_type kswapd_classzone_idx;

        int kswapd_failures;            /* Number of 'reclaimed == 0' runs */

#ifdef CONFIG_COMPACTION
        int kcompactd_max_order;
        enum zone_type kcompactd_classzone_idx;
        wait_queue_head_t kcompactd_wait;
        struct task_struct *kcompactd;
#endif
        /*
         * This is a per-node reserve of pages that are not available
         * to userspace allocations.
         */
        unsigned long           totalreserve_pages;

#ifdef CONFIG_NUMA
        /*
         * zone reclaim becomes active if more unmapped pages exist.
         */
        unsigned long           min_unmapped_pages;
        unsigned long           min_slab_pages;
#endif /* CONFIG_NUMA */

        /* Write-intensive fields used by page reclaim */
        ZONE_PADDING(_pad1_)
        spinlock_t              lru_lock;
#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
        /*
         * If memory initialisation on large machines is deferred then this
         * is the first PFN that needs to be initialised.
         */
        unsigned long first_deferred_pfn;
#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
        spinlock_t split_queue_lock;
        struct list_head split_queue;
        unsigned long split_queue_len;
#endif

        /* Fields commonly accessed by the page reclaim scanner */
        struct lruvec           lruvec;

        unsigned long           flags;

        ZONE_PADDING(_pad2_)

        /* Per-node vmstats */
        struct per_cpu_nodestat __percpu *per_cpu_nodestats;
        atomic_long_t           vm_stat[NR_VM_NODE_STAT_ITEMS];
} pg_data_t;

#define node_present_pages(nid) (NODE_DATA(nid)->node_present_pages)
#define node_spanned_pages(nid) (NODE_DATA(nid)->node_spanned_pages)
#ifdef CONFIG_FLAT_NODE_MEM_MAP
#define pgdat_page_nr(pgdat, pagenr)    ((pgdat)->node_mem_map + (pagenr))
#else
#define pgdat_page_nr(pgdat, pagenr)    pfn_to_page((pgdat)->node_start_pfn + (pagenr))
#endif
#define nid_page_nr(nid, pagenr)        pgdat_page_nr(NODE_DATA(nid),(pagenr))

#define node_start_pfn(nid)     (NODE_DATA(nid)->node_start_pfn)
#define node_end_pfn(nid) pgdat_end_pfn(NODE_DATA(nid))

static inline struct lruvec *node_lruvec(struct pglist_data *pgdat)
{
        return &pgdat->lruvec;
}

static inline unsigned long pgdat_end_pfn(pg_data_t *pgdat)
{
        return pgdat->node_start_pfn + pgdat->node_spanned_pages;
}

static inline bool pgdat_is_empty(pg_data_t *pgdat)
{
        return !pgdat->node_start_pfn && !pgdat->node_spanned_pages;
}

#include <linux/memory_hotplug.h>

void build_all_zonelists(pg_data_t *pgdat);
void wakeup_kswapd(struct zone *zone, gfp_t gfp_mask, int order,
                   enum zone_type classzone_idx);
bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
                         int classzone_idx, unsigned int alloc_flags,
                         long free_pages);
bool zone_watermark_ok(struct zone *z, unsigned int order,
                       unsigned long mark, int classzone_idx,
                       unsigned int alloc_flags);
bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
                            unsigned long mark, int classzone_idx);
enum memmap_context {
        MEMMAP_EARLY,
        MEMMAP_HOTPLUG,
};
extern void init_currently_empty_zone(struct zone *zone, unsigned long start_pfn,
                                      unsigned long size);

extern void lruvec_init(struct lruvec *lruvec);

static inline struct pglist_data *lruvec_pgdat(struct lruvec *lruvec)
{
#ifdef CONFIG_MEMCG
        return lruvec->pgdat;
#else
        return container_of(lruvec, struct pglist_data, lruvec);
#endif
}

extern unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru, int zone_idx);

#ifdef CONFIG_HAVE_MEMORY_PRESENT
void memory_present(int nid, unsigned long start, unsigned long end);
#else
static inline void memory_present(int nid, unsigned long start, unsigned long end) {}
#endif

#if defined(CONFIG_SPARSEMEM)
void memblocks_present(void);
#else
static inline void memblocks_present(void) {}
#endif

#ifdef CONFIG_HAVE_MEMORYLESS_NODES
int local_memory_node(int node_id);
#else
static inline int local_memory_node(int node_id) { return node_id; };
#endif

/*
 * zone_idx() returns 0 for the ZONE_DMA zone, 1 for the ZONE_NORMAL zone, etc.
 */
#define zone_idx(zone)          ((zone) - (zone)->zone_pgdat->node_zones)
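/*
 * Illustrative sketch, not part of this header: the pgdat span helpers
 * above mirror the zone ones, so a PFN belongs to a node's span when
 *
 *        pgdat->node_start_pfn <= pfn && pfn < pgdat_end_pfn(pgdat)
 *
 * and zone_idx() simply recovers the position of a zone within its node's
 * node_zones[] array, e.g. zone_idx(&pgdat->node_zones[ZONE_NORMAL]) is
 * ZONE_NORMAL.
 */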
/*
 * Returns true if a zone has pages managed by the buddy allocator.
 * All the reclaim decisions have to use this function rather than
 * populated_zone(). If the whole zone is reserved then we can easily
 * end up with populated_zone() && !managed_zone().
 */
static inline bool managed_zone(struct zone *zone)
{
        return zone_managed_pages(zone);
}

/* Returns true if a zone has memory */
static inline bool populated_zone(struct zone *zone)
{
        return zone->present_pages;
}

#ifdef CONFIG_NUMA
static inline int zone_to_nid(struct zone *zone)
{
        return zone->node;
}

static inline void zone_set_nid(struct zone *zone, int nid)
{
        zone->node = nid;
}
#else
static inline int zone_to_nid(struct zone *zone)
{
        return 0;
}

static inline void zone_set_nid(struct zone *zone, int nid) {}
#endif

extern int movable_zone;

#ifdef CONFIG_HIGHMEM
static inline int zone_movable_is_highmem(void)
{
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
        return movable_zone == ZONE_HIGHMEM;
#else
        return (ZONE_MOVABLE - 1) == ZONE_HIGHMEM;
#endif
}
#endif

static inline int is_highmem_idx(enum zone_type idx)
{
#ifdef CONFIG_HIGHMEM
        return (idx == ZONE_HIGHMEM ||
                (idx == ZONE_MOVABLE && zone_movable_is_highmem()));
#else
        return 0;
#endif
}

/**
 * is_highmem - helper function to quickly check if a struct zone is a
 *              highmem zone or not.  This is an attempt to keep references
 *              to ZONE_{DMA/NORMAL/HIGHMEM/etc} in general code to a minimum.
 * @zone - pointer to struct zone variable
 */
static inline int is_highmem(struct zone *zone)
{
#ifdef CONFIG_HIGHMEM
        return is_highmem_idx(zone_idx(zone));
#else
        return 0;
#endif
}

/* These functions are used to set up the per zone pages min values */
struct ctl_table;
int min_free_kbytes_sysctl_handler(struct ctl_table *, int,
                                        void __user *, size_t *, loff_t *);
int watermark_boost_factor_sysctl_handler(struct ctl_table *, int,
                                        void __user *, size_t *, loff_t *);
int watermark_scale_factor_sysctl_handler(struct ctl_table *, int,
                                        void __user *, size_t *, loff_t *);
extern int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES];
int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int,
                                        void __user *, size_t *, loff_t *);
int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *, int,
                                        void __user *, size_t *, loff_t *);
int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *, int,
                                        void __user *, size_t *, loff_t *);
int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *, int,
                                        void __user *, size_t *, loff_t *);

extern int numa_zonelist_order_handler(struct ctl_table *, int,
                        void __user *, size_t *, loff_t *);
extern char numa_zonelist_order[];
#define NUMA_ZONELIST_ORDER_LEN 16

#ifndef CONFIG_NEED_MULTIPLE_NODES

extern struct pglist_data contig_page_data;
#define NODE_DATA(nid)          (&contig_page_data)
#define NODE_MEM_MAP(nid)       mem_map

#else /* CONFIG_NEED_MULTIPLE_NODES */

#include <asm/mmzone.h>

#endif /* !CONFIG_NEED_MULTIPLE_NODES */

extern struct pglist_data *first_online_pgdat(void);
extern struct pglist_data *next_online_pgdat(struct pglist_data *pgdat);
extern struct zone *next_zone(struct zone *zone);
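/*
 * Illustrative sketch, not part of this header: reclaim-style loops are
 * expected to filter on managed_zone() rather than populated_zone(),
 * because a zone may have present pages that are all reserved.
 * for_each_populated_zone() is defined just below, and scan() stands in
 * for the caller's work.
 *
 *        for_each_populated_zone(zone) {
 *                if (!managed_zone(zone))
 *                        continue;
 *                scan(zone);
 *        }
 */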
/**
 * for_each_online_pgdat - helper macro to iterate over all online nodes
 * @pgdat - pointer to a pg_data_t variable
 */
#define for_each_online_pgdat(pgdat)                    \
        for (pgdat = first_online_pgdat();              \
             pgdat;                                     \
             pgdat = next_online_pgdat(pgdat))
/**
 * for_each_zone - helper macro to iterate over all memory zones
 * @zone - pointer to struct zone variable
 *
 * The user only needs to declare the zone variable, for_each_zone
 * fills it in.
 */
#define for_each_zone(zone)                             \
        for (zone = (first_online_pgdat())->node_zones; \
             zone;                                      \
             zone = next_zone(zone))

#define for_each_populated_zone(zone)                   \
        for (zone = (first_online_pgdat())->node_zones; \
             zone;                                      \
             zone = next_zone(zone))                    \
                if (!populated_zone(zone))              \
                        ; /* do nothing */              \
                else

static inline struct zone *zonelist_zone(struct zoneref *zoneref)
{
        return zoneref->zone;
}

static inline int zonelist_zone_idx(struct zoneref *zoneref)
{
        return zoneref->zone_idx;
}

static inline int zonelist_node_idx(struct zoneref *zoneref)
{
        return zone_to_nid(zoneref->zone);
}

struct zoneref *__next_zones_zonelist(struct zoneref *z,
                                        enum zone_type highest_zoneidx,
                                        nodemask_t *nodes);

/**
 * next_zones_zonelist - Returns the next zone at or below highest_zoneidx within the allowed nodemask using a cursor within a zonelist as a starting point
 * @z - The cursor used as a starting point for the search
 * @highest_zoneidx - The zone index of the highest zone to return
 * @nodes - An optional nodemask to filter the zonelist with
 *
 * This function returns the next zone at or below a given zone index that is
 * within the allowed nodemask using a cursor as the starting point for the
 * search. The zoneref returned is a cursor that represents the current zone
 * being examined. It should be advanced by one before calling
 * next_zones_zonelist again.
 */
static __always_inline struct zoneref *next_zones_zonelist(struct zoneref *z,
                                        enum zone_type highest_zoneidx,
                                        nodemask_t *nodes)
{
        if (likely(!nodes && zonelist_zone_idx(z) <= highest_zoneidx))
                return z;
        return __next_zones_zonelist(z, highest_zoneidx, nodes);
}
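/*
 * Illustrative sketch, not part of this header: walking a zonelist by hand
 * with the cursor protocol described above.  The zonelist pointer is assumed
 * to come from some pgdat's node_zonelists[ZONELIST_FALLBACK], examine()
 * stands in for the caller's work, and first_zones_zonelist() is defined
 * just below.
 *
 *        struct zoneref *z = first_zones_zonelist(zonelist, highidx, NULL);
 *
 *        while (zonelist_zone(z)) {
 *                examine(zonelist_zone(z));
 *                z = next_zones_zonelist(++z, highidx, NULL);
 *        }
 */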
/**
 * first_zones_zonelist - Returns the first zone at or below highest_zoneidx within the allowed nodemask in a zonelist
 * @zonelist - The zonelist to search for a suitable zone
 * @highest_zoneidx - The zone index of the highest zone to return
 * @nodes - An optional nodemask to filter the zonelist with
 * @return - Zoneref pointer for the first suitable zone found (see below)
 *
 * This function returns the first zone at or below a given zone index that is
 * within the allowed nodemask. The zoneref returned is a cursor that can be
 * used to iterate the zonelist with next_zones_zonelist by advancing it by
 * one before calling.
 *
 * When no eligible zone is found, zoneref->zone is NULL (zoneref itself is
 * never NULL). This may happen either genuinely, or due to concurrent nodemask
 * update due to cpuset modification.
 */
static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
                                        enum zone_type highest_zoneidx,
                                        nodemask_t *nodes)
{
        return next_zones_zonelist(zonelist->_zonerefs,
                                   highest_zoneidx, nodes);
}

/**
 * for_each_zone_zonelist_nodemask - helper macro to iterate over valid zones in a zonelist at or below a given zone index and within a nodemask
 * @zone - The current zone in the iterator
 * @z - The current pointer within zonelist->zones being iterated
 * @zlist - The zonelist being iterated
 * @highidx - The zone index of the highest zone to return
 * @nodemask - Nodemask allowed by the allocator
 *
 * This iterator iterates through all zones at or below a given zone index and
 * within a given nodemask
 */
#define for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, nodemask) \
        for (z = first_zones_zonelist(zlist, highidx, nodemask), zone = zonelist_zone(z); \
                zone;                                                   \
                z = next_zones_zonelist(++z, highidx, nodemask),        \
                        zone = zonelist_zone(z))

#define for_next_zone_zonelist_nodemask(zone, z, zlist, highidx, nodemask) \
        for (zone = z->zone;                                            \
                zone;                                                   \
                z = next_zones_zonelist(++z, highidx, nodemask),        \
                        zone = zonelist_zone(z))


/**
 * for_each_zone_zonelist - helper macro to iterate over valid zones in a zonelist at or below a given zone index
 * @zone - The current zone in the iterator
 * @z - The current pointer within zonelist->zones being iterated
 * @zlist - The zonelist being iterated
 * @highidx - The zone index of the highest zone to return
 *
 * This iterator iterates through all zones at or below a given zone index.
 */
#define for_each_zone_zonelist(zone, z, zlist, highidx) \
        for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, NULL)

#ifdef CONFIG_SPARSEMEM
#include <asm/sparsemem.h>
#endif

#if !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) && \
        !defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP)
static inline unsigned long early_pfn_to_nid(unsigned long pfn)
{
        BUILD_BUG_ON(IS_ENABLED(CONFIG_NUMA));
        return 0;
}
#endif

#ifdef CONFIG_FLATMEM
#define pfn_to_nid(pfn)         (0)
#endif

#ifdef CONFIG_SPARSEMEM

/*
 * SECTION_SHIFT                #bits space required to store a section #
 *
 * PA_SECTION_SHIFT             physical address to/from section number
 * PFN_SECTION_SHIFT            pfn to/from section number
 */
#define PA_SECTION_SHIFT        (SECTION_SIZE_BITS)
#define PFN_SECTION_SHIFT       (SECTION_SIZE_BITS - PAGE_SHIFT)

#define NR_MEM_SECTIONS         (1UL << SECTIONS_SHIFT)

#define PAGES_PER_SECTION       (1UL << PFN_SECTION_SHIFT)
#define PAGE_SECTION_MASK       (~(PAGES_PER_SECTION-1))

#define SECTION_BLOCKFLAGS_BITS \
        ((1UL << (PFN_SECTION_SHIFT - pageblock_order)) * NR_PAGEBLOCK_BITS)

#if (MAX_ORDER - 1 + PAGE_SHIFT) > SECTION_SIZE_BITS
#error Allocator MAX_ORDER exceeds SECTION_SIZE
#endif

static inline unsigned long pfn_to_section_nr(unsigned long pfn)
{
        return pfn >> PFN_SECTION_SHIFT;
}
static inline unsigned long section_nr_to_pfn(unsigned long sec)
{
        return sec << PFN_SECTION_SHIFT;
}

#define SECTION_ALIGN_UP(pfn)   (((pfn) + PAGES_PER_SECTION - 1) & PAGE_SECTION_MASK)
#define SECTION_ALIGN_DOWN(pfn) ((pfn) & PAGE_SECTION_MASK)
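/*
 * Worked example, illustrative only, assuming SECTION_SIZE_BITS == 27 and a
 * 4 KiB PAGE_SIZE (the x86_64 values): PFN_SECTION_SHIFT = 27 - 12 = 15, so
 * a section covers PAGES_PER_SECTION = 1 << 15 = 32768 pages (128 MiB), and
 *
 *        pfn_to_section_nr(0x248000) == 0x49
 *        section_nr_to_pfn(0x49)     == 0x248000
 */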
#define SUBSECTION_SHIFT 21

#define PFN_SUBSECTION_SHIFT (SUBSECTION_SHIFT - PAGE_SHIFT)
#define PAGES_PER_SUBSECTION (1UL << PFN_SUBSECTION_SHIFT)
#define PAGE_SUBSECTION_MASK (~(PAGES_PER_SUBSECTION-1))

#if SUBSECTION_SHIFT > SECTION_SIZE_BITS
#error Subsection size exceeds section size
#else
#define SUBSECTIONS_PER_SECTION (1UL << (SECTION_SIZE_BITS - SUBSECTION_SHIFT))
#endif

#define SUBSECTION_ALIGN_UP(pfn) ALIGN((pfn), PAGES_PER_SUBSECTION)
#define SUBSECTION_ALIGN_DOWN(pfn) ((pfn) & PAGE_SUBSECTION_MASK)

struct mem_section_usage {
        DECLARE_BITMAP(subsection_map, SUBSECTIONS_PER_SECTION);
        /* See declaration of similar field in struct zone */
        unsigned long pageblock_flags[0];
};

void subsection_map_init(unsigned long pfn, unsigned long nr_pages);

struct page;
struct page_ext;
struct mem_section {
        /*
         * This is, logically, a pointer to an array of struct
         * pages.  However, it is stored with some other magic.
         * (see sparse.c::sparse_init_one_section())
         *
         * Additionally, during early boot we encode the node id of
         * the section's location here to guide allocation.
         * (see sparse.c::memory_present())
         *
         * Making it a UL at least makes someone do a cast
         * before using it wrong.
         */
        unsigned long section_mem_map;

        struct mem_section_usage *usage;
#ifdef CONFIG_PAGE_EXTENSION
        /*
         * If SPARSEMEM, pgdat doesn't have page_ext pointer. We use
         * section. (see page_ext.h about this.)
         */
        struct page_ext *page_ext;
        unsigned long pad;
#endif
        /*
         * WARNING: mem_section must be a power-of-2 in size for the
         * calculation and use of SECTION_ROOT_MASK to make sense.
         */
};

#ifdef CONFIG_SPARSEMEM_EXTREME
#define SECTIONS_PER_ROOT       (PAGE_SIZE / sizeof (struct mem_section))
#else
#define SECTIONS_PER_ROOT       1
#endif

#define SECTION_NR_TO_ROOT(sec) ((sec) / SECTIONS_PER_ROOT)
#define NR_SECTION_ROOTS        DIV_ROUND_UP(NR_MEM_SECTIONS, SECTIONS_PER_ROOT)
#define SECTION_ROOT_MASK       (SECTIONS_PER_ROOT - 1)

#ifdef CONFIG_SPARSEMEM_EXTREME
extern struct mem_section **mem_section;
#else
extern struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT];
#endif

static inline unsigned long *section_to_usemap(struct mem_section *ms)
{
        return ms->usage->pageblock_flags;
}

static inline struct mem_section *__nr_to_section(unsigned long nr)
{
#ifdef CONFIG_SPARSEMEM_EXTREME
        if (!mem_section)
                return NULL;
#endif
        if (!mem_section[SECTION_NR_TO_ROOT(nr)])
                return NULL;
        return &mem_section[SECTION_NR_TO_ROOT(nr)][nr & SECTION_ROOT_MASK];
}
extern unsigned long __section_nr(struct mem_section *ms);
extern size_t mem_section_usage_size(void);
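/*
 * Illustrative sketch, not part of this header: with SPARSEMEM_EXTREME the
 * section table is two-level, and __nr_to_section() above performs exactly
 * this split of a section number:
 *
 *        unsigned long root = SECTION_NR_TO_ROOT(nr);
 *        unsigned long offset = nr & SECTION_ROOT_MASK;
 *        struct mem_section *ms = &mem_section[root][offset];
 *
 * i.e. nr / SECTIONS_PER_ROOT selects the root and nr % SECTIONS_PER_ROOT
 * selects the entry within it.
 */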
/*
 * We use the lower bits of the mem_map pointer to store
 * a little bit of information.  The pointer is calculated
 * as mem_map - section_nr_to_pfn(pnum).  The result is
 * aligned to the minimum alignment of the two values:
 *   1. All mem_map arrays are page-aligned.
 *   2. section_nr_to_pfn() always clears PFN_SECTION_SHIFT
 *      lowest bits.  PFN_SECTION_SHIFT is arch-specific
 *      (equal to SECTION_SIZE_BITS - PAGE_SHIFT), and the
 *      worst combination is powerpc with 256k pages,
 *      which results in PFN_SECTION_SHIFT equal to 6.
 * To sum it up, at least 6 bits are available.
 */
#define SECTION_MARKED_PRESENT  (1UL<<0)
#define SECTION_HAS_MEM_MAP     (1UL<<1)
#define SECTION_IS_ONLINE       (1UL<<2)
#define SECTION_IS_EARLY        (1UL<<3)
#define SECTION_MAP_LAST_BIT    (1UL<<4)
#define SECTION_MAP_MASK        (~(SECTION_MAP_LAST_BIT-1))
#define SECTION_NID_SHIFT       3

static inline struct page *__section_mem_map_addr(struct mem_section *section)
{
        unsigned long map = section->section_mem_map;
        map &= SECTION_MAP_MASK;
        return (struct page *)map;
}

static inline int present_section(struct mem_section *section)
{
        return (section && (section->section_mem_map & SECTION_MARKED_PRESENT));
}

static inline int present_section_nr(unsigned long nr)
{
        return present_section(__nr_to_section(nr));
}

static inline int valid_section(struct mem_section *section)
{
        return (section && (section->section_mem_map & SECTION_HAS_MEM_MAP));
}

static inline int early_section(struct mem_section *section)
{
        return (section && (section->section_mem_map & SECTION_IS_EARLY));
}

static inline int valid_section_nr(unsigned long nr)
{
        return valid_section(__nr_to_section(nr));
}

static inline int online_section(struct mem_section *section)
{
        return (section && (section->section_mem_map & SECTION_IS_ONLINE));
}

static inline int online_section_nr(unsigned long nr)
{
        return online_section(__nr_to_section(nr));
}

#ifdef CONFIG_MEMORY_HOTPLUG
void online_mem_sections(unsigned long start_pfn, unsigned long end_pfn);
#ifdef CONFIG_MEMORY_HOTREMOVE
void offline_mem_sections(unsigned long start_pfn, unsigned long end_pfn);
#endif
#endif

static inline struct mem_section *__pfn_to_section(unsigned long pfn)
{
        return __nr_to_section(pfn_to_section_nr(pfn));
}

extern unsigned long __highest_present_section_nr;

static inline int subsection_map_index(unsigned long pfn)
{
        return (pfn & ~(PAGE_SECTION_MASK)) / PAGES_PER_SUBSECTION;
}

#ifdef CONFIG_SPARSEMEM_VMEMMAP
static inline int pfn_section_valid(struct mem_section *ms, unsigned long pfn)
{
        int idx = subsection_map_index(pfn);

        return test_bit(idx, ms->usage->subsection_map);
}
#else
static inline int pfn_section_valid(struct mem_section *ms, unsigned long pfn)
{
        return 1;
}
#endif

#ifndef CONFIG_HAVE_ARCH_PFN_VALID
static inline int pfn_valid(unsigned long pfn)
{
        struct mem_section *ms;

        if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
                return 0;
        ms = __nr_to_section(pfn_to_section_nr(pfn));
        if (!valid_section(ms))
                return 0;
        /*
         * Traditionally early sections always returned pfn_valid() for
         * the entire section-sized span.
         */
        return early_section(ms) || pfn_section_valid(ms, pfn);
}
#endif

static inline int pfn_present(unsigned long pfn)
{
        if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
                return 0;
        return present_section(__nr_to_section(pfn_to_section_nr(pfn)));
}
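/*
 * Illustrative sketch, not part of this header: a memmap walker is expected
 * to validate a PFN before touching its struct page, and to remember that
 * pfn_valid() alone does not guarantee the page is online or initialized.
 *
 *        if (!pfn_valid(pfn))
 *                continue;
 *        page = pfn_to_page(pfn);
 */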
/*
 * These are _only_ used during initialisation, therefore they
 * can use __initdata ...  They could have names to indicate
 * this restriction.
 */
#ifdef CONFIG_NUMA
#define pfn_to_nid(pfn)                                                 \
({                                                                      \
        unsigned long __pfn_to_nid_pfn = (pfn);                         \
        page_to_nid(pfn_to_page(__pfn_to_nid_pfn));                     \
})
#else
#define pfn_to_nid(pfn)         (0)
#endif

#define early_pfn_valid(pfn)    pfn_valid(pfn)
void sparse_init(void);
#else
#define sparse_init()   do {} while (0)
#define sparse_index_init(_sec, _nid)  do {} while (0)
#define pfn_present pfn_valid
#define subsection_map_init(_pfn, _nr_pages) do {} while (0)
#endif /* CONFIG_SPARSEMEM */

/*
 * During memory init, memblocks map pfns to nids. The search is expensive and
 * this caches recent lookups. The implementation of __early_pfn_to_nid
 * may treat start/end as pfns or sections.
 */
struct mminit_pfnnid_cache {
        unsigned long last_start;
        unsigned long last_end;
        int last_nid;
};

#ifndef early_pfn_valid
#define early_pfn_valid(pfn)    (1)
#endif

void memory_present(int nid, unsigned long start, unsigned long end);

/*
 * If it is possible to have holes within a MAX_ORDER_NR_PAGES block, then we
 * need to check pfn validity within that MAX_ORDER_NR_PAGES block.
 * pfn_valid_within() should be used in this case; we optimise this away
 * when we have no holes within a MAX_ORDER_NR_PAGES block.
 */
#ifdef CONFIG_HOLES_IN_ZONE
#define pfn_valid_within(pfn) pfn_valid(pfn)
#else
#define pfn_valid_within(pfn) (1)
#endif

#ifdef CONFIG_ARCH_HAS_HOLES_MEMORYMODEL
/*
 * pfn_valid() is meant to be able to tell if a given PFN has valid memmap
 * associated with it or not. This means that a struct page exists for this
 * pfn. The caller cannot assume the page is fully initialized in general.
 * Hotpluggable pages might not have been onlined yet. pfn_to_online_page()
 * will ensure the struct page is fully online and initialized. Special pages
 * (e.g. ZONE_DEVICE) are never onlined and should be treated accordingly.
 *
 * In FLATMEM, it is expected that holes always have valid memmap as long as
 * there are valid PFNs either side of the hole. In SPARSEMEM, it is assumed
 * that a valid section has a memmap for the entire section.
 *
 * However, ARM, and maybe other embedded architectures in the future, may
 * free the memmap backing holes to save memory on the assumption that the
 * memmap is never used. The page_zone linkages are then broken even though
 * pfn_valid() returns true. A walker of the full memmap must then do this
 * additional check to ensure the memmap they are looking at is sane by
 * making sure the zone and PFN linkages are still valid. This is expensive,
 * but walkers of the full memmap are extremely rare.
 */
bool memmap_valid_within(unsigned long pfn,
                         struct page *page, struct zone *zone);
#else
static inline bool memmap_valid_within(unsigned long pfn,
                                       struct page *page, struct zone *zone)
{
        return true;
}
#endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */

#endif /* !__GENERATING_BOUNDS.H */
#endif /* !__ASSEMBLY__ */
#endif /* _LINUX_MMZONE_H */