/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MMZONE_H
#define _LINUX_MMZONE_H

#ifndef __ASSEMBLY__
#ifndef __GENERATING_BOUNDS_H

#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/bitops.h>
#include <linux/cache.h>
#include <linux/threads.h>
#include <linux/numa.h>
#include <linux/init.h>
#include <linux/seqlock.h>
#include <linux/nodemask.h>
#include <linux/pageblock-flags.h>
#include <linux/page-flags-layout.h>
#include <linux/atomic.h>
#include <asm/page.h>

/* Free memory management - zoned buddy allocator. */
#ifndef CONFIG_FORCE_MAX_ZONEORDER
#define MAX_ORDER 11
#else
#define MAX_ORDER CONFIG_FORCE_MAX_ZONEORDER
#endif
#define MAX_ORDER_NR_PAGES (1 << (MAX_ORDER - 1))

/*
 * PAGE_ALLOC_COSTLY_ORDER is the order at which allocations are deemed
 * costly to service.  That is between allocation orders which should
 * coalesce naturally under reasonable reclaim pressure and those which
 * will not.
 */
#define PAGE_ALLOC_COSTLY_ORDER 3

enum migratetype {
	MIGRATE_UNMOVABLE,
	MIGRATE_MOVABLE,
	MIGRATE_RECLAIMABLE,
	MIGRATE_PCPTYPES,	/* the number of types on the pcp lists */
	MIGRATE_HIGHATOMIC = MIGRATE_PCPTYPES,
#ifdef CONFIG_CMA
	/*
	 * The MIGRATE_CMA migration type is designed to mimic the way
	 * ZONE_MOVABLE works.  Only movable pages can be allocated
	 * from MIGRATE_CMA pageblocks, and the page allocator never
	 * implicitly changes the migration type of a MIGRATE_CMA
	 * pageblock.
	 *
	 * The way to use it is to change the migratetype of a range of
	 * pageblocks to MIGRATE_CMA, which can be done with the
	 * __free_pageblock_cma() function.  What is important, though,
	 * is that the range of pageblocks must be aligned to
	 * MAX_ORDER_NR_PAGES should the biggest page be bigger than
	 * a single pageblock.
	 */
	MIGRATE_CMA,
#endif
#ifdef CONFIG_MEMORY_ISOLATION
	MIGRATE_ISOLATE,	/* can't allocate from here */
#endif
	MIGRATE_TYPES
};

/* In mm/page_alloc.c; keep in sync also with show_migration_types() there */
extern char * const migratetype_names[MIGRATE_TYPES];

#ifdef CONFIG_CMA
#  define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA)
#  define is_migrate_cma_page(_page) (get_pageblock_migratetype(_page) == MIGRATE_CMA)
#else
#  define is_migrate_cma(migratetype) false
#  define is_migrate_cma_page(_page) false
#endif

static inline bool is_migrate_movable(int mt)
{
	return is_migrate_cma(mt) || mt == MIGRATE_MOVABLE;
}

#define for_each_migratetype_order(order, type) \
	for (order = 0; order < MAX_ORDER; order++) \
		for (type = 0; type < MIGRATE_TYPES; type++)

extern int page_group_by_mobility_disabled;

#define NR_MIGRATETYPE_BITS (PB_migrate_end - PB_migrate + 1)
#define MIGRATETYPE_MASK ((1UL << NR_MIGRATETYPE_BITS) - 1)

#define get_pageblock_migratetype(page)					\
	get_pfnblock_flags_mask(page, page_to_pfn(page),		\
			PB_migrate_end, MIGRATETYPE_MASK)

struct free_area {
	struct list_head	free_list[MIGRATE_TYPES];
	unsigned long		nr_free;
};

struct pglist_data;
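/*
 * Illustrative sketch, not part of the kernel API: the order arithmetic
 * above in everyday use.  An order-N block spans (1 << N) pages, the
 * buddy allocator serves orders 0..MAX_ORDER-1 (so MAX_ORDER_NR_PAGES
 * is the largest contiguous run it can hand out, 4 MiB with the default
 * MAX_ORDER of 11 and 4 KiB pages), and anything above
 * PAGE_ALLOC_COSTLY_ORDER is treated as expensive to reclaim for.
 * The helper names below are hypothetical.
 */
static inline bool example_order_is_costly(unsigned int order)
{
	return order > PAGE_ALLOC_COSTLY_ORDER;
}

static inline unsigned long example_order_to_bytes(unsigned int order)
{
	return (1UL << order) << PAGE_SHIFT;
}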
/*
 * zone->lock and the zone lru_lock are two of the hottest locks in the
 * kernel.  So add a wild amount of padding here to ensure that they fall
 * into separate cachelines.  There are very few zone structures in the
 * machine, so space consumption is not a concern here.
 */
#if defined(CONFIG_SMP)
struct zone_padding {
	char x[0];
} ____cacheline_internodealigned_in_smp;
#define ZONE_PADDING(name)	struct zone_padding name;
#else
#define ZONE_PADDING(name)
#endif

#ifdef CONFIG_NUMA
enum numa_stat_item {
	NUMA_HIT,		/* allocated in intended node */
	NUMA_MISS,		/* allocated in non-intended node */
	NUMA_FOREIGN,		/* was intended here, hit elsewhere */
	NUMA_INTERLEAVE_HIT,	/* interleaver preferred this zone */
	NUMA_LOCAL,		/* allocation from local node */
	NUMA_OTHER,		/* allocation from other node */
	NR_VM_NUMA_STAT_ITEMS
};
#else
#define NR_VM_NUMA_STAT_ITEMS 0
#endif

enum zone_stat_item {
	/* First 128 byte cacheline (assuming 64 bit words) */
	NR_FREE_PAGES,
	NR_ZONE_LRU_BASE,	/* Used only for compaction and reclaim retry */
	NR_ZONE_INACTIVE_ANON = NR_ZONE_LRU_BASE,
	NR_ZONE_ACTIVE_ANON,
	NR_ZONE_INACTIVE_FILE,
	NR_ZONE_ACTIVE_FILE,
	NR_ZONE_UNEVICTABLE,
	NR_ZONE_WRITE_PENDING,	/* Count of dirty, writeback and unstable pages */
	NR_MLOCK,		/* mlock()ed pages found and moved off LRU */
	NR_PAGETABLE,		/* used for pagetables */
	NR_KERNEL_STACK_KB,	/* measured in KiB */
	/* Second 128 byte cacheline */
	NR_BOUNCE,
#if IS_ENABLED(CONFIG_ZSMALLOC)
	NR_ZSPAGES,		/* allocated in zsmalloc */
#endif
	NR_FREE_CMA_PAGES,
	NR_VM_ZONE_STAT_ITEMS
};

enum node_stat_item {
	NR_LRU_BASE,
	NR_INACTIVE_ANON = NR_LRU_BASE, /* must match order of LRU_[IN]ACTIVE */
	NR_ACTIVE_ANON,		/*  "     "     "   "       "         */
	NR_INACTIVE_FILE,	/*  "     "     "   "       "         */
	NR_ACTIVE_FILE,		/*  "     "     "   "       "         */
	NR_UNEVICTABLE,		/*  "     "     "   "       "         */
	NR_SLAB_RECLAIMABLE,
	NR_SLAB_UNRECLAIMABLE,
	NR_ISOLATED_ANON,	/* Temporary isolated pages from anon lru */
	NR_ISOLATED_FILE,	/* Temporary isolated pages from file lru */
	WORKINGSET_NODES,
	WORKINGSET_REFAULT,
	WORKINGSET_ACTIVATE,
	WORKINGSET_RESTORE,
	WORKINGSET_NODERECLAIM,
	NR_ANON_MAPPED,		/* Mapped anonymous pages */
	NR_FILE_MAPPED,		/* pagecache pages mapped into pagetables.
				   only modified from process context */
	NR_FILE_PAGES,
	NR_FILE_DIRTY,
	NR_WRITEBACK,
	NR_WRITEBACK_TEMP,	/* Writeback using temporary buffers */
	NR_SHMEM,		/* shmem pages (includes tmpfs/GEM pages) */
	NR_SHMEM_THPS,
	NR_SHMEM_PMDMAPPED,
	NR_ANON_THPS,
	NR_UNSTABLE_NFS,	/* NFS unstable pages */
	NR_VMSCAN_WRITE,
	NR_VMSCAN_IMMEDIATE,	/* Prioritise for reclaim when writeback ends */
	NR_DIRTIED,		/* page dirtyings since bootup */
	NR_WRITTEN,		/* page writings since bootup */
	NR_KERNEL_MISC_RECLAIMABLE,	/* reclaimable non-slab kernel pages */
	NR_VM_NODE_STAT_ITEMS
};

/*
 * We do arithmetic on the LRU lists in various places in the code,
 * so it is important to keep the active lists LRU_ACTIVE higher in
 * the array than the corresponding inactive lists, and to keep
 * the *_FILE lists LRU_FILE higher than the corresponding _ANON lists.
 *
 * This has to be kept in sync with the statistics in zone_stat_item
 * above and the descriptions in vmstat_text in mm/vmstat.c.
 */
#define LRU_BASE 0
#define LRU_ACTIVE 1
#define LRU_FILE 2

enum lru_list {
	LRU_INACTIVE_ANON = LRU_BASE,
	LRU_ACTIVE_ANON = LRU_BASE + LRU_ACTIVE,
	LRU_INACTIVE_FILE = LRU_BASE + LRU_FILE,
	LRU_ACTIVE_FILE = LRU_BASE + LRU_FILE + LRU_ACTIVE,
	LRU_UNEVICTABLE,
	NR_LRU_LISTS
};

#define for_each_lru(lru) for (lru = 0; lru < NR_LRU_LISTS; lru++)

#define for_each_evictable_lru(lru) for (lru = 0; lru <= LRU_ACTIVE_FILE; lru++)

static inline int is_file_lru(enum lru_list lru)
{
	return (lru == LRU_INACTIVE_FILE || lru == LRU_ACTIVE_FILE);
}

static inline int is_active_lru(enum lru_list lru)
{
	return (lru == LRU_ACTIVE_ANON || lru == LRU_ACTIVE_FILE);
}
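/*
 * Illustrative sketch (hypothetical helper): the LRU_BASE/LRU_ACTIVE/
 * LRU_FILE constants turn an (active, file) pair into a list index by
 * plain addition, which is exactly the arithmetic the comment above
 * asks us to preserve.
 */
static inline enum lru_list example_lru_index(bool active, bool file)
{
	return LRU_BASE + (file ? LRU_FILE : 0) + (active ? LRU_ACTIVE : 0);
}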
struct zone_reclaim_stat {
	/*
	 * The pageout code in vmscan.c keeps track of how many of the
	 * mem/swap backed and file backed pages are referenced.
	 * The higher the rotated/scanned ratio, the more valuable
	 * that cache is.
	 *
	 * The anon LRU stats live in [0], file LRU stats in [1]
	 */
	unsigned long		recent_rotated[2];
	unsigned long		recent_scanned[2];
};

struct lruvec {
	struct list_head		lists[NR_LRU_LISTS];
	struct zone_reclaim_stat	reclaim_stat;
	/* Evictions & activations on the inactive file list */
	atomic_long_t			inactive_age;
	/* Refaults at the time of last reclaim cycle */
	unsigned long			refaults;
#ifdef CONFIG_MEMCG
	struct pglist_data *pgdat;
#endif
};

/* Mask used for gathering information at once (see memcontrol.c) */
#define LRU_ALL_FILE (BIT(LRU_INACTIVE_FILE) | BIT(LRU_ACTIVE_FILE))
#define LRU_ALL_ANON (BIT(LRU_INACTIVE_ANON) | BIT(LRU_ACTIVE_ANON))
#define LRU_ALL	     ((1 << NR_LRU_LISTS) - 1)

/* LRU Isolation modes. */
typedef unsigned __bitwise isolate_mode_t;

/* Isolate unmapped file */
#define ISOLATE_UNMAPPED	((__force isolate_mode_t)0x2)
/* Isolate for asynchronous migration */
#define ISOLATE_ASYNC_MIGRATE	((__force isolate_mode_t)0x4)
/* Isolate unevictable pages */
#define ISOLATE_UNEVICTABLE	((__force isolate_mode_t)0x8)

enum zone_watermarks {
	WMARK_MIN,
	WMARK_LOW,
	WMARK_HIGH,
	NR_WMARK
};

#define min_wmark_pages(z) (z->watermark[WMARK_MIN])
#define low_wmark_pages(z) (z->watermark[WMARK_LOW])
#define high_wmark_pages(z) (z->watermark[WMARK_HIGH])

struct per_cpu_pages {
	int count;		/* number of pages in the list */
	int high;		/* high watermark, emptying needed */
	int batch;		/* chunk size for buddy add/remove */

	/* Lists of pages, one per migrate type stored on the pcp-lists */
	struct list_head lists[MIGRATE_PCPTYPES];
};

struct per_cpu_pageset {
	struct per_cpu_pages pcp;
#ifdef CONFIG_NUMA
	s8 expire;
	u16 vm_numa_stat_diff[NR_VM_NUMA_STAT_ITEMS];
#endif
#ifdef CONFIG_SMP
	s8 stat_threshold;
	s8 vm_stat_diff[NR_VM_ZONE_STAT_ITEMS];
#endif
};

struct per_cpu_nodestat {
	s8 stat_threshold;
	s8 vm_node_stat_diff[NR_VM_NODE_STAT_ITEMS];
};
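/*
 * Illustrative sketch (hypothetical helper, not the kernel's): the
 * per-cpu lists are trimmed back to the buddy lists in 'batch'-sized
 * chunks once 'count' climbs past 'high'; this predicate mirrors the
 * check the page freeing path in mm/page_alloc.c performs.
 */
static inline bool example_pcp_needs_drain(const struct per_cpu_pages *pcp)
{
	return pcp->count >= pcp->high;
}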
#endif /* !__GENERATING_BOUNDS_H */

enum zone_type {
#ifdef CONFIG_ZONE_DMA
	/*
	 * ZONE_DMA is used when there are devices that are not able
	 * to do DMA to all of addressable memory (ZONE_NORMAL).  Then we
	 * carve out the portion of memory that is needed for these devices.
	 * The range is arch specific.
	 *
	 * Some examples
	 *
	 * Architecture		Limit
	 * ---------------------------
	 * parisc, ia64, sparc	<4G
	 * s390			<2G
	 * arm			Various
	 * alpha		Unlimited or 0-16MB.
	 *
	 * i386, x86_64 and multiple other arches
	 *			<16M.
	 */
	ZONE_DMA,
#endif
#ifdef CONFIG_ZONE_DMA32
	/*
	 * x86_64 needs two ZONE_DMAs because it supports devices that are
	 * only able to do DMA to the lower 16M, but also 32 bit devices
	 * that can only do DMA to areas below 4G.
	 */
	ZONE_DMA32,
#endif
	/*
	 * Normal addressable memory is in ZONE_NORMAL.  DMA operations can be
	 * performed on pages in ZONE_NORMAL if the DMA devices support
	 * transfers to all addressable memory.
	 */
	ZONE_NORMAL,
#ifdef CONFIG_HIGHMEM
	/*
	 * A memory area that is only addressable by the kernel through
	 * mapping portions into its own address space.  This is for example
	 * used by i386 to allow the kernel to address the memory beyond
	 * 900MB.  The kernel will set up special mappings (page
	 * table entries on i386) for each page that the kernel needs to
	 * access.
	 */
	ZONE_HIGHMEM,
#endif
	ZONE_MOVABLE,
#ifdef CONFIG_ZONE_DEVICE
	ZONE_DEVICE,
#endif
	__MAX_NR_ZONES
};

#ifndef __GENERATING_BOUNDS_H

struct zone {
	/* Read-mostly fields */

	/* zone watermarks, access with *_wmark_pages(zone) macros */
	unsigned long watermark[NR_WMARK];

	unsigned long nr_reserved_highatomic;

	/*
	 * We don't know if the memory that we're going to allocate will be
	 * freeable and/or will be released eventually, so to avoid totally
	 * wasting several GB of ram we must reserve some of the lower zone
	 * memory (otherwise we risk running OOM on the lower zones despite
	 * there being tons of freeable ram on the higher zones).  This array
	 * is recalculated at runtime if the sysctl_lowmem_reserve_ratio
	 * sysctl changes.
	 */
	long lowmem_reserve[MAX_NR_ZONES];

#ifdef CONFIG_NUMA
	int node;
#endif
	struct pglist_data	*zone_pgdat;
	struct per_cpu_pageset __percpu *pageset;

#ifndef CONFIG_SPARSEMEM
	/*
	 * Flags for a pageblock_nr_pages block.  See pageblock-flags.h.
	 * In SPARSEMEM, this map is stored in struct mem_section.
	 */
	unsigned long		*pageblock_flags;
#endif /* CONFIG_SPARSEMEM */

	/* zone_start_pfn == zone_start_paddr >> PAGE_SHIFT */
	unsigned long		zone_start_pfn;

	/*
	 * spanned_pages is the total pages spanned by the zone, including
	 * holes, which is calculated as:
	 *	spanned_pages = zone_end_pfn - zone_start_pfn;
	 *
	 * present_pages is physical pages existing within the zone, which
	 * is calculated as:
	 *	present_pages = spanned_pages - absent_pages(pages in holes);
	 *
	 * managed_pages is present pages managed by the buddy system, which
	 * is calculated as (reserved_pages includes pages allocated by the
	 * bootmem allocator):
	 *	managed_pages = present_pages - reserved_pages;
	 *
	 * So present_pages may be used by memory hotplug or memory power
	 * management logic to figure out unmanaged pages by checking
	 * (present_pages - managed_pages).  And managed_pages should be used
	 * by the page allocator and vm scanner to calculate all kinds of
	 * watermarks and thresholds.
	 *
	 * Locking rules:
	 *
	 * zone_start_pfn and spanned_pages are protected by span_seqlock.
	 * It is a seqlock because it has to be read outside of zone->lock,
	 * and it is done in the main allocator path.  But, it is written
	 * quite infrequently.
	 *
	 * The span_seqlock is declared along with zone->lock because it is
	 * frequently read in proximity to zone->lock.  It's good to
	 * give them a chance of being in the same cacheline.
	 *
	 * Write access to present_pages at runtime should be protected by
	 * mem_hotplug_begin/end().  Any reader who can't tolerate drift of
	 * present_pages should use get_online_mems() to get a stable value.
	 *
	 * Read access to managed_pages should be safe because it's an
	 * unsigned long.  Write access to zone->managed_pages and
	 * totalram_pages is protected by managed_page_count_lock at runtime.
	 * Ideally only adjust_managed_page_count() should be used instead of
	 * directly touching zone->managed_pages and totalram_pages.
	 */
	unsigned long		managed_pages;
	unsigned long		spanned_pages;
	unsigned long		present_pages;

	const char		*name;

#ifdef CONFIG_MEMORY_ISOLATION
	/*
	 * Number of isolated pageblocks.  It is used to solve an incorrect
	 * freepage counting problem due to racy retrieving of the
	 * migratetype of a pageblock.  Protected by zone->lock.
	 */
	unsigned long		nr_isolate_pageblock;
#endif

#ifdef CONFIG_MEMORY_HOTPLUG
	/* see spanned/present_pages for more description */
	seqlock_t		span_seqlock;
#endif

	int initialized;

	/* Write-intensive fields used from the page allocator */
	ZONE_PADDING(_pad1_)

	/* free areas of different sizes */
	struct free_area	free_area[MAX_ORDER];

	/* zone flags, see below */
	unsigned long		flags;

	/* Primarily protects free_area */
	spinlock_t		lock;

	/* Write-intensive fields used by compaction and vmstats. */
	ZONE_PADDING(_pad2_)

	/*
	 * When free pages are below this point, additional steps are taken
	 * when reading the number of free pages to avoid per-cpu counter
	 * drift allowing watermarks to be breached.
	 */
	unsigned long percpu_drift_mark;

#if defined CONFIG_COMPACTION || defined CONFIG_CMA
	/* pfn where compaction free scanner should start */
	unsigned long		compact_cached_free_pfn;
	/* pfn where async and sync compaction migration scanner should start */
	unsigned long		compact_cached_migrate_pfn[2];
#endif

#ifdef CONFIG_COMPACTION
	/*
	 * On compaction failure, 1<<compact_defer_shift compactions
	 * are skipped before trying again.  The number attempted since
	 * last failure is tracked with compact_considered.
	 */
	unsigned int		compact_considered;
	unsigned int		compact_defer_shift;
	int			compact_order_failed;
#endif

#if defined CONFIG_COMPACTION || defined CONFIG_CMA
	/* Set to true when the PG_migrate_skip bits should be cleared */
	bool			compact_blockskip_flush;
#endif

	bool			contiguous;

	ZONE_PADDING(_pad3_)
	/* Zone statistics */
	atomic_long_t		vm_stat[NR_VM_ZONE_STAT_ITEMS];
	atomic_long_t		vm_numa_stat[NR_VM_NUMA_STAT_ITEMS];
} ____cacheline_internodealigned_in_smp;
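/*
 * Illustrative sketch (hypothetical helper): with struct zone defined,
 * the *_wmark_pages() accessors above can be applied, e.g. to ask
 * whether free pages have dropped below the low watermark and kswapd
 * should be woken.  Real code would read the counter via
 * zone_page_state() from linux/vmstat.h, which also compensates for
 * per-cpu drift; the raw atomic read here keeps the sketch
 * self-contained.
 */
static inline bool example_zone_below_low_wmark(struct zone *z)
{
	unsigned long free = atomic_long_read(&z->vm_stat[NR_FREE_PAGES]);

	return free < low_wmark_pages(z);
}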
enum pgdat_flags {
	PGDAT_CONGESTED,		/* pgdat has many dirty pages backed by
					 * a congested BDI
					 */
	PGDAT_DIRTY,			/* reclaim scanning has recently found
					 * many dirty file pages at the tail
					 * of the LRU.
					 */
	PGDAT_WRITEBACK,		/* reclaim scanning has recently found
					 * many pages under writeback
					 */
	PGDAT_RECLAIM_LOCKED,		/* prevents concurrent reclaim */
};

static inline unsigned long zone_end_pfn(const struct zone *zone)
{
	return zone->zone_start_pfn + zone->spanned_pages;
}

static inline bool zone_spans_pfn(const struct zone *zone, unsigned long pfn)
{
	return zone->zone_start_pfn <= pfn && pfn < zone_end_pfn(zone);
}

static inline bool zone_is_initialized(struct zone *zone)
{
	return zone->initialized;
}

static inline bool zone_is_empty(struct zone *zone)
{
	return zone->spanned_pages == 0;
}

/*
 * Return true if the [start_pfn, start_pfn + nr_pages) range has a non-empty
 * intersection with the given zone.
 */
static inline bool zone_intersects(struct zone *zone,
		unsigned long start_pfn, unsigned long nr_pages)
{
	if (zone_is_empty(zone))
		return false;
	if (start_pfn >= zone_end_pfn(zone) ||
	    start_pfn + nr_pages <= zone->zone_start_pfn)
		return false;

	return true;
}

/*
 * The "priority" of VM scanning is how much of the queues we will scan
 * in one go.  A value of 12 for DEF_PRIORITY implies that we will scan
 * 1/4096th of the queues ("queue_length >> 12") during an aging round.
 */
#define DEF_PRIORITY 12

/* Maximum number of zones on a zonelist */
#define MAX_ZONES_PER_ZONELIST (MAX_NUMNODES * MAX_NR_ZONES)

enum {
	ZONELIST_FALLBACK,	/* zonelist with fallback */
#ifdef CONFIG_NUMA
	/*
	 * The NUMA zonelists are doubled because we need zonelists that
	 * restrict the allocations to a single node for __GFP_THISNODE.
	 */
	ZONELIST_NOFALLBACK,	/* zonelist without fallback (__GFP_THISNODE) */
#endif
	MAX_ZONELISTS
};

/*
 * This struct contains information about a zone in a zonelist.  It is stored
 * here to avoid dereferences into large structures and lookups of tables.
 */
struct zoneref {
	struct zone *zone;	/* Pointer to actual zone */
	int zone_idx;		/* zone_idx(zoneref->zone) */
};

/*
 * One allocation request operates on a zonelist.  A zonelist
 * is a list of zones, the first one is the 'goal' of the
 * allocation, the other zones are fallback zones, in decreasing
 * priority.
 *
 * To speed up the reading of the zonelist, the zonerefs contain the zone
 * index of the entry being read.  Helper functions to access information
 * given a struct zoneref are
 *
 *	zonelist_zone()		- Return the struct zone * for an entry in _zonerefs
 *	zonelist_zone_idx()	- Return the index of the zone for an entry
 *	zonelist_node_idx()	- Return the index of the node for an entry
 */
struct zonelist {
	struct zoneref _zonerefs[MAX_ZONES_PER_ZONELIST + 1];
};

#ifndef CONFIG_DISCONTIGMEM
/* The array of struct pages - for discontigmem use pgdat->lmem_map */
extern struct page *mem_map;
#endif
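/*
 * Illustrative sketch (hypothetical helper): because a zoneref caches
 * the zone index, the hot allocator path can filter zonelist entries
 * against the highest allowed zone without dereferencing the zone
 * itself, which is the point of the cached zone_idx field above.
 */
static inline bool example_zoneref_allowed(const struct zoneref *z,
					   enum zone_type highest)
{
	return z->zone_idx <= highest;
}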
/*
 * On NUMA machines, each NUMA node has a pg_data_t to describe
 * its memory layout.  On UMA machines there is a single pglist_data which
 * describes the whole memory.
 *
 * Memory statistics and page replacement data structures are maintained on a
 * per-zone basis.
 */
struct bootmem_data;
typedef struct pglist_data {
	struct zone node_zones[MAX_NR_ZONES];
	struct zonelist node_zonelists[MAX_ZONELISTS];
	int nr_zones;
#ifdef CONFIG_FLAT_NODE_MEM_MAP	/* means !SPARSEMEM */
	struct page *node_mem_map;
#ifdef CONFIG_PAGE_EXTENSION
	struct page_ext *node_page_ext;
#endif
#endif
#ifndef CONFIG_NO_BOOTMEM
	struct bootmem_data *bdata;
#endif
#if defined(CONFIG_MEMORY_HOTPLUG) || defined(CONFIG_DEFERRED_STRUCT_PAGE_INIT)
	/*
	 * Must be held any time you expect node_start_pfn,
	 * node_present_pages or node_spanned_pages to stay constant.
	 * Holding this will also guarantee that any pfn_valid() stays
	 * that way.
	 *
	 * pgdat_resize_lock() and pgdat_resize_unlock() are provided to
	 * manipulate node_size_lock without checking for
	 * CONFIG_MEMORY_HOTPLUG or CONFIG_DEFERRED_STRUCT_PAGE_INIT.
	 *
	 * Nests above zone->lock and zone->span_seqlock.
	 */
	spinlock_t node_size_lock;
#endif
	unsigned long node_start_pfn;
	unsigned long node_present_pages; /* total number of physical pages */
	unsigned long node_spanned_pages; /* total size of physical page
					     range, including holes */
	int node_id;
	wait_queue_head_t kswapd_wait;
	wait_queue_head_t pfmemalloc_wait;
	struct task_struct *kswapd;	/* Protected by
					   mem_hotplug_begin/end() */
	int kswapd_order;
	enum zone_type kswapd_classzone_idx;

	int kswapd_failures;		/* Number of 'reclaimed == 0' runs */

#ifdef CONFIG_COMPACTION
	int kcompactd_max_order;
	enum zone_type kcompactd_classzone_idx;
	wait_queue_head_t kcompactd_wait;
	struct task_struct *kcompactd;
#endif
	/*
	 * This is a per-node reserve of pages that are not available
	 * to userspace allocations.
	 */
	unsigned long		totalreserve_pages;

#ifdef CONFIG_NUMA
	/*
	 * zone reclaim becomes active if more unmapped pages exist.
	 */
	unsigned long		min_unmapped_pages;
	unsigned long		min_slab_pages;
#endif /* CONFIG_NUMA */

	/* Write-intensive fields used by page reclaim */
	ZONE_PADDING(_pad1_)
	spinlock_t		lru_lock;

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
	/*
	 * If memory initialisation on large machines is deferred then this
	 * is the first PFN that needs to be initialised.
	 */
	unsigned long first_deferred_pfn;
	/* Number of non-deferred pages */
	unsigned long static_init_pgcnt;
#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	spinlock_t split_queue_lock;
	struct list_head split_queue;
	unsigned long split_queue_len;
#endif

	/* Fields commonly accessed by the page reclaim scanner */
	struct lruvec		lruvec;

	unsigned long		flags;

	ZONE_PADDING(_pad2_)

	/* Per-node vmstats */
	struct per_cpu_nodestat __percpu *per_cpu_nodestats;
	atomic_long_t		vm_stat[NR_VM_NODE_STAT_ITEMS];
} pg_data_t;
#define node_present_pages(nid)	(NODE_DATA(nid)->node_present_pages)
#define node_spanned_pages(nid)	(NODE_DATA(nid)->node_spanned_pages)
#ifdef CONFIG_FLAT_NODE_MEM_MAP
#define pgdat_page_nr(pgdat, pagenr)	((pgdat)->node_mem_map + (pagenr))
#else
#define pgdat_page_nr(pgdat, pagenr)	pfn_to_page((pgdat)->node_start_pfn + (pagenr))
#endif
#define nid_page_nr(nid, pagenr)	pgdat_page_nr(NODE_DATA(nid), (pagenr))

#define node_start_pfn(nid)	(NODE_DATA(nid)->node_start_pfn)
#define node_end_pfn(nid)	pgdat_end_pfn(NODE_DATA(nid))

static inline spinlock_t *zone_lru_lock(struct zone *zone)
{
	return &zone->zone_pgdat->lru_lock;
}

static inline struct lruvec *node_lruvec(struct pglist_data *pgdat)
{
	return &pgdat->lruvec;
}

static inline unsigned long pgdat_end_pfn(pg_data_t *pgdat)
{
	return pgdat->node_start_pfn + pgdat->node_spanned_pages;
}

static inline bool pgdat_is_empty(pg_data_t *pgdat)
{
	return !pgdat->node_start_pfn && !pgdat->node_spanned_pages;
}

#include <linux/memory_hotplug.h>

void build_all_zonelists(pg_data_t *pgdat);
void wakeup_kswapd(struct zone *zone, gfp_t gfp_mask, int order,
		   enum zone_type classzone_idx);
bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
			 int classzone_idx, unsigned int alloc_flags,
			 long free_pages);
bool zone_watermark_ok(struct zone *z, unsigned int order,
		       unsigned long mark, int classzone_idx,
		       unsigned int alloc_flags);
bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
			    unsigned long mark, int classzone_idx);

enum memmap_context {
	MEMMAP_EARLY,
	MEMMAP_HOTPLUG,
};

extern void init_currently_empty_zone(struct zone *zone, unsigned long start_pfn,
				      unsigned long size);

extern void lruvec_init(struct lruvec *lruvec);

static inline struct pglist_data *lruvec_pgdat(struct lruvec *lruvec)
{
#ifdef CONFIG_MEMCG
	return lruvec->pgdat;
#else
	return container_of(lruvec, struct pglist_data, lruvec);
#endif
}

extern unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru,
				     int zone_idx);

#ifdef CONFIG_HAVE_MEMORY_PRESENT
void memory_present(int nid, unsigned long start, unsigned long end);
#else
static inline void memory_present(int nid, unsigned long start, unsigned long end) {}
#endif

#ifdef CONFIG_HAVE_MEMORYLESS_NODES
int local_memory_node(int node_id);
#else
static inline int local_memory_node(int node_id) { return node_id; }
#endif
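/*
 * Illustrative sketch (hypothetical, simplified): asking the predicates
 * declared above whether an order-0 request could be satisfied without
 * dipping below the minimum watermark.  Real callers derive
 * classzone_idx and alloc_flags from the allocation context instead of
 * passing 0.
 */
static inline bool example_can_alloc_order0(struct zone *z, int classzone_idx)
{
	return zone_watermark_ok(z, 0, min_wmark_pages(z), classzone_idx, 0);
}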
/*
 * zone_idx() returns 0 for the ZONE_DMA zone, 1 for the ZONE_NORMAL zone, etc.
 */
#define zone_idx(zone)		((zone) - (zone)->zone_pgdat->node_zones)

#ifdef CONFIG_ZONE_DEVICE
static inline bool is_dev_zone(const struct zone *zone)
{
	return zone_idx(zone) == ZONE_DEVICE;
}
#else
static inline bool is_dev_zone(const struct zone *zone)
{
	return false;
}
#endif

/*
 * Returns true if a zone has pages managed by the buddy allocator.
 * All the reclaim decisions have to use this function rather than
 * populated_zone().  If the whole zone is reserved then we can easily
 * end up with populated_zone() && !managed_zone().
 */
static inline bool managed_zone(struct zone *zone)
{
	return zone->managed_pages;
}

/* Returns true if a zone has memory */
static inline bool populated_zone(struct zone *zone)
{
	return zone->present_pages;
}

#ifdef CONFIG_NUMA
static inline int zone_to_nid(struct zone *zone)
{
	return zone->node;
}

static inline void zone_set_nid(struct zone *zone, int nid)
{
	zone->node = nid;
}
#else
static inline int zone_to_nid(struct zone *zone)
{
	return 0;
}

static inline void zone_set_nid(struct zone *zone, int nid) {}
#endif
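/*
 * Illustrative sketch (hypothetical helper) of the distinction drawn
 * above: a fully reserved zone can have present pages but no
 * buddy-managed ones, so "is there anything here for reclaim or
 * compaction to work with?" must be answered with managed_zone(),
 * not populated_zone().
 */
static inline bool example_zone_has_buddy_pages(struct zone *zone)
{
	/*
	 * populated_zone() alone is not enough: every present page may
	 * be reserved, leaving the buddy allocator nothing to manage.
	 */
	return managed_zone(zone);
}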
extern int movable_zone;

#ifdef CONFIG_HIGHMEM
static inline int zone_movable_is_highmem(void)
{
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
	return movable_zone == ZONE_HIGHMEM;
#else
	return (ZONE_MOVABLE - 1) == ZONE_HIGHMEM;
#endif
}
#endif

static inline int is_highmem_idx(enum zone_type idx)
{
#ifdef CONFIG_HIGHMEM
	return (idx == ZONE_HIGHMEM ||
		(idx == ZONE_MOVABLE && zone_movable_is_highmem()));
#else
	return 0;
#endif
}

/**
 * is_highmem - helper function to quickly check if a struct zone is a
 *              highmem zone or not.  This is an attempt to keep references
 *              to ZONE_{DMA/NORMAL/HIGHMEM/etc} in general code to a minimum.
 * @zone - pointer to struct zone variable
 */
static inline int is_highmem(struct zone *zone)
{
#ifdef CONFIG_HIGHMEM
	return is_highmem_idx(zone_idx(zone));
#else
	return 0;
#endif
}

/* These two functions are used to setup the per zone pages min values */
struct ctl_table;
int min_free_kbytes_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
int watermark_scale_factor_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
extern int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES];
int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);

extern int numa_zonelist_order_handler(struct ctl_table *, int,
			void __user *, size_t *, loff_t *);
extern char numa_zonelist_order[];
#define NUMA_ZONELIST_ORDER_LEN	16

#ifndef CONFIG_NEED_MULTIPLE_NODES

extern struct pglist_data contig_page_data;
#define NODE_DATA(nid)		(&contig_page_data)
#define NODE_MEM_MAP(nid)	mem_map

#else /* CONFIG_NEED_MULTIPLE_NODES */

#include <asm/mmzone.h>

#endif /* !CONFIG_NEED_MULTIPLE_NODES */

extern struct pglist_data *first_online_pgdat(void);
extern struct pglist_data *next_online_pgdat(struct pglist_data *pgdat);
extern struct zone *next_zone(struct zone *zone);

/**
 * for_each_online_pgdat - helper macro to iterate over all online nodes
 * @pgdat - pointer to a pg_data_t variable
 */
#define for_each_online_pgdat(pgdat)			\
	for (pgdat = first_online_pgdat();		\
	     pgdat;					\
	     pgdat = next_online_pgdat(pgdat))
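/*
 * Illustrative sketch (hypothetical helper): totalling present pages
 * across all online nodes with the iterator above, the kind of walk
 * vmstat-style reporting performs.
 */
static inline unsigned long example_total_present_pages(void)
{
	struct pglist_data *pgdat;
	unsigned long total = 0;

	for_each_online_pgdat(pgdat)
		total += pgdat->node_present_pages;

	return total;
}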
/**
 * for_each_zone - helper macro to iterate over all memory zones
 * @zone - pointer to struct zone variable
 *
 * The user only needs to declare the zone variable, for_each_zone
 * fills it in.
 */
#define for_each_zone(zone)				\
	for (zone = (first_online_pgdat())->node_zones; \
	     zone;					\
	     zone = next_zone(zone))

#define for_each_populated_zone(zone)			\
	for (zone = (first_online_pgdat())->node_zones; \
	     zone;					\
	     zone = next_zone(zone))			\
		if (!populated_zone(zone))		\
			; /* do nothing */		\
		else

static inline struct zone *zonelist_zone(struct zoneref *zoneref)
{
	return zoneref->zone;
}

static inline int zonelist_zone_idx(struct zoneref *zoneref)
{
	return zoneref->zone_idx;
}

static inline int zonelist_node_idx(struct zoneref *zoneref)
{
	return zone_to_nid(zoneref->zone);
}

struct zoneref *__next_zones_zonelist(struct zoneref *z,
					enum zone_type highest_zoneidx,
					nodemask_t *nodes);

/**
 * next_zones_zonelist - Returns the next zone at or below highest_zoneidx within the allowed nodemask using a cursor within a zonelist as a starting point
 * @z - The cursor used as a starting point for the search
 * @highest_zoneidx - The zone index of the highest zone to return
 * @nodes - An optional nodemask to filter the zonelist with
 *
 * This function returns the next zone at or below a given zone index that is
 * within the allowed nodemask using a cursor as the starting point for the
 * search.  The zoneref returned is a cursor that represents the current zone
 * being examined.  It should be advanced by one before calling
 * next_zones_zonelist again.
 */
static __always_inline struct zoneref *next_zones_zonelist(struct zoneref *z,
					enum zone_type highest_zoneidx,
					nodemask_t *nodes)
{
	if (likely(!nodes && zonelist_zone_idx(z) <= highest_zoneidx))
		return z;
	return __next_zones_zonelist(z, highest_zoneidx, nodes);
}

/**
 * first_zones_zonelist - Returns the first zone at or below highest_zoneidx within the allowed nodemask in a zonelist
 * @zonelist - The zonelist to search for a suitable zone
 * @highest_zoneidx - The zone index of the highest zone to return
 * @nodes - An optional nodemask to filter the zonelist with
 * @return - Zoneref pointer for the first suitable zone found (see below)
 *
 * This function returns the first zone at or below a given zone index that is
 * within the allowed nodemask.  The zoneref returned is a cursor that can be
 * used to iterate the zonelist with next_zones_zonelist by advancing it by
 * one before calling.
 *
 * When no eligible zone is found, zoneref->zone is NULL (zoneref itself is
 * never NULL).  This may happen either genuinely, or due to a concurrent
 * nodemask update caused by a cpuset modification.
 */
static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
					enum zone_type highest_zoneidx,
					nodemask_t *nodes)
{
	return next_zones_zonelist(zonelist->_zonerefs,
				   highest_zoneidx, nodes);
}

/**
 * for_each_zone_zonelist_nodemask - helper macro to iterate over valid zones in a zonelist at or below a given zone index and within a nodemask
 * @zone - The current zone in the iterator
 * @z - The current pointer within zonelist->zones being iterated
 * @zlist - The zonelist being iterated
 * @highidx - The zone index of the highest zone to return
 * @nodemask - Nodemask allowed by the allocator
 *
 * This iterator iterates through all zones at or below a given zone index and
 * within a given nodemask.
 */
#define for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, nodemask) \
	for (z = first_zones_zonelist(zlist, highidx, nodemask), zone = zonelist_zone(z); \
		zone;							\
		z = next_zones_zonelist(++z, highidx, nodemask),	\
			zone = zonelist_zone(z))

#define for_next_zone_zonelist_nodemask(zone, z, zlist, highidx, nodemask) \
	for (zone = z->zone;						\
		zone;							\
		z = next_zones_zonelist(++z, highidx, nodemask),	\
			zone = zonelist_zone(z))

/**
 * for_each_zone_zonelist - helper macro to iterate over valid zones in a zonelist at or below a given zone index
 * @zone - The current zone in the iterator
 * @z - The current pointer within zonelist->zones being iterated
 * @zlist - The zonelist being iterated
 * @highidx - The zone index of the highest zone to return
 *
 * This iterator iterates through all zones at or below a given zone index.
 */
#define for_each_zone_zonelist(zone, z, zlist, highidx) \
	for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, NULL)
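/*
 * Illustrative sketch (hypothetical helper): the canonical zonelist
 * walk the macros above provide, and which the allocator fast path in
 * mm/page_alloc.c is built around - visit every candidate zone at or
 * below the requested index and stop at the first one that passes a
 * suitability check.
 */
static inline struct zone *example_first_suitable_zone(struct zonelist *zlist,
					enum zone_type highidx)
{
	struct zoneref *z;
	struct zone *zone;

	for_each_zone_zonelist(zone, z, zlist, highidx)
		if (managed_zone(zone))
			return zone;

	return NULL;
}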
#ifdef CONFIG_SPARSEMEM
#include <asm/sparsemem.h>
#endif

#if !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) && \
	!defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP)
static inline unsigned long early_pfn_to_nid(unsigned long pfn)
{
	BUILD_BUG_ON(IS_ENABLED(CONFIG_NUMA));
	return 0;
}
#endif

#ifdef CONFIG_FLATMEM
#define pfn_to_nid(pfn)		(0)
#endif

#ifdef CONFIG_SPARSEMEM

/*
 * SECTION_SHIFT		#bits space required to store a section #
 *
 * PA_SECTION_SHIFT		physical address to/from section number
 * PFN_SECTION_SHIFT		pfn to/from section number
 */
#define PA_SECTION_SHIFT	(SECTION_SIZE_BITS)
#define PFN_SECTION_SHIFT	(SECTION_SIZE_BITS - PAGE_SHIFT)

#define NR_MEM_SECTIONS		(1UL << SECTIONS_SHIFT)

#define PAGES_PER_SECTION	(1UL << PFN_SECTION_SHIFT)
#define PAGE_SECTION_MASK	(~(PAGES_PER_SECTION-1))

#define SECTION_BLOCKFLAGS_BITS \
	((1UL << (PFN_SECTION_SHIFT - pageblock_order)) * NR_PAGEBLOCK_BITS)

#if (MAX_ORDER - 1 + PAGE_SHIFT) > SECTION_SIZE_BITS
#error Allocator MAX_ORDER exceeds SECTION_SIZE
#endif

static inline unsigned long pfn_to_section_nr(unsigned long pfn)
{
	return pfn >> PFN_SECTION_SHIFT;
}
static inline unsigned long section_nr_to_pfn(unsigned long sec)
{
	return sec << PFN_SECTION_SHIFT;
}

#define SECTION_ALIGN_UP(pfn)	(((pfn) + PAGES_PER_SECTION - 1) & PAGE_SECTION_MASK)
#define SECTION_ALIGN_DOWN(pfn)	((pfn) & PAGE_SECTION_MASK)
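/*
 * Illustrative sketch (hypothetical helper): rounding a pfn range
 * outward to whole sections with the macros above, the shape of
 * computation memory hotplug relies on when it operates on
 * section-sized chunks.
 */
static inline unsigned long example_sections_spanned(unsigned long start_pfn,
						     unsigned long nr_pages)
{
	unsigned long start = SECTION_ALIGN_DOWN(start_pfn);
	unsigned long end = SECTION_ALIGN_UP(start_pfn + nr_pages);

	return (end - start) / PAGES_PER_SECTION;
}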
struct page;
struct page_ext;
struct mem_section {
	/*
	 * This is, logically, a pointer to an array of struct
	 * pages.  However, it is stored with some other magic.
	 * (see sparse.c::sparse_init_one_section())
	 *
	 * Additionally during early boot we encode the node id of
	 * the section's location here to guide allocation.
	 * (see sparse.c::memory_present())
	 *
	 * Making it an unsigned long at least forces a cast before
	 * it can be used incorrectly.
	 */
	unsigned long section_mem_map;

	/* See declaration of similar field in struct zone */
	unsigned long *pageblock_flags;
#ifdef CONFIG_PAGE_EXTENSION
	/*
	 * If SPARSEMEM, pgdat doesn't have a page_ext pointer.  We use
	 * the section instead.  (see page_ext.h about this.)
	 */
	struct page_ext *page_ext;
	unsigned long pad;
#endif
	/*
	 * WARNING: mem_section must be a power-of-2 in size for the
	 * calculation and use of SECTION_ROOT_MASK to make sense.
	 */
};

#ifdef CONFIG_SPARSEMEM_EXTREME
#define SECTIONS_PER_ROOT	(PAGE_SIZE / sizeof(struct mem_section))
#else
#define SECTIONS_PER_ROOT	1
#endif

#define SECTION_NR_TO_ROOT(sec)	((sec) / SECTIONS_PER_ROOT)
#define NR_SECTION_ROOTS	DIV_ROUND_UP(NR_MEM_SECTIONS, SECTIONS_PER_ROOT)
#define SECTION_ROOT_MASK	(SECTIONS_PER_ROOT - 1)

#ifdef CONFIG_SPARSEMEM_EXTREME
extern struct mem_section **mem_section;
#else
extern struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT];
#endif

static inline struct mem_section *__nr_to_section(unsigned long nr)
{
#ifdef CONFIG_SPARSEMEM_EXTREME
	if (!mem_section)
		return NULL;
#endif
	if (!mem_section[SECTION_NR_TO_ROOT(nr)])
		return NULL;
	return &mem_section[SECTION_NR_TO_ROOT(nr)][nr & SECTION_ROOT_MASK];
}
extern int __section_nr(struct mem_section *ms);
extern unsigned long usemap_size(void);
/*
 * We use the lower bits of the mem_map pointer to store
 * a little bit of information.  The pointer is calculated
 * as mem_map - section_nr_to_pfn(pnum).  The result is
 * aligned to the minimum alignment of the two values:
 *   1. All mem_map arrays are page-aligned.
 *   2. section_nr_to_pfn() always clears the PFN_SECTION_SHIFT
 *      lowest bits.  PFN_SECTION_SHIFT is arch-specific
 *      (equal to SECTION_SIZE_BITS - PAGE_SHIFT), and the
 *      worst combination is powerpc with 256k pages,
 *      which results in PFN_SECTION_SHIFT equal to 6.
 * To sum it up, at least 6 bits are available.
 */
#define SECTION_MARKED_PRESENT	(1UL<<0)
#define SECTION_HAS_MEM_MAP	(1UL<<1)
#define SECTION_IS_ONLINE	(1UL<<2)
#define SECTION_MAP_LAST_BIT	(1UL<<3)
#define SECTION_MAP_MASK	(~(SECTION_MAP_LAST_BIT-1))
#define SECTION_NID_SHIFT	3

static inline struct page *__section_mem_map_addr(struct mem_section *section)
{
	unsigned long map = section->section_mem_map;
	map &= SECTION_MAP_MASK;
	return (struct page *)map;
}

static inline int present_section(struct mem_section *section)
{
	return (section && (section->section_mem_map & SECTION_MARKED_PRESENT));
}

static inline int present_section_nr(unsigned long nr)
{
	return present_section(__nr_to_section(nr));
}

static inline int valid_section(struct mem_section *section)
{
	return (section && (section->section_mem_map & SECTION_HAS_MEM_MAP));
}

static inline int valid_section_nr(unsigned long nr)
{
	return valid_section(__nr_to_section(nr));
}

static inline int online_section(struct mem_section *section)
{
	return (section && (section->section_mem_map & SECTION_IS_ONLINE));
}

static inline int online_section_nr(unsigned long nr)
{
	return online_section(__nr_to_section(nr));
}

#ifdef CONFIG_MEMORY_HOTPLUG
void online_mem_sections(unsigned long start_pfn, unsigned long end_pfn);
#ifdef CONFIG_MEMORY_HOTREMOVE
void offline_mem_sections(unsigned long start_pfn, unsigned long end_pfn);
#endif
#endif

static inline struct mem_section *__pfn_to_section(unsigned long pfn)
{
	return __nr_to_section(pfn_to_section_nr(pfn));
}

extern int __highest_present_section_nr;

#ifndef CONFIG_HAVE_ARCH_PFN_VALID
static inline int pfn_valid(unsigned long pfn)
{
	if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
		return 0;
	return valid_section(__nr_to_section(pfn_to_section_nr(pfn)));
}
#endif

static inline int pfn_present(unsigned long pfn)
{
	if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
		return 0;
	return present_section(__nr_to_section(pfn_to_section_nr(pfn)));
}

/*
 * These are _only_ used during initialisation, therefore they
 * can use __initdata ...  They could have names to indicate
 * this restriction.
 */
#ifdef CONFIG_NUMA
#define pfn_to_nid(pfn)							\
({									\
	unsigned long __pfn_to_nid_pfn = (pfn);				\
	page_to_nid(pfn_to_page(__pfn_to_nid_pfn));			\
})
#else
#define pfn_to_nid(pfn)		(0)
#endif

#define early_pfn_valid(pfn)	pfn_valid(pfn)
void sparse_init(void);
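/*
 * Illustrative sketch (hypothetical macro): the full SPARSEMEM
 * pfn-to-page lookup that the helpers above enable.  The real
 * __pfn_to_page() in include/asm-generic/memory_model.h performs
 * exactly this section_mem_map arithmetic; it has to be a macro
 * because struct page is only forward-declared at this point, and the
 * encoding described earlier makes "mem_map base + pfn" come out right.
 */
#define example_pfn_to_page(pfn)					\
({	unsigned long __epfn = (pfn);					\
	__section_mem_map_addr(__pfn_to_section(__epfn)) + __epfn;	\
})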
#else
#define sparse_init()	do {} while (0)
#define sparse_index_init(_sec, _nid)  do {} while (0)
#endif /* CONFIG_SPARSEMEM */

/*
 * During memory init memblocks map pfns to nids.  The search is expensive and
 * this caches recent lookups.  The implementation of __early_pfn_to_nid
 * may treat start/end as pfns or sections.
 */
struct mminit_pfnnid_cache {
	unsigned long last_start;
	unsigned long last_end;
	int last_nid;
};

#ifndef early_pfn_valid
#define early_pfn_valid(pfn)	(1)
#endif

void memory_present(int nid, unsigned long start, unsigned long end);

/*
 * If it is possible to have holes within a MAX_ORDER_NR_PAGES block, then we
 * need to check pfn validity within that MAX_ORDER_NR_PAGES block.
 * pfn_valid_within() should be used in this case; we optimise this away
 * when we have no holes within a MAX_ORDER_NR_PAGES block.
 */
#ifdef CONFIG_HOLES_IN_ZONE
#define pfn_valid_within(pfn) pfn_valid(pfn)
#else
#define pfn_valid_within(pfn) (1)
#endif

#ifdef CONFIG_ARCH_HAS_HOLES_MEMORYMODEL
/*
 * pfn_valid() is meant to be able to tell if a given PFN has valid memmap
 * associated with it or not.  This means that a struct page exists for this
 * pfn.  The caller cannot assume the page is fully initialized in general.
 * Hotpluggable pages might not have been onlined yet.  pfn_to_online_page()
 * will ensure the struct page is fully online and initialized.  Special pages
 * (e.g. ZONE_DEVICE) are never onlined and should be treated accordingly.
 *
 * In FLATMEM, it is expected that holes always have valid memmap as long as
 * there are valid PFNs on either side of the hole.  In SPARSEMEM, it is
 * assumed that a valid section has a memmap for the entire section.
 *
 * However, ARM, and maybe other embedded architectures in the future, free
 * the memmap backing holes to save memory on the assumption the memmap is
 * never used.  The page_zone linkages are then broken even though pfn_valid()
 * returns true.  A walker of the full memmap must then do this additional
 * check to ensure the memmap they are looking at is sane by making sure
 * the zone and PFN linkages are still valid.  This is expensive, but walkers
 * of the full memmap are extremely rare.
 */
bool memmap_valid_within(unsigned long pfn,
			struct page *page, struct zone *zone);
#else
static inline bool memmap_valid_within(unsigned long pfn,
				struct page *page, struct zone *zone)
{
	return true;
}
#endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */

#endif /* !__GENERATING_BOUNDS_H */
#endif /* !__ASSEMBLY__ */
#endif /* _LINUX_MMZONE_H */