#ifndef _LINUX_MMZONE_H
#define _LINUX_MMZONE_H

#ifndef __ASSEMBLY__
#ifndef __GENERATING_BOUNDS_H

#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/bitops.h>
#include <linux/cache.h>
#include <linux/threads.h>
#include <linux/numa.h>
#include <linux/init.h>
#include <linux/seqlock.h>
#include <linux/nodemask.h>
#include <linux/pageblock-flags.h>
#include <generated/bounds.h>
#include <linux/atomic.h>
#include <asm/page.h>

/* Free memory management - zoned buddy allocator. */
#ifndef CONFIG_FORCE_MAX_ZONEORDER
#define MAX_ORDER 11
#else
#define MAX_ORDER CONFIG_FORCE_MAX_ZONEORDER
#endif
#define MAX_ORDER_NR_PAGES (1 << (MAX_ORDER - 1))

/*
 * PAGE_ALLOC_COSTLY_ORDER is the order at which allocations are deemed
 * costly to service.  That is between allocation orders which should
 * coalesce naturally under reasonable reclaim pressure and those which
 * will not.
 */
#define PAGE_ALLOC_COSTLY_ORDER 3

enum {
	MIGRATE_UNMOVABLE,
	MIGRATE_RECLAIMABLE,
	MIGRATE_MOVABLE,
	MIGRATE_PCPTYPES,	/* the number of types on the pcp lists */
	MIGRATE_RESERVE = MIGRATE_PCPTYPES,
#ifdef CONFIG_CMA
	/*
	 * The MIGRATE_CMA migration type is designed to mimic the way
	 * ZONE_MOVABLE works.  Only movable pages can be allocated
	 * from MIGRATE_CMA pageblocks and the page allocator never
	 * implicitly changes the migration type of a MIGRATE_CMA pageblock.
	 *
	 * The way to use it is to change the migratetype of a range of
	 * pageblocks to MIGRATE_CMA, which can be done by the
	 * __free_pageblock_cma() function.  What is important though
	 * is that a range of pageblocks must be aligned to
	 * MAX_ORDER_NR_PAGES should the biggest page be bigger than
	 * a single pageblock.
	 */
	MIGRATE_CMA,
#endif
	MIGRATE_ISOLATE,	/* can't allocate from here */
	MIGRATE_TYPES
};

#ifdef CONFIG_CMA
#  define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA)
#else
#  define is_migrate_cma(migratetype) false
#endif

#define for_each_migratetype_order(order, type) \
	for (order = 0; order < MAX_ORDER; order++) \
		for (type = 0; type < MIGRATE_TYPES; type++)

extern int page_group_by_mobility_disabled;

static inline int get_pageblock_migratetype(struct page *page)
{
	return get_pageblock_flags_group(page, PB_migrate, PB_migrate_end);
}

struct free_area {
	struct list_head	free_list[MIGRATE_TYPES];
	unsigned long		nr_free;
};

struct pglist_data;
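/*
 * Illustrative sketch only (example_* is a made-up name, not part of the
 * original interface): free_area[order].nr_free counts free blocks of
 * 2^order pages across all of that order's per-migratetype lists, so the
 * pages covered by a zone's free_area[] array can be summed like this.
 */
static inline unsigned long example_nr_free_pages(struct free_area *area)
{
	unsigned int order;
	unsigned long nr_pages = 0;

	for (order = 0; order < MAX_ORDER; order++)
		nr_pages += area[order].nr_free << order;
	return nr_pages;
}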
/*
 * zone->lock and zone->lru_lock are two of the hottest locks in the kernel.
 * So add a wild amount of padding here to ensure that they fall into separate
 * cachelines.  There are very few zone structures in the machine, so space
 * consumption is not a concern here.
 */
#if defined(CONFIG_SMP)
struct zone_padding {
	char x[0];
} ____cacheline_internodealigned_in_smp;
#define ZONE_PADDING(name)	struct zone_padding name;
#else
#define ZONE_PADDING(name)
#endif

enum zone_stat_item {
	/* First 128 byte cacheline (assuming 64 bit words) */
	NR_FREE_PAGES,
	NR_LRU_BASE,
	NR_INACTIVE_ANON = NR_LRU_BASE, /* must match order of LRU_[IN]ACTIVE */
	NR_ACTIVE_ANON,		/*  "     "     "   "       "         */
	NR_INACTIVE_FILE,	/*  "     "     "   "       "         */
	NR_ACTIVE_FILE,		/*  "     "     "   "       "         */
	NR_UNEVICTABLE,		/*  "     "     "   "       "         */
	NR_MLOCK,		/* mlock()ed pages found and moved off LRU */
	NR_ANON_PAGES,		/* Mapped anonymous pages */
	NR_FILE_MAPPED,		/* pagecache pages mapped into pagetables.
				   only modified from process context */
	NR_FILE_PAGES,
	NR_FILE_DIRTY,
	NR_WRITEBACK,
	NR_SLAB_RECLAIMABLE,
	NR_SLAB_UNRECLAIMABLE,
	NR_PAGETABLE,		/* used for pagetables */
	NR_KERNEL_STACK,
	/* Second 128 byte cacheline */
	NR_UNSTABLE_NFS,	/* NFS unstable pages */
	NR_BOUNCE,
	NR_VMSCAN_WRITE,
	NR_VMSCAN_IMMEDIATE,	/* Prioritise for reclaim when writeback ends */
	NR_WRITEBACK_TEMP,	/* Writeback using temporary buffers */
	NR_ISOLATED_ANON,	/* Temporary isolated pages from anon lru */
	NR_ISOLATED_FILE,	/* Temporary isolated pages from file lru */
	NR_SHMEM,		/* shmem pages (including tmpfs/GEM pages) */
	NR_DIRTIED,		/* page dirtyings since bootup */
	NR_WRITTEN,		/* page writings since bootup */
#ifdef CONFIG_NUMA
	NUMA_HIT,		/* allocated in intended node */
	NUMA_MISS,		/* allocated in non intended node */
	NUMA_FOREIGN,		/* was intended here, hit elsewhere */
	NUMA_INTERLEAVE_HIT,	/* interleaver preferred this zone */
	NUMA_LOCAL,		/* allocation from local node */
	NUMA_OTHER,		/* allocation from other node */
#endif
	NR_ANON_TRANSPARENT_HUGEPAGES,
	NR_FREE_CMA_PAGES,
	NR_VM_ZONE_STAT_ITEMS };

/*
 * We do arithmetic on the LRU lists in various places in the code,
 * so it is important to keep the active lists LRU_ACTIVE higher in
 * the array than the corresponding inactive lists, and to keep
 * the *_FILE lists LRU_FILE higher than the corresponding _ANON lists.
 *
 * This has to be kept in sync with the statistics in zone_stat_item
 * above and the descriptions in vmstat_text in mm/vmstat.c.
 */
#define LRU_BASE 0
#define LRU_ACTIVE 1
#define LRU_FILE 2

enum lru_list {
	LRU_INACTIVE_ANON = LRU_BASE,
	LRU_ACTIVE_ANON = LRU_BASE + LRU_ACTIVE,
	LRU_INACTIVE_FILE = LRU_BASE + LRU_FILE,
	LRU_ACTIVE_FILE = LRU_BASE + LRU_FILE + LRU_ACTIVE,
	LRU_UNEVICTABLE,
	NR_LRU_LISTS
};

#define for_each_lru(lru) for (lru = 0; lru < NR_LRU_LISTS; lru++)

#define for_each_evictable_lru(lru) for (lru = 0; lru <= LRU_ACTIVE_FILE; lru++)

static inline int is_file_lru(enum lru_list lru)
{
	return (lru == LRU_INACTIVE_FILE || lru == LRU_ACTIVE_FILE);
}

static inline int is_active_lru(enum lru_list lru)
{
	return (lru == LRU_ACTIVE_ANON || lru == LRU_ACTIVE_FILE);
}

static inline int is_unevictable_lru(enum lru_list lru)
{
	return (lru == LRU_UNEVICTABLE);
}
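/*
 * Illustrative sketch only (example_* is a made-up name): this is the LRU
 * arithmetic the comment above describes.  An enum lru_list index is built
 * by starting at LRU_BASE and adding LRU_FILE for file-backed pages and
 * LRU_ACTIVE for active pages; see page_lru() in include/linux/mm_inline.h
 * for the real helper.
 */
static inline enum lru_list example_lru_index(int file, int active)
{
	enum lru_list lru = LRU_BASE;

	if (file)
		lru += LRU_FILE;	/* *_FILE lists sit LRU_FILE above *_ANON */
	if (active)
		lru += LRU_ACTIVE;	/* active lists sit LRU_ACTIVE above inactive */
	return lru;
}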
struct zone_reclaim_stat {
	/*
	 * The pageout code in vmscan.c keeps track of how many of the
	 * mem/swap backed and file backed pages are referenced.
	 * The higher the rotated/scanned ratio, the more valuable
	 * that cache is.
	 *
	 * The anon LRU stats live in [0], file LRU stats in [1]
	 */
	unsigned long		recent_rotated[2];
	unsigned long		recent_scanned[2];
};

struct lruvec {
	struct list_head lists[NR_LRU_LISTS];
	struct zone_reclaim_stat reclaim_stat;
#ifdef CONFIG_MEMCG
	struct zone *zone;
#endif
};

/* Mask used at gathering information at once (see memcontrol.c) */
#define LRU_ALL_FILE (BIT(LRU_INACTIVE_FILE) | BIT(LRU_ACTIVE_FILE))
#define LRU_ALL_ANON (BIT(LRU_INACTIVE_ANON) | BIT(LRU_ACTIVE_ANON))
#define LRU_ALL	     ((1 << NR_LRU_LISTS) - 1)

/* Isolate clean file */
#define ISOLATE_CLEAN		((__force isolate_mode_t)0x1)
/* Isolate unmapped file */
#define ISOLATE_UNMAPPED	((__force isolate_mode_t)0x2)
/* Isolate for asynchronous migration */
#define ISOLATE_ASYNC_MIGRATE	((__force isolate_mode_t)0x4)
/* Isolate unevictable pages */
#define ISOLATE_UNEVICTABLE	((__force isolate_mode_t)0x8)

/* LRU Isolation modes. */
typedef unsigned __bitwise__ isolate_mode_t;

enum zone_watermarks {
	WMARK_MIN,
	WMARK_LOW,
	WMARK_HIGH,
	NR_WMARK
};

#define min_wmark_pages(z) (z->watermark[WMARK_MIN])
#define low_wmark_pages(z) (z->watermark[WMARK_LOW])
#define high_wmark_pages(z) (z->watermark[WMARK_HIGH])

struct per_cpu_pages {
	int count;		/* number of pages in the list */
	int high;		/* high watermark, emptying needed */
	int batch;		/* chunk size for buddy add/remove */

	/* Lists of pages, one per migrate type stored on the pcp-lists */
	struct list_head lists[MIGRATE_PCPTYPES];
};

struct per_cpu_pageset {
	struct per_cpu_pages pcp;
#ifdef CONFIG_NUMA
	s8 expire;
#endif
#ifdef CONFIG_SMP
	s8 stat_threshold;
	s8 vm_stat_diff[NR_VM_ZONE_STAT_ITEMS];
#endif
};
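/*
 * Illustrative sketch only (example_* is a made-up name and the mapping is
 * a simplifying assumption): only the first MIGRATE_PCPTYPES migratetypes
 * have their own per-cpu lists, so a caller has to fold any other type
 * onto one of the pcp lists (the movable list here) before touching
 * pcp->lists[].  The real policy lives in mm/page_alloc.c.
 */
static inline struct list_head *example_pcp_list(struct per_cpu_pages *pcp,
						 int migratetype)
{
	if (migratetype >= MIGRATE_PCPTYPES)
		migratetype = MIGRATE_MOVABLE;	/* no dedicated pcp list */
	return &pcp->lists[migratetype];
}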
#endif /* !__GENERATING_BOUNDS_H */

enum zone_type {
#ifdef CONFIG_ZONE_DMA
	/*
	 * ZONE_DMA is used when there are devices that are not able
	 * to do DMA to all of addressable memory (ZONE_NORMAL).  Then we
	 * carve out the portion of memory that is needed for these devices.
	 * The range is arch specific.
	 *
	 * Some examples:
	 *
	 * Architecture		Limit
	 * ---------------------------
	 * parisc, ia64, sparc	<4G
	 * s390			<2G
	 * arm			Various
	 * alpha		Unlimited or 0-16MB.
	 *
	 * i386, x86_64 and multiple other arches
	 *			<16M.
	 */
	ZONE_DMA,
#endif
#ifdef CONFIG_ZONE_DMA32
	/*
	 * x86_64 needs two ZONE_DMAs because it supports devices that are
	 * only able to do DMA to the lower 16M but also 32-bit devices that
	 * can only do DMA to areas below 4G.
	 */
	ZONE_DMA32,
#endif
	/*
	 * Normal addressable memory is in ZONE_NORMAL.  DMA operations can be
	 * performed on pages in ZONE_NORMAL if the DMA devices support
	 * transfers to all addressable memory.
	 */
	ZONE_NORMAL,
#ifdef CONFIG_HIGHMEM
	/*
	 * A memory area that is only addressable by the kernel through
	 * mapping portions into its own address space.  This is for example
	 * used by i386 to allow the kernel to address the memory beyond
	 * 900MB.  The kernel will set up special mappings (page
	 * table entries on i386) for each page that the kernel needs to
	 * access.
	 */
	ZONE_HIGHMEM,
#endif
	ZONE_MOVABLE,
	__MAX_NR_ZONES
};

#ifndef __GENERATING_BOUNDS_H

/*
 * When a memory allocation must conform to specific limitations (such
 * as being suitable for DMA) the caller will pass in hints to the
 * allocator in the gfp_mask, in the zone modifier bits.  These bits
 * are used to select a priority ordered list of memory zones which
 * match the requested limits.  See gfp_zone() in include/linux/gfp.h.
 */

#if MAX_NR_ZONES < 2
#define ZONES_SHIFT 0
#elif MAX_NR_ZONES <= 2
#define ZONES_SHIFT 1
#elif MAX_NR_ZONES <= 4
#define ZONES_SHIFT 2
#else
#error ZONES_SHIFT -- too many zones configured, adjust calculation
#endif
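/*
 * Illustrative sketch only (this check is not part of the original header):
 * ZONES_SHIFT is simply the number of bits needed to encode any valid zone
 * index, so (1 << ZONES_SHIFT) must cover MAX_NR_ZONES distinct values.
 * A build-time statement of that invariant could look like this.
 */
#if (1 << ZONES_SHIFT) < MAX_NR_ZONES
#error "ZONES_SHIFT is too small to encode every zone index"
#endif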
struct zone {
	/* Fields commonly accessed by the page allocator */

	/* zone watermarks, access with *_wmark_pages(zone) macros */
	unsigned long watermark[NR_WMARK];

	/*
	 * When free pages are below this point, additional steps are taken
	 * when reading the number of free pages to avoid per-cpu counter
	 * drift allowing watermarks to be breached.
	 */
	unsigned long percpu_drift_mark;

	/*
	 * We don't know if the memory that we're going to allocate will be
	 * freeable and/or whether it will be released eventually, so to avoid
	 * totally wasting several GB of ram we must reserve some of the lower
	 * zone memory (otherwise we risk running OOM on the lower zones
	 * despite there being tons of freeable ram on the higher zones).
	 * This array is recalculated at runtime if the
	 * sysctl_lowmem_reserve_ratio sysctl changes.
	 */
	unsigned long lowmem_reserve[MAX_NR_ZONES];

	/*
	 * This is a per-zone reserve of pages that should not be
	 * considered dirtyable memory.
	 */
	unsigned long dirty_balance_reserve;

#ifdef CONFIG_NUMA
	int node;
	/*
	 * zone reclaim becomes active if more unmapped pages exist.
	 */
	unsigned long min_unmapped_pages;
	unsigned long min_slab_pages;
#endif
	struct per_cpu_pageset __percpu *pageset;
	/*
	 * free areas of different sizes
	 */
	spinlock_t lock;
	int all_unreclaimable;	/* All pages pinned */
#if defined CONFIG_COMPACTION || defined CONFIG_CMA
	/* Set to true when the PG_migrate_skip bits should be cleared */
	bool compact_blockskip_flush;

	/* pfns where compaction scanners should start */
	unsigned long compact_cached_free_pfn;
	unsigned long compact_cached_migrate_pfn;
#endif
#ifdef CONFIG_MEMORY_HOTPLUG
	/* see spanned/present_pages for more description */
	seqlock_t span_seqlock;
#endif
	struct free_area free_area[MAX_ORDER];

#ifndef CONFIG_SPARSEMEM
	/*
	 * Flags for a pageblock_nr_pages block.  See pageblock-flags.h.
	 * In SPARSEMEM, this map is stored in struct mem_section.
	 */
	unsigned long *pageblock_flags;
#endif /* CONFIG_SPARSEMEM */

#ifdef CONFIG_COMPACTION
	/*
	 * On compaction failure, 1<<compact_defer_shift compactions
	 * are skipped before trying again.  The number attempted since
	 * last failure is tracked with compact_considered.
	 */
	unsigned int		compact_considered;
	unsigned int		compact_defer_shift;
	int			compact_order_failed;
#endif

	ZONE_PADDING(_pad1_)

	/* Fields commonly accessed by the page reclaim scanner */
	spinlock_t		lru_lock;
	struct lruvec		lruvec;

	unsigned long		pages_scanned;	   /* since last reclaim */
	unsigned long		flags;		   /* zone flags, see below */

	/* Zone statistics */
	atomic_long_t		vm_stat[NR_VM_ZONE_STAT_ITEMS];

	/*
	 * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
	 * this zone's LRU.  Maintained by the pageout code.
	 */
	unsigned int inactive_ratio;


	ZONE_PADDING(_pad2_)
	/* Rarely used or read-mostly fields */

	/*
	 * wait_table		-- the array holding the hash table
	 * wait_table_hash_nr_entries	-- the size of the hash table array
	 * wait_table_bits	-- wait_table_size == (1 << wait_table_bits)
	 *
	 * The purpose of all these is to keep track of the people
	 * waiting for a page to become available and make them
	 * runnable again when possible.  The trouble is that this
	 * consumes a lot of space, especially when so few things
	 * wait on pages at a given time.  So instead of using
	 * per-page waitqueues, we use a waitqueue hash table.
	 *
	 * The bucket discipline is to sleep on the same queue when
	 * colliding and wake all in that wait queue when removing.
	 * When something wakes, it must check to be sure its page is
	 * truly available, a la thundering herd.  The cost of a
	 * collision is great, but given the expected load of the
	 * table, they should be so rare as to be outweighed by the
	 * benefits from the saved space.
	 *
	 * __wait_on_page_locked() and unlock_page() in mm/filemap.c are the
	 * primary users of these fields, and in mm/page_alloc.c
	 * free_area_init_core() performs the initialization of them.
	 */
	wait_queue_head_t	*wait_table;
	unsigned long		wait_table_hash_nr_entries;
	unsigned long		wait_table_bits;

	/*
	 * Discontig memory support fields.
	 */
	struct pglist_data	*zone_pgdat;
	/* zone_start_pfn == zone_start_paddr >> PAGE_SHIFT */
	unsigned long		zone_start_pfn;

	/*
	 * zone_start_pfn, spanned_pages and present_pages are all
	 * protected by span_seqlock.  It is a seqlock because it has
	 * to be read outside of zone->lock, and it is done in the main
	 * allocator path.  But, it is written quite infrequently.
	 *
	 * The lock is declared along with zone->lock because it is
	 * frequently read in proximity to zone->lock.  It's good to
	 * give them a chance of being in the same cacheline.
	 */
	unsigned long		spanned_pages;	/* total size, including holes */
	unsigned long		present_pages;	/* amount of memory (excluding holes) */

	/*
	 * rarely used fields:
	 */
	const char		*name;
#ifdef CONFIG_MEMORY_ISOLATION
	/*
	 * The number of MIGRATE_ISOLATE *pageblocks*.
	 * We need this for free page counting.  Look at zone_watermark_ok_safe.
	 * It's protected by zone->lock.
	 */
	int			nr_pageblock_isolate;
#endif
} ____cacheline_internodealigned_in_smp;
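/*
 * Illustrative sketch only (example_* is a made-up name): the watermark
 * array above is read through the *_wmark_pages() macros and compared
 * against the NR_FREE_PAGES counter kept in zone->vm_stat.  The real check
 * is zone_watermark_ok() (declared later in this file, defined in
 * mm/page_alloc.c), which additionally honours lowmem_reserve[] and the
 * per-order free lists; this only shows the basic comparison.
 */
static inline bool example_zone_above_low_wmark(struct zone *z)
{
	unsigned long free = atomic_long_read(&z->vm_stat[NR_FREE_PAGES]);

	return free > low_wmark_pages(z);
}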
typedef enum {
	ZONE_RECLAIM_LOCKED,		/* prevents concurrent reclaim */
	ZONE_OOM_LOCKED,		/* zone is in OOM killer zonelist */
	ZONE_CONGESTED,			/* zone has many dirty pages backed by
					 * a congested BDI
					 */
} zone_flags_t;

static inline void zone_set_flag(struct zone *zone, zone_flags_t flag)
{
	set_bit(flag, &zone->flags);
}

static inline int zone_test_and_set_flag(struct zone *zone, zone_flags_t flag)
{
	return test_and_set_bit(flag, &zone->flags);
}

static inline void zone_clear_flag(struct zone *zone, zone_flags_t flag)
{
	clear_bit(flag, &zone->flags);
}

static inline int zone_is_reclaim_congested(const struct zone *zone)
{
	return test_bit(ZONE_CONGESTED, &zone->flags);
}

static inline int zone_is_reclaim_locked(const struct zone *zone)
{
	return test_bit(ZONE_RECLAIM_LOCKED, &zone->flags);
}

static inline int zone_is_oom_locked(const struct zone *zone)
{
	return test_bit(ZONE_OOM_LOCKED, &zone->flags);
}
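/*
 * Illustrative sketch only (example_* is a made-up name): ZONE_OOM_LOCKED
 * is effectively used as a trylock, which is what the test-and-set helper
 * above provides; roughly how the OOM killer serialises itself on a zone.
 */
static inline int example_try_lock_zone_oom(struct zone *zone)
{
	if (zone_test_and_set_flag(zone, ZONE_OOM_LOCKED))
		return 0;	/* someone else is already handling OOM here */
	return 1;		/* locked; undo later with zone_clear_flag() */
}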
/*
 * The "priority" of VM scanning is how much of the queues we will scan in one
 * go.  A value of 12 for DEF_PRIORITY implies that we will scan 1/4096th of
 * the queues ("queue_length >> 12") during an aging round.
 */
#define DEF_PRIORITY 12

/* Maximum number of zones on a zonelist */
#define MAX_ZONES_PER_ZONELIST (MAX_NUMNODES * MAX_NR_ZONES)

#ifdef CONFIG_NUMA

/*
 * The NUMA zonelists are doubled because we need zonelists that restrict the
 * allocations to a single node for GFP_THISNODE.
 *
 * [0]	: Zonelist with fallback
 * [1]	: No fallback (GFP_THISNODE)
 */
#define MAX_ZONELISTS 2


/*
 * We cache key information from each zonelist for smaller cache
 * footprint when scanning for free pages in get_page_from_freelist().
 *
 * 1) The BITMAP fullzones tracks which zones in a zonelist have come
 *    up short of free memory since the last time (last_fullzone_zap)
 *    we zero'd fullzones.
 * 2) The array z_to_n[] maps each zone in the zonelist to its node
 *    id, so that we can efficiently evaluate whether that node is
 *    set in the current task's mems_allowed.
 *
 * Both fullzones and z_to_n[] are one-to-one with the zonelist,
 * indexed by a zone's offset in the zonelist zones[] array.
 *
 * The get_page_from_freelist() routine does two scans.  During the
 * first scan, we skip zones whose corresponding bit in 'fullzones'
 * is set or whose corresponding node in current->mems_allowed (which
 * comes from cpusets) is not set.  During the second scan, we bypass
 * this zonelist_cache, to ensure we look methodically at each zone.
 *
 * Once per second, we zero out (zap) fullzones, forcing us to
 * reconsider nodes that might have regained more free memory.
 * The field last_full_zap is the time we last zapped fullzones.
 *
 * This mechanism reduces the amount of time we waste repeatedly
 * re-examining zones for free memory when they came up low on
 * memory only moments ago.
 *
 * The zonelist_cache struct members logically belong in struct
 * zonelist.  However, the mempolicy zonelists constructed for
 * MPOL_BIND are intentionally variable length (and usually much
 * shorter).  A general purpose mechanism for handling structs with
 * multiple variable length members is more mechanism than we want
 * here.  We resort to some special case hackery instead.
 *
 * The MPOL_BIND zonelists don't need this zonelist_cache (in good
 * part because they are shorter), so we put the fixed length stuff
 * at the front of the zonelist struct, ending in a variable length
 * zones[], as is needed by MPOL_BIND.
 *
 * Then we put the optional zonelist cache on the end of the zonelist
 * struct.  This optional stuff is found by a 'zlcache_ptr' pointer in
 * the fixed length portion at the front of the struct.  This pointer
 * both enables us to find the zonelist cache, and in the case of
 * MPOL_BIND zonelists (which will just set the zlcache_ptr to NULL),
 * to know that the zonelist cache is not there.
 *
 * The end result is that struct zonelists come in two flavors:
 *  1) The full, fixed length version, shown below, and
 *  2) The custom zonelists for MPOL_BIND.
 * The custom MPOL_BIND zonelists have a NULL zlcache_ptr and no zlcache.
 *
 * Even though there may be multiple CPU cores on a node modifying
 * fullzones or last_full_zap in the same zonelist_cache at the same
 * time, we don't lock it.  This is just hint data - if it is wrong now
 * and then, the allocator will still function, perhaps a bit slower.
 */


struct zonelist_cache {
	unsigned short z_to_n[MAX_ZONES_PER_ZONELIST];		/* zone->nid */
	DECLARE_BITMAP(fullzones, MAX_ZONES_PER_ZONELIST);	/* zone full? */
	unsigned long last_full_zap;		/* when last zap'd (jiffies) */
};
#else
#define MAX_ZONELISTS 1
struct zonelist_cache;
#endif

/*
 * This struct contains information about a zone in a zonelist.  It is stored
 * here to avoid dereferences into large structures and lookups of tables.
 */
struct zoneref {
	struct zone *zone;	/* Pointer to actual zone */
	int zone_idx;		/* zone_idx(zoneref->zone) */
};

/*
 * One allocation request operates on a zonelist.  A zonelist
 * is a list of zones, the first one is the 'goal' of the
 * allocation, the other zones are fallback zones, in decreasing
 * priority.
 *
 * If zlcache_ptr is not NULL, then it is just the address of zlcache,
 * as explained above.  If zlcache_ptr is NULL, there is no zlcache.
 *
 * To speed the reading of the zonelist, the zonerefs contain the zone index
 * of the entry being read.  Helper functions to access information given
 * a struct zoneref are
 *
 * zonelist_zone()	- Return the struct zone * for an entry in _zonerefs
 * zonelist_zone_idx()	- Return the index of the zone for an entry
 * zonelist_node_idx()	- Return the index of the node for an entry
 */
struct zonelist {
	struct zonelist_cache *zlcache_ptr;		     // NULL or &zlcache
	struct zoneref _zonerefs[MAX_ZONES_PER_ZONELIST + 1];
#ifdef CONFIG_NUMA
	struct zonelist_cache zlcache;			     // optional ...
#endif
};
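/*
 * Illustrative sketch only (example_* is a made-up name): the two zonelist
 * flavours described above can be told apart at run time purely by
 * zlcache_ptr; MPOL_BIND zonelists set it to NULL and carry no zlcache.
 */
static inline int example_zonelist_has_zlcache(struct zonelist *zonelist)
{
	return zonelist->zlcache_ptr != NULL;
}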
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
struct node_active_region {
	unsigned long start_pfn;
	unsigned long end_pfn;
	int nid;
};
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */

#ifndef CONFIG_DISCONTIGMEM
/* The array of struct pages - for discontigmem use pgdat->lmem_map */
extern struct page *mem_map;
#endif

/*
 * The pg_data_t structure is used in machines with CONFIG_DISCONTIGMEM
 * (mostly NUMA machines?) to denote a higher-level memory zone than the
 * zone denotes.
 *
 * On NUMA machines, each NUMA node would have a pg_data_t to describe
 * its memory layout.
 *
 * Memory statistics and page replacement data structures are maintained on a
 * per-zone basis.
 */
struct bootmem_data;
typedef struct pglist_data {
	struct zone node_zones[MAX_NR_ZONES];
	struct zonelist node_zonelists[MAX_ZONELISTS];
	int nr_zones;
#ifdef CONFIG_FLAT_NODE_MEM_MAP	/* means !SPARSEMEM */
	struct page *node_mem_map;
#ifdef CONFIG_MEMCG
	struct page_cgroup *node_page_cgroup;
#endif
#endif
#ifndef CONFIG_NO_BOOTMEM
	struct bootmem_data *bdata;
#endif
#ifdef CONFIG_MEMORY_HOTPLUG
	/*
	 * Must be held any time you expect node_start_pfn, node_present_pages
	 * or node_spanned_pages to stay constant.  Holding this will also
	 * guarantee that any pfn_valid() stays that way.
	 *
	 * Nests above zone->lock and zone->span_seqlock.
	 */
	spinlock_t node_size_lock;
#endif
	unsigned long node_start_pfn;
	unsigned long node_present_pages; /* total number of physical pages */
	unsigned long node_spanned_pages; /* total size of physical page
					     range, including holes */
	int node_id;
	nodemask_t reclaim_nodes;	/* Nodes allowed to reclaim from */
	wait_queue_head_t kswapd_wait;
	wait_queue_head_t pfmemalloc_wait;
	struct task_struct *kswapd;	/* Protected by lock_memory_hotplug() */
	int kswapd_max_order;
	enum zone_type classzone_idx;
} pg_data_t;

#define node_present_pages(nid)	(NODE_DATA(nid)->node_present_pages)
#define node_spanned_pages(nid)	(NODE_DATA(nid)->node_spanned_pages)
#ifdef CONFIG_FLAT_NODE_MEM_MAP
#define pgdat_page_nr(pgdat, pagenr)	((pgdat)->node_mem_map + (pagenr))
#else
#define pgdat_page_nr(pgdat, pagenr)	pfn_to_page((pgdat)->node_start_pfn + (pagenr))
#endif
#define nid_page_nr(nid, pagenr)	pgdat_page_nr(NODE_DATA(nid), (pagenr))

#define node_start_pfn(nid)	(NODE_DATA(nid)->node_start_pfn)

#define node_end_pfn(nid) ({\
	pg_data_t *__pgdat = NODE_DATA(nid);\
	__pgdat->node_start_pfn + __pgdat->node_spanned_pages;\
})
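/*
 * Illustrative sketch only (example_* is a made-up name): all of a node's
 * zones live inside its pg_data_t, so a per-node total can be produced by
 * walking pgdat->node_zones[]; zones that were never populated simply
 * contribute 0 pages.
 */
static inline unsigned long example_node_zone_pages(pg_data_t *pgdat)
{
	enum zone_type zt;
	unsigned long pages = 0;

	for (zt = 0; zt < MAX_NR_ZONES; zt++)
		pages += pgdat->node_zones[zt].present_pages;
	return pages;
}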
#include <linux/memory_hotplug.h>

extern struct mutex zonelists_mutex;
void build_all_zonelists(pg_data_t *pgdat, struct zone *zone);
void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx);
bool zone_watermark_ok(struct zone *z, int order, unsigned long mark,
		int classzone_idx, int alloc_flags);
bool zone_watermark_ok_safe(struct zone *z, int order, unsigned long mark,
		int classzone_idx, int alloc_flags);
enum memmap_context {
	MEMMAP_EARLY,
	MEMMAP_HOTPLUG,
};
extern int init_currently_empty_zone(struct zone *zone, unsigned long start_pfn,
				     unsigned long size,
				     enum memmap_context context);

extern void lruvec_init(struct lruvec *lruvec);

static inline struct zone *lruvec_zone(struct lruvec *lruvec)
{
#ifdef CONFIG_MEMCG
	return lruvec->zone;
#else
	return container_of(lruvec, struct zone, lruvec);
#endif
}

#ifdef CONFIG_HAVE_MEMORY_PRESENT
void memory_present(int nid, unsigned long start, unsigned long end);
#else
static inline void memory_present(int nid, unsigned long start, unsigned long end) {}
#endif

#ifdef CONFIG_HAVE_MEMORYLESS_NODES
int local_memory_node(int node_id);
#else
static inline int local_memory_node(int node_id) { return node_id; };
#endif

#ifdef CONFIG_NEED_NODE_MEMMAP_SIZE
unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long);
#endif

/*
 * zone_idx() returns 0 for the ZONE_DMA zone, 1 for the ZONE_NORMAL zone, etc.
 */
#define zone_idx(zone)		((zone) - (zone)->zone_pgdat->node_zones)

static inline int populated_zone(struct zone *zone)
{
	return (!!zone->present_pages);
}

extern int movable_zone;

static inline int zone_movable_is_highmem(void)
{
#if defined(CONFIG_HIGHMEM) && defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP)
	return movable_zone == ZONE_HIGHMEM;
#else
	return 0;
#endif
}

static inline int is_highmem_idx(enum zone_type idx)
{
#ifdef CONFIG_HIGHMEM
	return (idx == ZONE_HIGHMEM ||
		(idx == ZONE_MOVABLE && zone_movable_is_highmem()));
#else
	return 0;
#endif
}

static inline int is_normal_idx(enum zone_type idx)
{
	return (idx == ZONE_NORMAL);
}

/**
 * is_highmem - helper function to quickly check if a struct zone is a
 *              highmem zone or not.  This is an attempt to keep references
 *              to ZONE_{DMA/NORMAL/HIGHMEM/etc} in general code to a minimum.
 * @zone - pointer to struct zone variable
 */
static inline int is_highmem(struct zone *zone)
{
#ifdef CONFIG_HIGHMEM
	int zone_off = (char *)zone - (char *)zone->zone_pgdat->node_zones;
	return zone_off == ZONE_HIGHMEM * sizeof(*zone) ||
	       (zone_off == ZONE_MOVABLE * sizeof(*zone) &&
		zone_movable_is_highmem());
#else
	return 0;
#endif
}

static inline int is_normal(struct zone *zone)
{
	return zone == zone->zone_pgdat->node_zones + ZONE_NORMAL;
}

static inline int is_dma32(struct zone *zone)
{
#ifdef CONFIG_ZONE_DMA32
	return zone == zone->zone_pgdat->node_zones + ZONE_DMA32;
#else
	return 0;
#endif
}

static inline int is_dma(struct zone *zone)
{
#ifdef CONFIG_ZONE_DMA
	return zone == zone->zone_pgdat->node_zones + ZONE_DMA;
#else
	return 0;
#endif
}
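/*
 * Illustrative sketch only (example_* is a made-up name): is_highmem()
 * above is a pointer-arithmetic shortcut; logically it answers the same
 * question as feeding the zone's index to is_highmem_idx().
 */
static inline int example_is_highmem(struct zone *zone)
{
	return is_highmem_idx(zone_idx(zone));
}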
/* These two functions are used to setup the per zone pages min values */
struct ctl_table;
int min_free_kbytes_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
extern int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1];
int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);

extern int numa_zonelist_order_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
extern char numa_zonelist_order[];
#define NUMA_ZONELIST_ORDER_LEN 16	/* string buffer size */

#ifndef CONFIG_NEED_MULTIPLE_NODES

extern struct pglist_data contig_page_data;
#define NODE_DATA(nid)		(&contig_page_data)
#define NODE_MEM_MAP(nid)	mem_map

#else /* CONFIG_NEED_MULTIPLE_NODES */

#include <asm/mmzone.h>

#endif /* !CONFIG_NEED_MULTIPLE_NODES */

extern struct pglist_data *first_online_pgdat(void);
extern struct pglist_data *next_online_pgdat(struct pglist_data *pgdat);
extern struct zone *next_zone(struct zone *zone);

/**
 * for_each_online_pgdat - helper macro to iterate over all online nodes
 * @pgdat - pointer to a pg_data_t variable
 */
#define for_each_online_pgdat(pgdat)			\
	for (pgdat = first_online_pgdat();		\
	     pgdat;					\
	     pgdat = next_online_pgdat(pgdat))
/**
 * for_each_zone - helper macro to iterate over all memory zones
 * @zone - pointer to struct zone variable
 *
 * The user only needs to declare the zone variable, for_each_zone
 * fills it in.
 */
#define for_each_zone(zone)				\
	for (zone = (first_online_pgdat())->node_zones;	\
	     zone;					\
	     zone = next_zone(zone))

#define for_each_populated_zone(zone)			\
	for (zone = (first_online_pgdat())->node_zones;	\
	     zone;					\
	     zone = next_zone(zone))			\
		if (!populated_zone(zone))		\
			; /* do nothing */		\
		else
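/*
 * Illustrative sketch only (example_* is a made-up name): a typical
 * system-wide walk using the iterator above; for_each_populated_zone()
 * silently skips zones with no present pages.
 */
static inline unsigned long example_total_spanned_pages(void)
{
	struct zone *zone;
	unsigned long total = 0;

	for_each_populated_zone(zone)
		total += zone->spanned_pages;
	return total;
}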
static inline struct zone *zonelist_zone(struct zoneref *zoneref)
{
	return zoneref->zone;
}

static inline int zonelist_zone_idx(struct zoneref *zoneref)
{
	return zoneref->zone_idx;
}

static inline int zonelist_node_idx(struct zoneref *zoneref)
{
#ifdef CONFIG_NUMA
	/* zone_to_nid not available in this context */
	return zoneref->zone->node;
#else
	return 0;
#endif /* CONFIG_NUMA */
}

/**
 * next_zones_zonelist - Returns the next zone at or below highest_zoneidx within the allowed nodemask using a cursor within a zonelist as a starting point
 * @z - The cursor used as a starting point for the search
 * @highest_zoneidx - The zone index of the highest zone to return
 * @nodes - An optional nodemask to filter the zonelist with
 * @zone - The first suitable zone found is returned via this parameter
 *
 * This function returns the next zone at or below a given zone index that is
 * within the allowed nodemask using a cursor as the starting point for the
 * search.  The zoneref returned is a cursor that represents the current zone
 * being examined.  It should be advanced by one before calling
 * next_zones_zonelist again.
 */
struct zoneref *next_zones_zonelist(struct zoneref *z,
					enum zone_type highest_zoneidx,
					nodemask_t *nodes,
					struct zone **zone);

/**
 * first_zones_zonelist - Returns the first zone at or below highest_zoneidx within the allowed nodemask in a zonelist
 * @zonelist - The zonelist to search for a suitable zone
 * @highest_zoneidx - The zone index of the highest zone to return
 * @nodes - An optional nodemask to filter the zonelist with
 * @zone - The first suitable zone found is returned via this parameter
 *
 * This function returns the first zone at or below a given zone index that is
 * within the allowed nodemask.  The zoneref returned is a cursor that can be
 * used to iterate the zonelist with next_zones_zonelist by advancing it by
 * one before calling.
 */
static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
					enum zone_type highest_zoneidx,
					nodemask_t *nodes,
					struct zone **zone)
{
	return next_zones_zonelist(zonelist->_zonerefs, highest_zoneidx, nodes,
								zone);
}

/**
 * for_each_zone_zonelist_nodemask - helper macro to iterate over valid zones in a zonelist at or below a given zone index and within a nodemask
 * @zone - The current zone in the iterator
 * @z - The current pointer within zonelist->zones being iterated
 * @zlist - The zonelist being iterated
 * @highidx - The zone index of the highest zone to return
 * @nodemask - Nodemask allowed by the allocator
 *
 * This iterator iterates through all zones at or below a given zone index and
 * within a given nodemask.
 */
#define for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, nodemask) \
	for (z = first_zones_zonelist(zlist, highidx, nodemask, &zone);	\
		zone;							\
		z = next_zones_zonelist(++z, highidx, nodemask, &zone))

/**
 * for_each_zone_zonelist - helper macro to iterate over valid zones in a zonelist at or below a given zone index
 * @zone - The current zone in the iterator
 * @z - The current pointer within zonelist->zones being iterated
 * @zlist - The zonelist being iterated
 * @highidx - The zone index of the highest zone to return
 *
 * This iterator iterates through all zones at or below a given zone index.
 */
#define for_each_zone_zonelist(zone, z, zlist, highidx) \
	for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, NULL)
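/*
 * Illustrative sketch only (example_* is a made-up name): how an
 * allocator-style caller walks a zonelist with the iterator above, taking
 * the first zone that is still above its low watermark.  The zonelist and
 * the highest allowed zone index would normally come from node_zonelist()
 * and gfp_zone() in include/linux/gfp.h.
 */
static inline struct zone *example_first_usable_zone(struct zonelist *zlist,
						     enum zone_type highidx)
{
	struct zoneref *z;
	struct zone *zone;

	for_each_zone_zonelist(zone, z, zlist, highidx) {
		unsigned long free = atomic_long_read(&zone->vm_stat[NR_FREE_PAGES]);

		if (free > low_wmark_pages(zone))
			return zone;
	}
	return NULL;
}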
#ifdef CONFIG_SPARSEMEM
#include <asm/sparsemem.h>
#endif

#if !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) && \
	!defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP)
static inline unsigned long early_pfn_to_nid(unsigned long pfn)
{
	return 0;
}
#endif

#ifdef CONFIG_FLATMEM
#define pfn_to_nid(pfn)		(0)
#endif

#ifdef CONFIG_SPARSEMEM

/*
 * SECTIONS_SHIFT	#bits space required to store a section #
 *
 * PA_SECTION_SHIFT	physical address to/from section number
 * PFN_SECTION_SHIFT	pfn to/from section number
 */
#define SECTIONS_SHIFT		(MAX_PHYSMEM_BITS - SECTION_SIZE_BITS)

#define PA_SECTION_SHIFT	(SECTION_SIZE_BITS)
#define PFN_SECTION_SHIFT	(SECTION_SIZE_BITS - PAGE_SHIFT)

#define NR_MEM_SECTIONS		(1UL << SECTIONS_SHIFT)

#define PAGES_PER_SECTION	(1UL << PFN_SECTION_SHIFT)
#define PAGE_SECTION_MASK	(~(PAGES_PER_SECTION-1))

#define SECTION_BLOCKFLAGS_BITS \
	((1UL << (PFN_SECTION_SHIFT - pageblock_order)) * NR_PAGEBLOCK_BITS)

#if (MAX_ORDER - 1 + PAGE_SHIFT) > SECTION_SIZE_BITS
#error Allocator MAX_ORDER exceeds SECTION_SIZE
#endif

#define pfn_to_section_nr(pfn) ((pfn) >> PFN_SECTION_SHIFT)
#define section_nr_to_pfn(sec) ((sec) << PFN_SECTION_SHIFT)

#define SECTION_ALIGN_UP(pfn)	(((pfn) + PAGES_PER_SECTION - 1) & PAGE_SECTION_MASK)
#define SECTION_ALIGN_DOWN(pfn)	((pfn) & PAGE_SECTION_MASK)

struct page;
struct page_cgroup;
struct mem_section {
	/*
	 * This is, logically, a pointer to an array of struct
	 * pages.  However, it is stored with some other magic.
	 * (see sparse.c::sparse_init_one_section())
	 *
	 * Additionally during early boot we encode node id of
	 * the location of the section here to guide allocation.
	 * (see sparse.c::memory_present())
	 *
	 * Making it a UL at least makes someone do a cast
	 * before using it wrong.
	 */
	unsigned long section_mem_map;

	/* See declaration of similar field in struct zone */
	unsigned long *pageblock_flags;
#ifdef CONFIG_MEMCG
	/*
	 * If !SPARSEMEM, pgdat doesn't have page_cgroup pointer.  We use
	 * the section instead.  (see memcontrol.h/page_cgroup.h about this.)
	 */
	struct page_cgroup *page_cgroup;
	unsigned long pad;
#endif
};

#ifdef CONFIG_SPARSEMEM_EXTREME
#define SECTIONS_PER_ROOT	(PAGE_SIZE / sizeof (struct mem_section))
#else
#define SECTIONS_PER_ROOT	1
#endif

#define SECTION_NR_TO_ROOT(sec)	((sec) / SECTIONS_PER_ROOT)
#define NR_SECTION_ROOTS	DIV_ROUND_UP(NR_MEM_SECTIONS, SECTIONS_PER_ROOT)
#define SECTION_ROOT_MASK	(SECTIONS_PER_ROOT - 1)

#ifdef CONFIG_SPARSEMEM_EXTREME
extern struct mem_section *mem_section[NR_SECTION_ROOTS];
#else
extern struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT];
#endif

static inline struct mem_section *__nr_to_section(unsigned long nr)
{
	if (!mem_section[SECTION_NR_TO_ROOT(nr)])
		return NULL;
	return &mem_section[SECTION_NR_TO_ROOT(nr)][nr & SECTION_ROOT_MASK];
}
extern int __section_nr(struct mem_section *ms);
extern unsigned long usemap_size(void);

/*
 * We use the lower bits of the mem_map pointer to store
 * a little bit of information.  There should be at least
 * 3 bits here due to 32-bit alignment.
 */
#define SECTION_MARKED_PRESENT	(1UL<<0)
#define SECTION_HAS_MEM_MAP	(1UL<<1)
#define SECTION_MAP_LAST_BIT	(1UL<<2)
#define SECTION_MAP_MASK	(~(SECTION_MAP_LAST_BIT-1))
#define SECTION_NID_SHIFT	2

static inline struct page *__section_mem_map_addr(struct mem_section *section)
{
	unsigned long map = section->section_mem_map;
	map &= SECTION_MAP_MASK;
	return (struct page *)map;
}

static inline int present_section(struct mem_section *section)
{
	return (section && (section->section_mem_map & SECTION_MARKED_PRESENT));
}

static inline int present_section_nr(unsigned long nr)
{
	return present_section(__nr_to_section(nr));
}

static inline int valid_section(struct mem_section *section)
{
	return (section && (section->section_mem_map & SECTION_HAS_MEM_MAP));
}

static inline int valid_section_nr(unsigned long nr)
{
	return valid_section(__nr_to_section(nr));
}

static inline struct mem_section *__pfn_to_section(unsigned long pfn)
{
	return __nr_to_section(pfn_to_section_nr(pfn));
}

#ifndef CONFIG_HAVE_ARCH_PFN_VALID
static inline int pfn_valid(unsigned long pfn)
{
	if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
		return 0;
	return valid_section(__nr_to_section(pfn_to_section_nr(pfn)));
}
#endif

static inline int pfn_present(unsigned long pfn)
{
	if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
		return 0;
	return present_section(__nr_to_section(pfn_to_section_nr(pfn)));
}
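/*
 * Illustrative sketch only (example_pfn_to_page is a made-up name): under
 * SPARSEMEM a pfn is resolved to its struct page through the owning
 * section's encoded mem_map.  sparse.c stores the map with the section's
 * start pfn already subtracted, so adding the full pfn is correct; the
 * real translation is __pfn_to_page() in asm-generic/memory_model.h.
 */
#define example_pfn_to_page(pfn)					\
({	unsigned long __e_pfn = (pfn);					\
	/* __section_mem_map_addr() strips the SECTION_* flag bits */	\
	__section_mem_map_addr(__pfn_to_section(__e_pfn)) + __e_pfn;	\
})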
/*
 * These are _only_ used during initialisation, therefore they
 * can use __initdata ...  They could have names to indicate
 * this restriction.
 */
#ifdef CONFIG_NUMA
#define pfn_to_nid(pfn)							\
({									\
	unsigned long __pfn_to_nid_pfn = (pfn);				\
	page_to_nid(pfn_to_page(__pfn_to_nid_pfn));			\
})
#else
#define pfn_to_nid(pfn)		(0)
#endif

#define early_pfn_valid(pfn)	pfn_valid(pfn)
void sparse_init(void);
#else
#define sparse_init()	do {} while (0)
#define sparse_index_init(_sec, _nid)	do {} while (0)
#endif /* CONFIG_SPARSEMEM */

#ifdef CONFIG_NODES_SPAN_OTHER_NODES
bool early_pfn_in_nid(unsigned long pfn, int nid);
#else
#define early_pfn_in_nid(pfn, nid)	(1)
#endif

#ifndef early_pfn_valid
#define early_pfn_valid(pfn)	(1)
#endif

void memory_present(int nid, unsigned long start, unsigned long end);
unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long);

/*
 * If it is possible to have holes within a MAX_ORDER_NR_PAGES block, then we
 * need to check pfn validity within that MAX_ORDER_NR_PAGES block.
 * pfn_valid_within() should be used in this case; we optimise this away
 * when we have no holes within a MAX_ORDER_NR_PAGES block.
 */
#ifdef CONFIG_HOLES_IN_ZONE
#define pfn_valid_within(pfn) pfn_valid(pfn)
#else
#define pfn_valid_within(pfn) (1)
#endif

#ifdef CONFIG_ARCH_HAS_HOLES_MEMORYMODEL
/*
 * pfn_valid() is meant to be able to tell if a given PFN has valid memmap
 * associated with it or not.  In FLATMEM, it is expected that holes always
 * have valid memmap as long as there are valid PFNs on either side of the
 * hole.  In SPARSEMEM, it is assumed that a valid section has a memmap for
 * the entire section.
 *
 * However, ARM, and maybe other embedded architectures in the future, free
 * the memmap backing holes to save memory on the assumption the memmap is
 * never used.  The page_zone linkages are then broken even though pfn_valid()
 * returns true.  A walker of the full memmap must then do this additional
 * check to ensure the memmap they are looking at is sane by making sure the
 * zone and PFN linkages are still valid.  This is expensive, but walkers
 * of the full memmap are extremely rare.
 */
int memmap_valid_within(unsigned long pfn,
					struct page *page, struct zone *zone);
#else
static inline int memmap_valid_within(unsigned long pfn,
					struct page *page, struct zone *zone)
{
	return 1;
}
#endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */

#endif /* !__GENERATING_BOUNDS_H */
#endif /* !__ASSEMBLY__ */
#endif /* _LINUX_MMZONE_H */