1 #ifndef _LINUX_MMZONE_H
2 #define _LINUX_MMZONE_H
3 
4 #ifdef __KERNEL__
5 #ifndef __ASSEMBLY__
6 #ifndef __GENERATING_BOUNDS_H
7 
8 #include <linux/spinlock.h>
9 #include <linux/list.h>
10 #include <linux/wait.h>
11 #include <linux/bitops.h>
12 #include <linux/cache.h>
13 #include <linux/threads.h>
14 #include <linux/numa.h>
15 #include <linux/init.h>
16 #include <linux/seqlock.h>
17 #include <linux/nodemask.h>
18 #include <linux/pageblock-flags.h>
19 #include <linux/bounds.h>
20 #include <asm/atomic.h>
21 #include <asm/page.h>
22 
23 /* Free memory management - zoned buddy allocator.  */
24 #ifndef CONFIG_FORCE_MAX_ZONEORDER
25 #define MAX_ORDER 11
26 #else
27 #define MAX_ORDER CONFIG_FORCE_MAX_ZONEORDER
28 #endif
29 #define MAX_ORDER_NR_PAGES (1 << (MAX_ORDER - 1))
30 
31 /*
32  * PAGE_ALLOC_COSTLY_ORDER is the order at which allocations are deemed
33  * costly to service.  It marks the boundary between allocation orders that
34  * should coalesce naturally under reasonable reclaim pressure and those
35  * that will not.
36  */
37 #define PAGE_ALLOC_COSTLY_ORDER 3
38 
39 #define MIGRATE_UNMOVABLE     0
40 #define MIGRATE_RECLAIMABLE   1
41 #define MIGRATE_MOVABLE       2
42 #define MIGRATE_RESERVE       3
43 #define MIGRATE_ISOLATE       4 /* can't allocate from here */
44 #define MIGRATE_TYPES         5
45 
46 #define for_each_migratetype_order(order, type) \
47 	for (order = 0; order < MAX_ORDER; order++) \
48 		for (type = 0; type < MIGRATE_TYPES; type++)
49 
50 extern int page_group_by_mobility_disabled;
51 
52 static inline int get_pageblock_migratetype(struct page *page)
53 {
54 	if (unlikely(page_group_by_mobility_disabled))
55 		return MIGRATE_UNMOVABLE;
56 
57 	return get_pageblock_flags_group(page, PB_migrate, PB_migrate_end);
58 }
59 
60 struct free_area {
61 	struct list_head	free_list[MIGRATE_TYPES];
62 	unsigned long		nr_free;
63 };
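/*
 * Illustrative sketch (editor's addition, not part of the original header):
 * for_each_migratetype_order() visits every (order, migratetype) pair, so a
 * caller holding zone->lock could count the free buddy blocks roughly like
 * this (assuming a populated 'struct zone *zone'):
 *
 *	int order, type;
 *	struct list_head *pos;
 *	unsigned long nr_blocks = 0;
 *
 *	for_each_migratetype_order(order, type)
 *		list_for_each(pos, &zone->free_area[order].free_list[type])
 *			nr_blocks++;
 */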
64 
65 struct pglist_data;
66 
67 /*
68  * zone->lock and zone->lru_lock are two of the hottest locks in the kernel.
69  * So add a wild amount of padding here to ensure that they fall into separate
70  * cachelines.  There are very few zone structures in the machine, so space
71  * consumption is not a concern here.
72  */
73 #if defined(CONFIG_SMP)
74 struct zone_padding {
75 	char x[0];
76 } ____cacheline_internodealigned_in_smp;
77 #define ZONE_PADDING(name)	struct zone_padding name;
78 #else
79 #define ZONE_PADDING(name)
80 #endif
81 
82 enum zone_stat_item {
83 	/* First 128 byte cacheline (assuming 64 bit words) */
84 	NR_FREE_PAGES,
85 	NR_INACTIVE,
86 	NR_ACTIVE,
87 	NR_ANON_PAGES,	/* Mapped anonymous pages */
88 	NR_FILE_MAPPED,	/* pagecache pages mapped into pagetables.
89 			   only modified from process context */
90 	NR_FILE_PAGES,
91 	NR_FILE_DIRTY,
92 	NR_WRITEBACK,
93 	/* Second 128 byte cacheline */
94 	NR_SLAB_RECLAIMABLE,
95 	NR_SLAB_UNRECLAIMABLE,
96 	NR_PAGETABLE,		/* used for pagetables */
97 	NR_UNSTABLE_NFS,	/* NFS unstable pages */
98 	NR_BOUNCE,
99 	NR_VMSCAN_WRITE,
100 	NR_WRITEBACK_TEMP,	/* Writeback using temporary buffers */
101 #ifdef CONFIG_NUMA
102 	NUMA_HIT,		/* allocated in intended node */
103 	NUMA_MISS,		/* allocated in non intended node */
104 	NUMA_FOREIGN,		/* was intended here, hit elsewhere */
105 	NUMA_INTERLEAVE_HIT,	/* interleaver preferred this zone */
106 	NUMA_LOCAL,		/* allocation from local node */
107 	NUMA_OTHER,		/* allocation from other node */
108 #endif
109 	NR_VM_ZONE_STAT_ITEMS };
110 
111 struct per_cpu_pages {
112 	int count;		/* number of pages in the list */
113 	int high;		/* high watermark, emptying needed */
114 	int batch;		/* chunk size for buddy add/remove */
115 	struct list_head list;	/* the list of pages */
116 };
117 
118 struct per_cpu_pageset {
119 	struct per_cpu_pages pcp;
120 #ifdef CONFIG_NUMA
121 	s8 expire;
122 #endif
123 #ifdef CONFIG_SMP
124 	s8 stat_threshold;
125 	s8 vm_stat_diff[NR_VM_ZONE_STAT_ITEMS];
126 #endif
127 } ____cacheline_aligned_in_smp;
128 
129 #ifdef CONFIG_NUMA
130 #define zone_pcp(__z, __cpu) ((__z)->pageset[(__cpu)])
131 #else
132 #define zone_pcp(__z, __cpu) (&(__z)->pageset[(__cpu)])
133 #endif
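/*
 * Illustrative sketch (editor's addition): zone_pcp() hides the difference
 * between the NUMA layout (an array of pointers) and the non-NUMA layout
 * (an embedded array).  Code running with preemption disabled could look at
 * its own CPU's hot page list roughly like this:
 *
 *	struct per_cpu_pages *pcp = &zone_pcp(zone, smp_processor_id())->pcp;
 *
 *	if (pcp->count >= pcp->high)
 *		drain pages back to the buddy lists in pcp->batch sized chunks;
 */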
134 
135 #endif /* !__GENERATING_BOUNDS_H */
136 
137 enum zone_type {
138 #ifdef CONFIG_ZONE_DMA
139 	/*
140 	 * ZONE_DMA is used when there are devices that are not able
141 	 * to do DMA to all of addressable memory (ZONE_NORMAL). Then we
142 	 * carve out the portion of memory that is needed for these devices.
143 	 * The range is arch specific.
144 	 *
145 	 * Some examples
146 	 *
147 	 * Architecture		Limit
148 	 * ---------------------------
149 	 * parisc, ia64, sparc	<4G
150 	 * s390			<2G
151 	 * arm			Various
152 	 * alpha		Unlimited or 0-16MB.
153 	 *
154 	 * i386, x86_64 and multiple other arches
155 	 * 			<16M.
156 	 */
157 	ZONE_DMA,
158 #endif
159 #ifdef CONFIG_ZONE_DMA32
160 	/*
161 	 * x86_64 needs two ZONE_DMAs because it supports devices that are
162 	 * only able to do DMA to the lower 16M but also 32-bit devices that
163 	 * can only do DMA to areas below 4G.
164 	 */
165 	ZONE_DMA32,
166 #endif
167 	/*
168 	 * Normal addressable memory is in ZONE_NORMAL. DMA operations can be
169 	 * performed on pages in ZONE_NORMAL if the DMA devices support
170 	 * transfers to all addressable memory.
171 	 */
172 	ZONE_NORMAL,
173 #ifdef CONFIG_HIGHMEM
174 	/*
175 	 * A memory area that is only addressable by the kernel through
176 	 * mapping portions into its own address space. This is for example
177 	 * used by i386 to allow the kernel to address the memory beyond
178 	 * 900MB. The kernel will set up special mappings (page
179 	 * table entries on i386) for each page that the kernel needs to
180 	 * access.
181 	 */
182 	ZONE_HIGHMEM,
183 #endif
184 	ZONE_MOVABLE,
185 	__MAX_NR_ZONES
186 };
187 
188 #ifndef __GENERATING_BOUNDS_H
189 
190 /*
191  * When a memory allocation must conform to specific limitations (such
192  * as being suitable for DMA) the caller will pass in hints to the
193  * allocator in the gfp_mask, in the zone modifier bits.  These bits
194  * are used to select a priority ordered list of memory zones which
195  * match the requested limits. See gfp_zone() in include/linux/gfp.h
196  */
197 
198 #if MAX_NR_ZONES < 2
199 #define ZONES_SHIFT 0
200 #elif MAX_NR_ZONES <= 2
201 #define ZONES_SHIFT 1
202 #elif MAX_NR_ZONES <= 4
203 #define ZONES_SHIFT 2
204 #else
205 #error ZONES_SHIFT -- too many zones configured adjust calculation
206 #endif
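/*
 * Worked example (editor's addition): on a configuration with ZONE_DMA,
 * ZONE_DMA32, ZONE_NORMAL and ZONE_MOVABLE enabled, MAX_NR_ZONES is 4, so
 * ZONES_SHIFT evaluates to 2: two bits are enough to encode the zone of a
 * page in page->flags.
 */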
207 
208 struct zone {
209 	/* Fields commonly accessed by the page allocator */
210 	unsigned long		pages_min, pages_low, pages_high;
211 	/*
212 	 * We don't know whether the memory we are going to allocate will be
213 	 * freeable or whether it will eventually be released, so to avoid
214 	 * wasting several GB of ram we must reserve some of the lower zone
215 	 * memory (otherwise we risk running OOM on the lower zones even though
216 	 * there is plenty of freeable ram on the higher zones). This array is
217 	 * recalculated at runtime if the sysctl_lowmem_reserve_ratio sysctl changes.
218 	 */
219 	unsigned long		lowmem_reserve[MAX_NR_ZONES];
220 
221 #ifdef CONFIG_NUMA
222 	int node;
223 	/*
224 	 * zone reclaim becomes active if more unmapped pages than min_unmapped_pages exist.
225 	 */
226 	unsigned long		min_unmapped_pages;
227 	unsigned long		min_slab_pages;
228 	struct per_cpu_pageset	*pageset[NR_CPUS];
229 #else
230 	struct per_cpu_pageset	pageset[NR_CPUS];
231 #endif
232 	/*
233 	 * free areas of different sizes
234 	 */
235 	spinlock_t		lock;
236 #ifdef CONFIG_MEMORY_HOTPLUG
237 	/* see spanned/present_pages for more description */
238 	seqlock_t		span_seqlock;
239 #endif
240 	struct free_area	free_area[MAX_ORDER];
241 
242 #ifndef CONFIG_SPARSEMEM
243 	/*
244 	 * Flags for a pageblock_nr_pages block. See pageblock-flags.h.
245 	 * In SPARSEMEM, this map is stored in struct mem_section
246 	 */
247 	unsigned long		*pageblock_flags;
248 #endif /* CONFIG_SPARSEMEM */
249 
250 
251 	ZONE_PADDING(_pad1_)
252 
253 	/* Fields commonly accessed by the page reclaim scanner */
254 	spinlock_t		lru_lock;
255 	struct list_head	active_list;
256 	struct list_head	inactive_list;
257 	unsigned long		nr_scan_active;
258 	unsigned long		nr_scan_inactive;
259 	unsigned long		pages_scanned;	   /* since last reclaim */
260 	unsigned long		flags;		   /* zone flags, see below */
261 
262 	/* Zone statistics */
263 	atomic_long_t		vm_stat[NR_VM_ZONE_STAT_ITEMS];
264 
265 	/*
266 	 * prev_priority holds the scanning priority for this zone.  It is
267 	 * defined as the scanning priority at which we achieved our reclaim
268 	 * target at the previous try_to_free_pages() or balance_pgdat()
269 	 * invocation.
270 	 *
271 	 * We use prev_priority as a measure of how much stress page reclaim is
272 	 * under - it drives the swappiness decision: whether to unmap mapped
273 	 * pages.
274 	 *
275 	 * Access to this field is quite racy even on uniprocessor, but it is
276 	 * expected to average out OK.
277 	 */
278 	int prev_priority;
279 
280 
281 	ZONE_PADDING(_pad2_)
282 	/* Rarely used or read-mostly fields */
283 
284 	/*
285 	 * wait_table		-- the array holding the hash table
286 	 * wait_table_hash_nr_entries	-- the size of the hash table array
287 	 * wait_table_bits	-- wait_table_hash_nr_entries == (1 << wait_table_bits)
288 	 *
289 	 * The purpose of all these is to keep track of the people
290 	 * waiting for a page to become available and make them
291 	 * runnable again when possible. The trouble is that this
292 	 * consumes a lot of space, especially when so few things
293 	 * wait on pages at a given time. So instead of using
294 	 * per-page waitqueues, we use a waitqueue hash table.
295 	 *
296 	 * The bucket discipline is to sleep on the same queue when
297 	 * colliding and wake all in that wait queue when removing.
298 	 * When something wakes, it must check to be sure its page is
299 	 * truly available, a la thundering herd. The cost of a
300 	 * collision is great, but given the expected load of the
301 	 * table, they should be so rare as to be outweighed by the
302 	 * benefits from the saved space.
303 	 *
304 	 * __wait_on_page_locked() and unlock_page() in mm/filemap.c are the
305 	 * primary users of these fields; free_area_init_core() in
306 	 * mm/page_alloc.c initializes them.
307 	 */
308 	wait_queue_head_t	* wait_table;
309 	unsigned long		wait_table_hash_nr_entries;
310 	unsigned long		wait_table_bits;
311 
312 	/*
313 	 * Discontig memory support fields.
314 	 */
315 	struct pglist_data	*zone_pgdat;
316 	/* zone_start_pfn == zone_start_paddr >> PAGE_SHIFT */
317 	unsigned long		zone_start_pfn;
318 
319 	/*
320 	 * zone_start_pfn, spanned_pages and present_pages are all
321 	 * protected by span_seqlock.  It is a seqlock because it has
322 	 * to be read outside of zone->lock, and the read is done in the main
323 	 * allocator path.  But it is written quite infrequently.
324 	 *
325 	 * The lock is declared along with zone->lock because it is
326 	 * frequently read in proximity to zone->lock.  It's good to
327 	 * give them a chance of being in the same cacheline.
328 	 */
329 	unsigned long		spanned_pages;	/* total size, including holes */
330 	unsigned long		present_pages;	/* amount of memory (excluding holes) */
331 
332 	/*
333 	 * rarely used fields:
334 	 */
335 	const char		*name;
336 } ____cacheline_internodealigned_in_smp;
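/*
 * Illustrative sketch (editor's addition): the watermarks and lowmem_reserve[]
 * above are consumed together.  For an allocation whose preferred zone has
 * index 'classzone_idx', a check against the low watermark is conceptually:
 *
 *	if (free_pages <= zone->pages_low + zone->lowmem_reserve[classzone_idx])
 *		this zone is too low; fall back or wake kswapd;
 *
 * See zone_watermark_ok() in mm/page_alloc.c for the real test, which also
 * takes the allocation order and alloc_flags into account.
 */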
337 
338 typedef enum {
339 	ZONE_ALL_UNRECLAIMABLE,		/* all pages pinned */
340 	ZONE_RECLAIM_LOCKED,		/* prevents concurrent reclaim */
341 	ZONE_OOM_LOCKED,		/* zone is in OOM killer zonelist */
342 } zone_flags_t;
343 
344 static inline void zone_set_flag(struct zone *zone, zone_flags_t flag)
345 {
346 	set_bit(flag, &zone->flags);
347 }
348 
349 static inline int zone_test_and_set_flag(struct zone *zone, zone_flags_t flag)
350 {
351 	return test_and_set_bit(flag, &zone->flags);
352 }
353 
354 static inline void zone_clear_flag(struct zone *zone, zone_flags_t flag)
355 {
356 	clear_bit(flag, &zone->flags);
357 }
358 
359 static inline int zone_is_all_unreclaimable(const struct zone *zone)
360 {
361 	return test_bit(ZONE_ALL_UNRECLAIMABLE, &zone->flags);
362 }
363 
364 static inline int zone_is_reclaim_locked(const struct zone *zone)
365 {
366 	return test_bit(ZONE_RECLAIM_LOCKED, &zone->flags);
367 }
368 
369 static inline int zone_is_oom_locked(const struct zone *zone)
370 {
371 	return test_bit(ZONE_OOM_LOCKED, &zone->flags);
372 }
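/*
 * Illustrative sketch (editor's addition): ZONE_RECLAIM_LOCKED acts as a
 * try-lock so that only one task reclaims a given zone at a time, e.g.:
 *
 *	if (zone_test_and_set_flag(zone, ZONE_RECLAIM_LOCKED))
 *		return;		(someone else is already reclaiming this zone)
 *
 *	... reclaim some pages from this zone ...
 *
 *	zone_clear_flag(zone, ZONE_RECLAIM_LOCKED);
 */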
373 
374 /*
375  * The "priority" of VM scanning is how much of the queues we will scan in one
376  * go. A value of 12 for DEF_PRIORITY implies that we will scan 1/4096th of the
377  * queues ("queue_length >> 12") during an aging round.
378  */
379 #define DEF_PRIORITY 12
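/*
 * Worked example (editor's addition): at DEF_PRIORITY a zone with 1,000,000
 * LRU pages is scanned in chunks of about 1000000 >> 12 = 244 pages per aging
 * round.  As reclaim struggles, the priority falls towards 0, where the whole
 * queue becomes eligible for scanning in one go.
 */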
380 
381 /* Maximum number of zones on a zonelist */
382 #define MAX_ZONES_PER_ZONELIST (MAX_NUMNODES * MAX_NR_ZONES)
383 
384 #ifdef CONFIG_NUMA
385 
386 /*
387  * The NUMA zonelists are doubled because we need zonelists that restrict the
388  * allocations to a single node for GFP_THISNODE.
389  *
390  * [0]	: Zonelist with fallback
391  * [1]	: No fallback (GFP_THISNODE)
392  */
393 #define MAX_ZONELISTS 2
394 
395 
396 /*
397  * We cache key information from each zonelist for smaller cache
398  * footprint when scanning for free pages in get_page_from_freelist().
399  *
400  * 1) The BITMAP fullzones tracks which zones in a zonelist have come
401  *    up short of free memory since the last time (last_fullzone_zap)
402  *    we zero'd fullzones.
403  * 2) The array z_to_n[] maps each zone in the zonelist to its node
404  *    id, so that we can efficiently evaluate whether that node is
405  *    set in the current task's mems_allowed.
406  *
407  * Both fullzones and z_to_n[] are one-to-one with the zonelist,
408  * indexed by a zone's offset in the zonelist zones[] array.
409  *
410  * The get_page_from_freelist() routine does two scans.  During the
411  * first scan, we skip zones whose corresponding bit in 'fullzones'
412  * is set or whose corresponding node in current->mems_allowed (which
413  * comes from cpusets) is not set.  During the second scan, we bypass
414  * this zonelist_cache, to ensure we look methodically at each zone.
415  *
416  * Once per second, we zero out (zap) fullzones, forcing us to
417  * reconsider nodes that might have regained more free memory.
418  * The field last_full_zap is the time we last zapped fullzones.
419  *
420  * This mechanism reduces the amount of time we waste repeatedly
421  * re-examining zones for free memory when they came up low on
422  * memory only moments ago.
423  *
424  * The zonelist_cache struct members logically belong in struct
425  * zonelist.  However, the mempolicy zonelists constructed for
426  * MPOL_BIND are intentionally variable length (and usually much
427  * shorter).  A general purpose mechanism for handling structs with
428  * multiple variable length members is more mechanism than we want
429  * here.  We resort to some special case hackery instead.
430  *
431  * The MPOL_BIND zonelists don't need this zonelist_cache (in good
432  * part because they are shorter), so we put the fixed length stuff
433  * at the front of the zonelist struct, ending in a variable length
434  * zones[], as is needed by MPOL_BIND.
435  *
436  * Then we put the optional zonelist cache on the end of the zonelist
437  * struct.  This optional stuff is found by a 'zlcache_ptr' pointer in
438  * the fixed length portion at the front of the struct.  This pointer
439  * both enables us to find the zonelist cache, and in the case of
440  * MPOL_BIND zonelists, (which will just set the zlcache_ptr to NULL)
441  * to know that the zonelist cache is not there.
442  *
443  * The end result is that struct zonelists come in two flavors:
444  *  1) The full, fixed length version, shown below, and
445  *  2) The custom zonelists for MPOL_BIND.
446  * The custom MPOL_BIND zonelists have a NULL zlcache_ptr and no zlcache.
447  *
448  * Even though there may be multiple CPU cores on a node modifying
449  * fullzones or last_full_zap in the same zonelist_cache at the same
450  * time, we don't lock it.  This is just hint data - if it is wrong now
451  * and then, the allocator will still function, perhaps a bit slower.
452  */
453 
454 
455 struct zonelist_cache {
456 	unsigned short z_to_n[MAX_ZONES_PER_ZONELIST];		/* zone->nid */
457 	DECLARE_BITMAP(fullzones, MAX_ZONES_PER_ZONELIST);	/* zone full? */
458 	unsigned long last_full_zap;		/* when last zap'd (jiffies) */
459 };
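/*
 * Illustrative sketch (editor's addition): during the first, cached scan of
 * get_page_from_freelist() the zonelist_cache could be consulted roughly like
 * this (zlc and the zone's offset 'i' in the zonelist assumed to be in hand):
 *
 *	if (test_bit(i, zlc->fullzones))
 *		continue;	(recently found full, skip it)
 *	if (!node_isset(zlc->z_to_n[i], current->mems_allowed))
 *		continue;	(not allowed by the cpuset)
 *
 * A zone that then turns out to be short of memory is remembered with
 * set_bit(i, zlc->fullzones) so later allocations skip it until the next
 * once-a-second zap of the bitmap.
 */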
460 #else
461 #define MAX_ZONELISTS 1
462 struct zonelist_cache;
463 #endif
464 
465 /*
466  * This struct contains information about a zone in a zonelist. It is stored
467  * here to avoid dereferences into large structures and lookups of tables
468  */
469 struct zoneref {
470 	struct zone *zone;	/* Pointer to actual zone */
471 	int zone_idx;		/* zone_idx(zoneref->zone) */
472 };
473 
474 /*
475  * One allocation request operates on a zonelist. A zonelist
476  * is a list of zones, the first one is the 'goal' of the
477  * allocation, the other zones are fallback zones, in decreasing
478  * priority.
479  *
480  * If zlcache_ptr is not NULL, then it is just the address of zlcache,
481  * as explained above.  If zlcache_ptr is NULL, there is no zlcache.
482  *
483  * To speed the reading of the zonelist, the zonerefs contain the zone index
484  * of the entry being read. Helper functions to access information given
485  * a struct zoneref are
486  *
487  * zonelist_zone()	- Return the struct zone * for an entry in _zonerefs
488  * zonelist_zone_idx()	- Return the index of the zone for an entry
489  * zonelist_node_idx()	- Return the index of the node for an entry
490  */
491 struct zonelist {
492 	struct zonelist_cache *zlcache_ptr;		     // NULL or &zlcache
493 	struct zoneref _zonerefs[MAX_ZONES_PER_ZONELIST + 1];
494 #ifdef CONFIG_NUMA
495 	struct zonelist_cache zlcache;			     // optional ...
496 #endif
497 };
498 
499 #ifdef CONFIG_ARCH_POPULATES_NODE_MAP
500 struct node_active_region {
501 	unsigned long start_pfn;
502 	unsigned long end_pfn;
503 	int nid;
504 };
505 #endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
506 
507 #ifndef CONFIG_DISCONTIGMEM
508 /* The array of struct pages - for discontigmem use pgdat->lmem_map */
509 extern struct page *mem_map;
510 #endif
511 
512 /*
513  * The pg_data_t structure is used in machines with CONFIG_DISCONTIGMEM
514  * (mostly NUMA machines?) to describe a range of memory at a higher level
515  * than a single zone.
516  *
517  * On NUMA machines, each NUMA node has a pg_data_t to describe
518  * its memory layout.
519  *
520  * Memory statistics and page replacement data structures are maintained on a
521  * per-zone basis.
522  */
523 struct bootmem_data;
524 typedef struct pglist_data {
525 	struct zone node_zones[MAX_NR_ZONES];
526 	struct zonelist node_zonelists[MAX_ZONELISTS];
527 	int nr_zones;
528 #ifdef CONFIG_FLAT_NODE_MEM_MAP
529 	struct page *node_mem_map;
530 #endif
531 	struct bootmem_data *bdata;
532 #ifdef CONFIG_MEMORY_HOTPLUG
533 	/*
534 	 * Must be held any time you expect node_start_pfn, node_present_pages
535 	 * or node_spanned_pages to stay constant.  Holding this will also
536 	 * guarantee that any pfn_valid() stays that way.
537 	 *
538 	 * Nests above zone->lock and zone->span_seqlock.
539 	 */
540 	spinlock_t node_size_lock;
541 #endif
542 	unsigned long node_start_pfn;
543 	unsigned long node_present_pages; /* total number of physical pages */
544 	unsigned long node_spanned_pages; /* total size of physical page
545 					     range, including holes */
546 	int node_id;
547 	wait_queue_head_t kswapd_wait;
548 	struct task_struct *kswapd;
549 	int kswapd_max_order;
550 } pg_data_t;
551 
552 #define node_present_pages(nid)	(NODE_DATA(nid)->node_present_pages)
553 #define node_spanned_pages(nid)	(NODE_DATA(nid)->node_spanned_pages)
554 #ifdef CONFIG_FLAT_NODE_MEM_MAP
555 #define pgdat_page_nr(pgdat, pagenr)	((pgdat)->node_mem_map + (pagenr))
556 #else
557 #define pgdat_page_nr(pgdat, pagenr)	pfn_to_page((pgdat)->node_start_pfn + (pagenr))
558 #endif
559 #define nid_page_nr(nid, pagenr) 	pgdat_page_nr(NODE_DATA(nid),(pagenr))
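/*
 * Illustrative sketch (editor's addition): NODE_DATA() (defined further down
 * for the single-node case, or by asm/mmzone.h on NUMA) returns the pg_data_t
 * for a node, and the helpers above are thin wrappers around it:
 *
 *	pg_data_t *pgdat = NODE_DATA(nid);
 *	unsigned long present = node_present_pages(nid);
 *	struct page *first = pgdat_page_nr(pgdat, 0);
 */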
560 
561 #include <linux/memory_hotplug.h>
562 
563 void get_zone_counts(unsigned long *active, unsigned long *inactive,
564 			unsigned long *free);
565 void build_all_zonelists(void);
566 void wakeup_kswapd(struct zone *zone, int order);
567 int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
568 		int classzone_idx, int alloc_flags);
569 enum memmap_context {
570 	MEMMAP_EARLY,
571 	MEMMAP_HOTPLUG,
572 };
573 extern int init_currently_empty_zone(struct zone *zone, unsigned long start_pfn,
574 				     unsigned long size,
575 				     enum memmap_context context);
576 
577 #ifdef CONFIG_HAVE_MEMORY_PRESENT
578 void memory_present(int nid, unsigned long start, unsigned long end);
579 #else
580 static inline void memory_present(int nid, unsigned long start, unsigned long end) {}
581 #endif
582 
583 #ifdef CONFIG_NEED_NODE_MEMMAP_SIZE
584 unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long);
585 #endif
586 
587 /*
588  * zone_idx() returns 0 for the ZONE_DMA zone, 1 for the ZONE_NORMAL zone, etc.
589  */
590 #define zone_idx(zone)		((zone) - (zone)->zone_pgdat->node_zones)
591 
592 static inline int populated_zone(struct zone *zone)
593 {
594 	return (!!zone->present_pages);
595 }
596 
597 extern int movable_zone;
598 
599 static inline int zone_movable_is_highmem(void)
600 {
601 #if defined(CONFIG_HIGHMEM) && defined(CONFIG_ARCH_POPULATES_NODE_MAP)
602 	return movable_zone == ZONE_HIGHMEM;
603 #else
604 	return 0;
605 #endif
606 }
607 
608 static inline int is_highmem_idx(enum zone_type idx)
609 {
610 #ifdef CONFIG_HIGHMEM
611 	return (idx == ZONE_HIGHMEM ||
612 		(idx == ZONE_MOVABLE && zone_movable_is_highmem()));
613 #else
614 	return 0;
615 #endif
616 }
617 
618 static inline int is_normal_idx(enum zone_type idx)
619 {
620 	return (idx == ZONE_NORMAL);
621 }
622 
623 /**
624  * is_highmem - helper function to quickly check if a struct zone is a
625  *              highmem zone or not.  This is an attempt to keep references
626  *              to ZONE_{DMA/NORMAL/HIGHMEM/etc} in general code to a minimum.
627  * @zone - pointer to struct zone variable
628  */
629 static inline int is_highmem(struct zone *zone)
630 {
631 #ifdef CONFIG_HIGHMEM
632 	int zone_off = (char *)zone - (char *)zone->zone_pgdat->node_zones;
633 	return zone_off == ZONE_HIGHMEM * sizeof(*zone) ||
634 	       (zone_off == ZONE_MOVABLE * sizeof(*zone) &&
635 		zone_movable_is_highmem());
636 #else
637 	return 0;
638 #endif
639 }
640 
641 static inline int is_normal(struct zone *zone)
642 {
643 	return zone == zone->zone_pgdat->node_zones + ZONE_NORMAL;
644 }
645 
646 static inline int is_dma32(struct zone *zone)
647 {
648 #ifdef CONFIG_ZONE_DMA32
649 	return zone == zone->zone_pgdat->node_zones + ZONE_DMA32;
650 #else
651 	return 0;
652 #endif
653 }
654 
655 static inline int is_dma(struct zone *zone)
656 {
657 #ifdef CONFIG_ZONE_DMA
658 	return zone == zone->zone_pgdat->node_zones + ZONE_DMA;
659 #else
660 	return 0;
661 #endif
662 }
663 
664 /* These two functions are used to setup the per zone pages min values */
665 struct ctl_table;
666 struct file;
667 int min_free_kbytes_sysctl_handler(struct ctl_table *, int, struct file *,
668 					void __user *, size_t *, loff_t *);
669 extern int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1];
670 int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int, struct file *,
671 					void __user *, size_t *, loff_t *);
672 int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *, int, struct file *,
673 					void __user *, size_t *, loff_t *);
674 int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *, int,
675 			struct file *, void __user *, size_t *, loff_t *);
676 int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *, int,
677 			struct file *, void __user *, size_t *, loff_t *);
678 
679 extern int numa_zonelist_order_handler(struct ctl_table *, int,
680 			struct file *, void __user *, size_t *, loff_t *);
681 extern char numa_zonelist_order[];
682 #define NUMA_ZONELIST_ORDER_LEN 16	/* string buffer size */
683 
684 #include <linux/topology.h>
685 /* Returns the number of the current Node. */
686 #ifndef numa_node_id
687 #define numa_node_id()		(cpu_to_node(raw_smp_processor_id()))
688 #endif
689 
690 #ifndef CONFIG_NEED_MULTIPLE_NODES
691 
692 extern struct pglist_data contig_page_data;
693 #define NODE_DATA(nid)		(&contig_page_data)
694 #define NODE_MEM_MAP(nid)	mem_map
695 
696 #else /* CONFIG_NEED_MULTIPLE_NODES */
697 
698 #include <asm/mmzone.h>
699 
700 #endif /* !CONFIG_NEED_MULTIPLE_NODES */
701 
702 extern struct pglist_data *first_online_pgdat(void);
703 extern struct pglist_data *next_online_pgdat(struct pglist_data *pgdat);
704 extern struct zone *next_zone(struct zone *zone);
705 
706 /**
707  * for_each_online_pgdat - helper macro to iterate over all online nodes
708  * @pgdat - pointer to a pg_data_t variable
709  */
710 #define for_each_online_pgdat(pgdat)			\
711 	for (pgdat = first_online_pgdat();		\
712 	     pgdat;					\
713 	     pgdat = next_online_pgdat(pgdat))
714 /**
715  * for_each_zone - helper macro to iterate over all memory zones
716  * @zone - pointer to struct zone variable
717  *
718  * The user only needs to declare the zone variable, for_each_zone
719  * fills it in.
720  */
721 #define for_each_zone(zone)			        \
722 	for (zone = (first_online_pgdat())->node_zones; \
723 	     zone;					\
724 	     zone = next_zone(zone))
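/*
 * Illustrative sketch (editor's addition): combining for_each_zone() with
 * populated_zone() to total up the present pages in the system:
 *
 *	struct zone *zone;
 *	unsigned long present = 0;
 *
 *	for_each_zone(zone) {
 *		if (!populated_zone(zone))
 *			continue;
 *		present += zone->present_pages;
 *	}
 */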
725 
726 static inline struct zone *zonelist_zone(struct zoneref *zoneref)
727 {
728 	return zoneref->zone;
729 }
730 
731 static inline int zonelist_zone_idx(struct zoneref *zoneref)
732 {
733 	return zoneref->zone_idx;
734 }
735 
736 static inline int zonelist_node_idx(struct zoneref *zoneref)
737 {
738 #ifdef CONFIG_NUMA
739 	/* zone_to_nid not available in this context */
740 	return zoneref->zone->node;
741 #else
742 	return 0;
743 #endif /* CONFIG_NUMA */
744 }
745 
746 /**
747  * next_zones_zonelist - Returns the next zone at or below highest_zoneidx within the allowed nodemask using a cursor within a zonelist as a starting point
748  * @z - The cursor used as a starting point for the search
749  * @highest_zoneidx - The zone index of the highest zone to return
750  * @nodes - An optional nodemask to filter the zonelist with
751  * @zone - The first suitable zone found is returned via this parameter
752  *
753  * This function returns the next zone at or below a given zone index that is
754  * within the allowed nodemask using a cursor as the starting point for the
755  * search. The zoneref returned is a cursor that is used as the next starting
756  * point for future calls to next_zones_zonelist().
757  */
758 struct zoneref *next_zones_zonelist(struct zoneref *z,
759 					enum zone_type highest_zoneidx,
760 					nodemask_t *nodes,
761 					struct zone **zone);
762 
763 /**
764  * first_zones_zonelist - Returns the first zone at or below highest_zoneidx within the allowed nodemask in a zonelist
765  * @zonelist - The zonelist to search for a suitable zone
766  * @highest_zoneidx - The zone index of the highest zone to return
767  * @nodes - An optional nodemask to filter the zonelist with
768  * @zone - The first suitable zone found is returned via this parameter
769  *
770  * This function returns the first zone at or below a given zone index that is
771  * within the allowed nodemask. The zoneref returned is a cursor that can be
772  * used to iterate the zonelist with next_zones_zonelist. The cursor should
773  * not be used by the caller as it does not match the value of the zone
774  * returned.
775  */
776 static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
777 					enum zone_type highest_zoneidx,
778 					nodemask_t *nodes,
779 					struct zone **zone)
780 {
781 	return next_zones_zonelist(zonelist->_zonerefs, highest_zoneidx, nodes,
782 								zone);
783 }
784 
785 /**
786  * for_each_zone_zonelist_nodemask - helper macro to iterate over valid zones in a zonelist at or below a given zone index and within a nodemask
787  * @zone - The current zone in the iterator
788  * @z - The current pointer within zonelist->zones being iterated
789  * @zlist - The zonelist being iterated
790  * @highidx - The zone index of the highest zone to return
791  * @nodemask - Nodemask allowed by the allocator
792  *
793  * This iterator iterates through all zones at or below a given zone index and
794  * within a given nodemask
795  */
796 #define for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, nodemask) \
797 	for (z = first_zones_zonelist(zlist, highidx, nodemask, &zone);	\
798 		zone;							\
799 		z = next_zones_zonelist(z, highidx, nodemask, &zone))	\
800 
801 /**
802  * for_each_zone_zonelist - helper macro to iterate over valid zones in a zonelist at or below a given zone index
803  * @zone - The current zone in the iterator
804  * @z - The current pointer within zonelist->zones being iterated
805  * @zlist - The zonelist being iterated
806  * @highidx - The zone index of the highest zone to return
807  *
808  * This iterator iterates through all zones at or below a given zone index.
809  */
810 #define for_each_zone_zonelist(zone, z, zlist, highidx) \
811 	for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, NULL)
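/*
 * Illustrative sketch (editor's addition): an allocator-style walk over a
 * zonelist, trying only zones that still have their pages_low watermark
 * available (zonelist, order and highidx assumed to be supplied by the
 * caller):
 *
 *	struct zoneref *z;
 *	struct zone *zone;
 *
 *	for_each_zone_zonelist(zone, z, zonelist, highidx) {
 *		if (!zone_watermark_ok(zone, order, zone->pages_low,
 *				       zonelist_zone_idx(z), 0))
 *			continue;
 *		... try to take pages from this zone ...
 *	}
 */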
812 
813 #ifdef CONFIG_SPARSEMEM
814 #include <asm/sparsemem.h>
815 #endif
816 
817 #if !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) && \
818 	!defined(CONFIG_ARCH_POPULATES_NODE_MAP)
819 static inline unsigned long early_pfn_to_nid(unsigned long pfn)
820 {
821 	return 0;
822 }
823 #endif
824 
825 #ifdef CONFIG_FLATMEM
826 #define pfn_to_nid(pfn)		(0)
827 #endif
828 
829 #define pfn_to_section_nr(pfn) ((pfn) >> PFN_SECTION_SHIFT)
830 #define section_nr_to_pfn(sec) ((sec) << PFN_SECTION_SHIFT)
831 
832 #ifdef CONFIG_SPARSEMEM
833 
834 /*
835  * SECTIONS_SHIFT		#bits space required to store a section #
836  *
837  * PA_SECTION_SHIFT		physical address to/from section number
838  * PFN_SECTION_SHIFT		pfn to/from section number
839  */
840 #define SECTIONS_SHIFT		(MAX_PHYSMEM_BITS - SECTION_SIZE_BITS)
841 
842 #define PA_SECTION_SHIFT	(SECTION_SIZE_BITS)
843 #define PFN_SECTION_SHIFT	(SECTION_SIZE_BITS - PAGE_SHIFT)
844 
845 #define NR_MEM_SECTIONS		(1UL << SECTIONS_SHIFT)
846 
847 #define PAGES_PER_SECTION       (1UL << PFN_SECTION_SHIFT)
848 #define PAGE_SECTION_MASK	(~(PAGES_PER_SECTION-1))
849 
850 #define SECTION_BLOCKFLAGS_BITS \
851 	((1UL << (PFN_SECTION_SHIFT - pageblock_order)) * NR_PAGEBLOCK_BITS)
852 
853 #if (MAX_ORDER - 1 + PAGE_SHIFT) > SECTION_SIZE_BITS
854 #error Allocator MAX_ORDER exceeds SECTION_SIZE
855 #endif
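/*
 * Worked example (editor's addition): assuming SECTION_SIZE_BITS == 27 and
 * PAGE_SHIFT == 12 (the usual x86_64 values), PA_SECTION_SHIFT is 27,
 * PFN_SECTION_SHIFT is 15, PAGES_PER_SECTION is 32768 and every mem_section
 * therefore spans 128MB of physical address space.
 */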
856 
857 struct page;
858 struct mem_section {
859 	/*
860 	 * This is, logically, a pointer to an array of struct
861 	 * pages.  However, it is stored with some other magic.
862 	 * (see sparse.c::sparse_init_one_section())
863 	 *
864 	 * Additionally during early boot we encode node id of
865 	 * the location of the section here to guide allocation.
866 	 * (see sparse.c::memory_present())
867 	 *
868 	 * Making it a UL at least makes someone do a cast
869 	 * before using it wrong.
870 	 */
871 	unsigned long section_mem_map;
872 
873 	/* See declaration of similar field in struct zone */
874 	unsigned long *pageblock_flags;
875 };
876 
877 #ifdef CONFIG_SPARSEMEM_EXTREME
878 #define SECTIONS_PER_ROOT       (PAGE_SIZE / sizeof (struct mem_section))
879 #else
880 #define SECTIONS_PER_ROOT	1
881 #endif
882 
883 #define SECTION_NR_TO_ROOT(sec)	((sec) / SECTIONS_PER_ROOT)
884 #define NR_SECTION_ROOTS	(NR_MEM_SECTIONS / SECTIONS_PER_ROOT)
885 #define SECTION_ROOT_MASK	(SECTIONS_PER_ROOT - 1)
886 
887 #ifdef CONFIG_SPARSEMEM_EXTREME
888 extern struct mem_section *mem_section[NR_SECTION_ROOTS];
889 #else
890 extern struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT];
891 #endif
892 
893 static inline struct mem_section *__nr_to_section(unsigned long nr)
894 {
895 	if (!mem_section[SECTION_NR_TO_ROOT(nr)])
896 		return NULL;
897 	return &mem_section[SECTION_NR_TO_ROOT(nr)][nr & SECTION_ROOT_MASK];
898 }
899 extern int __section_nr(struct mem_section* ms);
900 extern unsigned long usemap_size(void);
901 
902 /*
903  * We use the lower bits of the mem_map pointer to store
904  * a little bit of information.  There should be at least
905  * 3 bits here due to 32-bit alignment.
906  */
907 #define	SECTION_MARKED_PRESENT	(1UL<<0)
908 #define SECTION_HAS_MEM_MAP	(1UL<<1)
909 #define SECTION_MAP_LAST_BIT	(1UL<<2)
910 #define SECTION_MAP_MASK	(~(SECTION_MAP_LAST_BIT-1))
911 #define SECTION_NID_SHIFT	2
912 
913 static inline struct page *__section_mem_map_addr(struct mem_section *section)
914 {
915 	unsigned long map = section->section_mem_map;
916 	map &= SECTION_MAP_MASK;
917 	return (struct page *)map;
918 }
919 
920 static inline int present_section(struct mem_section *section)
921 {
922 	return (section && (section->section_mem_map & SECTION_MARKED_PRESENT));
923 }
924 
925 static inline int present_section_nr(unsigned long nr)
926 {
927 	return present_section(__nr_to_section(nr));
928 }
929 
930 static inline int valid_section(struct mem_section *section)
931 {
932 	return (section && (section->section_mem_map & SECTION_HAS_MEM_MAP));
933 }
934 
935 static inline int valid_section_nr(unsigned long nr)
936 {
937 	return valid_section(__nr_to_section(nr));
938 }
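/*
 * Illustrative sketch (editor's addition): walking every possible section and
 * skipping the ones that memory_present() never registered:
 *
 *	unsigned long nr;
 *
 *	for (nr = 0; nr < NR_MEM_SECTIONS; nr++) {
 *		if (!present_section_nr(nr))
 *			continue;
 *		... section 'nr' covers PAGES_PER_SECTION pfns starting at
 *		    section_nr_to_pfn(nr) ...
 *	}
 */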
939 
940 static inline struct mem_section *__pfn_to_section(unsigned long pfn)
941 {
942 	return __nr_to_section(pfn_to_section_nr(pfn));
943 }
944 
945 static inline int pfn_valid(unsigned long pfn)
946 {
947 	if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
948 		return 0;
949 	return valid_section(__nr_to_section(pfn_to_section_nr(pfn)));
950 }
951 
952 static inline int pfn_present(unsigned long pfn)
953 {
954 	if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
955 		return 0;
956 	return present_section(__nr_to_section(pfn_to_section_nr(pfn)));
957 }
958 
959 /*
960  * These are _only_ used during initialisation, therefore they
961  * can use __initdata ...  They could have names to indicate
962  * this restriction.
963  */
964 #ifdef CONFIG_NUMA
965 #define pfn_to_nid(pfn)							\
966 ({									\
967 	unsigned long __pfn_to_nid_pfn = (pfn);				\
968 	page_to_nid(pfn_to_page(__pfn_to_nid_pfn));			\
969 })
970 #else
971 #define pfn_to_nid(pfn)		(0)
972 #endif
973 
974 #define early_pfn_valid(pfn)	pfn_valid(pfn)
975 void sparse_init(void);
976 #else
977 #define sparse_init()	do {} while (0)
978 #define sparse_index_init(_sec, _nid)  do {} while (0)
979 #endif /* CONFIG_SPARSEMEM */
980 
981 #ifdef CONFIG_NODES_SPAN_OTHER_NODES
982 #define early_pfn_in_nid(pfn, nid)	(early_pfn_to_nid(pfn) == (nid))
983 #else
984 #define early_pfn_in_nid(pfn, nid)	(1)
985 #endif
986 
987 #ifndef early_pfn_valid
988 #define early_pfn_valid(pfn)	(1)
989 #endif
990 
991 void memory_present(int nid, unsigned long start, unsigned long end);
992 unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long);
993 
994 /*
995  * If it is possible to have holes within a MAX_ORDER_NR_PAGES block, then we
996  * need to check pfn validity within that MAX_ORDER_NR_PAGES block.
997  * pfn_valid_within() should be used in this case; we optimise this away
998  * when we have no holes within a MAX_ORDER_NR_PAGES block.
999  */
1000 #ifdef CONFIG_HOLES_IN_ZONE
1001 #define pfn_valid_within(pfn) pfn_valid(pfn)
1002 #else
1003 #define pfn_valid_within(pfn) (1)
1004 #endif
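/*
 * Illustrative sketch (editor's addition): code that walks pfns within a
 * single MAX_ORDER_NR_PAGES block only needs the cheap check:
 *
 *	for (pfn = start_pfn; pfn < start_pfn + MAX_ORDER_NR_PAGES; pfn++) {
 *		if (!pfn_valid_within(pfn))
 *			continue;
 *		page = pfn_to_page(pfn);
 *		...
 *	}
 *
 * whereas a pfn of unknown provenance still needs the full pfn_valid() test.
 */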
1005 
1006 #endif /* !__GENERATING_BOUNDS_H */
1007 #endif /* !__ASSEMBLY__ */
1008 #endif /* __KERNEL__ */
1009 #endif /* _LINUX_MMZONE_H */
1010