12874c5fdSThomas Gleixner /* SPDX-License-Identifier: GPL-2.0-or-later */
295f72d1eSYinghai Lu #ifndef _LINUX_MEMBLOCK_H
395f72d1eSYinghai Lu #define _LINUX_MEMBLOCK_H
495f72d1eSYinghai Lu
595f72d1eSYinghai Lu /*
695f72d1eSYinghai Lu * Logical memory blocks.
795f72d1eSYinghai Lu *
895f72d1eSYinghai Lu * Copyright (C) 2001 Peter Bergner, IBM Corp.
995f72d1eSYinghai Lu */
1095f72d1eSYinghai Lu
1195f72d1eSYinghai Lu #include <linux/init.h>
1295f72d1eSYinghai Lu #include <linux/mm.h>
1357c8a661SMike Rapoport #include <asm/dma.h>
1457c8a661SMike Rapoport
1557c8a661SMike Rapoport extern unsigned long max_low_pfn;
1657c8a661SMike Rapoport extern unsigned long min_low_pfn;
1757c8a661SMike Rapoport
1857c8a661SMike Rapoport /*
1957c8a661SMike Rapoport * highest page
2057c8a661SMike Rapoport */
2157c8a661SMike Rapoport extern unsigned long max_pfn;
2257c8a661SMike Rapoport /*
2357c8a661SMike Rapoport * highest possible page
2457c8a661SMike Rapoport */
2557c8a661SMike Rapoport extern unsigned long long max_possible_pfn;
2695f72d1eSYinghai Lu
279a0de1bfSMike Rapoport /**
289a0de1bfSMike Rapoport * enum memblock_flags - definition of memory region attributes
299a0de1bfSMike Rapoport * @MEMBLOCK_NONE: no special request
30e14b4155SDavid Hildenbrand * @MEMBLOCK_HOTPLUG: memory region indicated in the firmware-provided memory
31e14b4155SDavid Hildenbrand * map during early boot as hot(un)pluggable system RAM (e.g., memory range
32e14b4155SDavid Hildenbrand * that might get hotunplugged later). With "movable_node" set on the kernel
33e14b4155SDavid Hildenbrand * commandline, try keeping this memory region hotunpluggable. Does not apply
34e14b4155SDavid Hildenbrand * to memblocks added ("hotplugged") after early boot.
359a0de1bfSMike Rapoport * @MEMBLOCK_MIRROR: mirrored region
369092d4f7SMike Rapoport * @MEMBLOCK_NOMAP: don't add to kernel direct mapping and treat as
379092d4f7SMike Rapoport * reserved in the memory map; refer to memblock_mark_nomap() description
389092d4f7SMike Rapoport * for further details
39f7892d8eSDavid Hildenbrand * @MEMBLOCK_DRIVER_MANAGED: memory region that is always detected and added
40f7892d8eSDavid Hildenbrand * via a driver, and never indicated in the firmware-provided memory map as
41f7892d8eSDavid Hildenbrand * system RAM. This corresponds to IORESOURCE_SYSRAM_DRIVER_MANAGED in the
42f7892d8eSDavid Hildenbrand * kernel resource tree.
4377e6c43eSUsama Arif * @MEMBLOCK_RSRV_NOINIT: memory region for which struct pages are
4477e6c43eSUsama Arif * not initialized (only for reserved regions).
459a0de1bfSMike Rapoport */
/* Region attribute flags, stored in memblock_region::flags. */
enum memblock_flags {
	MEMBLOCK_NONE		= 0x0,	/* No special request */
	MEMBLOCK_HOTPLUG	= 0x1,	/* hotpluggable region */
	MEMBLOCK_MIRROR		= 0x2,	/* mirrored region */
	MEMBLOCK_NOMAP		= 0x4,	/* don't add to kernel direct mapping */
	MEMBLOCK_DRIVER_MANAGED = 0x8,	/* always detected via a driver */
	MEMBLOCK_RSRV_NOINIT	= 0x10,	/* don't initialize struct pages */
};
5466b16edfSTang Chen
559a0de1bfSMike Rapoport /**
569a0de1bfSMike Rapoport * struct memblock_region - represents a memory region
578cbd54f5Schenqiwu * @base: base address of the region
589a0de1bfSMike Rapoport * @size: size of the region
599a0de1bfSMike Rapoport * @flags: memory region attributes
609a0de1bfSMike Rapoport * @nid: NUMA node id
619a0de1bfSMike Rapoport */
/* A single physical memory range and its attributes. */
struct memblock_region {
	phys_addr_t base;		/* base address of the region */
	phys_addr_t size;		/* size of the region */
	enum memblock_flags flags;	/* memory region attributes */
#ifdef CONFIG_NUMA
	int nid;			/* NUMA node id */
#endif
};
7095f72d1eSYinghai Lu
719a0de1bfSMike Rapoport /**
729a0de1bfSMike Rapoport * struct memblock_type - collection of memory regions of certain type
739a0de1bfSMike Rapoport * @cnt: number of regions
749a0de1bfSMike Rapoport * @max: size of the allocated array
759a0de1bfSMike Rapoport * @total_size: size of all regions
769a0de1bfSMike Rapoport * @regions: array of regions
779a0de1bfSMike Rapoport * @name: the memory type symbolic name
789a0de1bfSMike Rapoport */
/* A collection of memory regions of one kind (e.g. memory, reserved). */
struct memblock_type {
	unsigned long cnt;		/* number of regions */
	unsigned long max;		/* size of the allocated array */
	phys_addr_t total_size;		/* size of all regions */
	struct memblock_region *regions;	/* array of regions */
	char *name;			/* the memory type symbolic name */
};
8695f72d1eSYinghai Lu
879a0de1bfSMike Rapoport /**
889a0de1bfSMike Rapoport * struct memblock - memblock allocator metadata
899a0de1bfSMike Rapoport * @bottom_up: is bottom up direction?
909a0de1bfSMike Rapoport * @current_limit: physical address of the current allocation limit
918cbd54f5Schenqiwu * @memory: usable memory regions
929a0de1bfSMike Rapoport * @reserved: reserved memory regions
939a0de1bfSMike Rapoport */
/* Memblock allocator metadata; the single global instance is 'memblock'. */
struct memblock {
	bool bottom_up;			/* is bottom up direction? */
	phys_addr_t current_limit;	/* physical address of the current
					 * allocation limit */
	struct memblock_type memory;	/* usable memory regions */
	struct memblock_type reserved;	/* reserved memory regions */
};
10095f72d1eSYinghai Lu
10195f72d1eSYinghai Lu extern struct memblock memblock;
1025e63cf43SYinghai Lu
103350e88baSMike Rapoport #ifndef CONFIG_ARCH_KEEP_MEMBLOCK
104036fbb21SKirill A. Shutemov #define __init_memblock __meminit
105036fbb21SKirill A. Shutemov #define __initdata_memblock __meminitdata
1063010f876SPavel Tatashin void memblock_discard(void);
107036fbb21SKirill A. Shutemov #else
108036fbb21SKirill A. Shutemov #define __init_memblock
109036fbb21SKirill A. Shutemov #define __initdata_memblock
memblock_discard(void)110350e88baSMike Rapoport static inline void memblock_discard(void) {}
111036fbb21SKirill A. Shutemov #endif
112036fbb21SKirill A. Shutemov
1131aadc056STejun Heo void memblock_allow_resize(void);
114952eea9bSDavid Hildenbrand int memblock_add_node(phys_addr_t base, phys_addr_t size, int nid,
115952eea9bSDavid Hildenbrand enum memblock_flags flags);
116581adcbeSTejun Heo int memblock_add(phys_addr_t base, phys_addr_t size);
117581adcbeSTejun Heo int memblock_remove(phys_addr_t base, phys_addr_t size);
1183ecc6834SMike Rapoport int memblock_phys_free(phys_addr_t base, phys_addr_t size);
119581adcbeSTejun Heo int memblock_reserve(phys_addr_t base, phys_addr_t size);
12002634a44SAnshuman Khandual #ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
12102634a44SAnshuman Khandual int memblock_physmem_add(phys_addr_t base, phys_addr_t size);
12202634a44SAnshuman Khandual #endif
1236ede1fd3SYinghai Lu void memblock_trim_memory(phys_addr_t align);
1249b99c17fSAlison Schofield unsigned long memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1,
1259b99c17fSAlison Schofield phys_addr_t base2, phys_addr_t size2);
12695cf82ecSTang Chen bool memblock_overlaps_region(struct memblock_type *type,
12795cf82ecSTang Chen phys_addr_t base, phys_addr_t size);
128ff6c3d81SLiam Ni bool memblock_validate_numa_coverage(unsigned long threshold_bytes);
12966b16edfSTang Chen int memblock_mark_hotplug(phys_addr_t base, phys_addr_t size);
13066b16edfSTang Chen int memblock_clear_hotplug(phys_addr_t base, phys_addr_t size);
131a3f5bafcSTony Luck int memblock_mark_mirror(phys_addr_t base, phys_addr_t size);
132bf3d3cc5SArd Biesheuvel int memblock_mark_nomap(phys_addr_t base, phys_addr_t size);
1334c546b8aSAKASHI Takahiro int memblock_clear_nomap(phys_addr_t base, phys_addr_t size);
13477e6c43eSUsama Arif int memblock_reserved_mark_noinit(phys_addr_t base, phys_addr_t size);
135f1af9d3aSPhilipp Hachtmann
1364421cca0SMike Rapoport void memblock_free(void *ptr, size_t size);
13757c8a661SMike Rapoport void reset_all_zones_managed_pages(void);
13857c8a661SMike Rapoport
139f1af9d3aSPhilipp Hachtmann /* Low level functions */
140e1720feeSMike Rapoport void __next_mem_range(u64 *idx, int nid, enum memblock_flags flags,
141fc6daaf9STony Luck struct memblock_type *type_a,
142f1af9d3aSPhilipp Hachtmann struct memblock_type *type_b, phys_addr_t *out_start,
143f1af9d3aSPhilipp Hachtmann phys_addr_t *out_end, int *out_nid);
144f1af9d3aSPhilipp Hachtmann
145e1720feeSMike Rapoport void __next_mem_range_rev(u64 *idx, int nid, enum memblock_flags flags,
146fc6daaf9STony Luck struct memblock_type *type_a,
147f1af9d3aSPhilipp Hachtmann struct memblock_type *type_b, phys_addr_t *out_start,
148f1af9d3aSPhilipp Hachtmann phys_addr_t *out_end, int *out_nid);
149f1af9d3aSPhilipp Hachtmann
150621d9739SMike Rapoport void memblock_free_late(phys_addr_t base, phys_addr_t size);
1513010f876SPavel Tatashin
15277649905SDavid Hildenbrand #ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
/*
 * Helper for for_each_physmem_range(): advance @idx to the next range of
 * 'physmem' that is not included in @type (@type may be NULL).
 */
static inline void __next_physmem_range(u64 *idx, struct memblock_type *type,
					phys_addr_t *out_start,
					phys_addr_t *out_end)
{
	/* Declared locally so 'physmem' stays private to memblock internals */
	extern struct memblock_type physmem;

	__next_mem_range(idx, NUMA_NO_NODE, MEMBLOCK_NONE, &physmem, type,
			 out_start, out_end, NULL);
}
16277649905SDavid Hildenbrand
16377649905SDavid Hildenbrand /**
16477649905SDavid Hildenbrand * for_each_physmem_range - iterate through physmem areas not included in type.
16577649905SDavid Hildenbrand * @i: u64 used as loop variable
16677649905SDavid Hildenbrand * @type: ptr to memblock_type which excludes from the iteration, can be %NULL
16777649905SDavid Hildenbrand * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
16877649905SDavid Hildenbrand * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
16977649905SDavid Hildenbrand */
17077649905SDavid Hildenbrand #define for_each_physmem_range(i, type, p_start, p_end) \
17177649905SDavid Hildenbrand for (i = 0, __next_physmem_range(&i, type, p_start, p_end); \
17277649905SDavid Hildenbrand i != (u64)ULLONG_MAX; \
17377649905SDavid Hildenbrand __next_physmem_range(&i, type, p_start, p_end))
17477649905SDavid Hildenbrand #endif /* CONFIG_HAVE_MEMBLOCK_PHYS_MAP */
17577649905SDavid Hildenbrand
176f1af9d3aSPhilipp Hachtmann /**
1776e245ad4SMike Rapoport * __for_each_mem_range - iterate through memblock areas from type_a and not
178f1af9d3aSPhilipp Hachtmann * included in type_b. Or just type_a if type_b is NULL.
179f1af9d3aSPhilipp Hachtmann * @i: u64 used as loop variable
180f1af9d3aSPhilipp Hachtmann * @type_a: ptr to memblock_type to iterate
181f1af9d3aSPhilipp Hachtmann * @type_b: ptr to memblock_type which excludes from the iteration
182f1af9d3aSPhilipp Hachtmann * @nid: node selector, %NUMA_NO_NODE for all nodes
183fc6daaf9STony Luck * @flags: pick from blocks based on memory attributes
184f1af9d3aSPhilipp Hachtmann * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
185f1af9d3aSPhilipp Hachtmann * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
186f1af9d3aSPhilipp Hachtmann * @p_nid: ptr to int for nid of the range, can be %NULL
187f1af9d3aSPhilipp Hachtmann */
1886e245ad4SMike Rapoport #define __for_each_mem_range(i, type_a, type_b, nid, flags, \
189f1af9d3aSPhilipp Hachtmann p_start, p_end, p_nid) \
190fc6daaf9STony Luck for (i = 0, __next_mem_range(&i, nid, flags, type_a, type_b, \
191f1af9d3aSPhilipp Hachtmann p_start, p_end, p_nid); \
192f1af9d3aSPhilipp Hachtmann i != (u64)ULLONG_MAX; \
193fc6daaf9STony Luck __next_mem_range(&i, nid, flags, type_a, type_b, \
194f1af9d3aSPhilipp Hachtmann p_start, p_end, p_nid))
195f1af9d3aSPhilipp Hachtmann
196f1af9d3aSPhilipp Hachtmann /**
1976e245ad4SMike Rapoport * __for_each_mem_range_rev - reverse iterate through memblock areas from
198f1af9d3aSPhilipp Hachtmann * type_a and not included in type_b. Or just type_a if type_b is NULL.
199f1af9d3aSPhilipp Hachtmann * @i: u64 used as loop variable
200f1af9d3aSPhilipp Hachtmann * @type_a: ptr to memblock_type to iterate
201f1af9d3aSPhilipp Hachtmann * @type_b: ptr to memblock_type which excludes from the iteration
202f1af9d3aSPhilipp Hachtmann * @nid: node selector, %NUMA_NO_NODE for all nodes
203fc6daaf9STony Luck * @flags: pick from blocks based on memory attributes
204f1af9d3aSPhilipp Hachtmann * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
205f1af9d3aSPhilipp Hachtmann * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
206f1af9d3aSPhilipp Hachtmann * @p_nid: ptr to int for nid of the range, can be %NULL
207f1af9d3aSPhilipp Hachtmann */
2086e245ad4SMike Rapoport #define __for_each_mem_range_rev(i, type_a, type_b, nid, flags, \
209f1af9d3aSPhilipp Hachtmann p_start, p_end, p_nid) \
210f1af9d3aSPhilipp Hachtmann for (i = (u64)ULLONG_MAX, \
211fc6daaf9STony Luck __next_mem_range_rev(&i, nid, flags, type_a, type_b, \
212f1af9d3aSPhilipp Hachtmann p_start, p_end, p_nid); \
213f1af9d3aSPhilipp Hachtmann i != (u64)ULLONG_MAX; \
214fc6daaf9STony Luck __next_mem_range_rev(&i, nid, flags, type_a, type_b, \
215f1af9d3aSPhilipp Hachtmann p_start, p_end, p_nid))
216f1af9d3aSPhilipp Hachtmann
2178e7a7f86SRobin Holt /**
2186e245ad4SMike Rapoport * for_each_mem_range - iterate through memory areas.
2196e245ad4SMike Rapoport * @i: u64 used as loop variable
2206e245ad4SMike Rapoport * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
2216e245ad4SMike Rapoport * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
2226e245ad4SMike Rapoport */
2236e245ad4SMike Rapoport #define for_each_mem_range(i, p_start, p_end) \
2246e245ad4SMike Rapoport __for_each_mem_range(i, &memblock.memory, NULL, NUMA_NO_NODE, \
225f7892d8eSDavid Hildenbrand MEMBLOCK_HOTPLUG | MEMBLOCK_DRIVER_MANAGED, \
226f7892d8eSDavid Hildenbrand p_start, p_end, NULL)
2276e245ad4SMike Rapoport
2286e245ad4SMike Rapoport /**
 * for_each_mem_range_rev - reverse iterate through memory areas.
2316e245ad4SMike Rapoport * @i: u64 used as loop variable
2326e245ad4SMike Rapoport * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
2336e245ad4SMike Rapoport * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
2346e245ad4SMike Rapoport */
2356e245ad4SMike Rapoport #define for_each_mem_range_rev(i, p_start, p_end) \
2366e245ad4SMike Rapoport __for_each_mem_range_rev(i, &memblock.memory, NULL, NUMA_NO_NODE, \
237f7892d8eSDavid Hildenbrand MEMBLOCK_HOTPLUG | MEMBLOCK_DRIVER_MANAGED,\
238f7892d8eSDavid Hildenbrand p_start, p_end, NULL)
2396e245ad4SMike Rapoport
2406e245ad4SMike Rapoport /**
2419f3d5eaaSMike Rapoport * for_each_reserved_mem_range - iterate over all reserved memblock areas
2428e7a7f86SRobin Holt * @i: u64 used as loop variable
2438e7a7f86SRobin Holt * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
2448e7a7f86SRobin Holt * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
2458e7a7f86SRobin Holt *
2468e7a7f86SRobin Holt * Walks over reserved areas of memblock. Available as soon as memblock
2478e7a7f86SRobin Holt * is initialized.
2488e7a7f86SRobin Holt */
2499f3d5eaaSMike Rapoport #define for_each_reserved_mem_range(i, p_start, p_end) \
2509f3d5eaaSMike Rapoport __for_each_mem_range(i, &memblock.reserved, NULL, NUMA_NO_NODE, \
2519f3d5eaaSMike Rapoport MEMBLOCK_NONE, p_start, p_end, NULL)
2528e7a7f86SRobin Holt
memblock_is_hotpluggable(struct memblock_region * m)25355ac590cSTang Chen static inline bool memblock_is_hotpluggable(struct memblock_region *m)
25455ac590cSTang Chen {
25555ac590cSTang Chen return m->flags & MEMBLOCK_HOTPLUG;
25655ac590cSTang Chen }
25755ac590cSTang Chen
memblock_is_mirror(struct memblock_region * m)258a3f5bafcSTony Luck static inline bool memblock_is_mirror(struct memblock_region *m)
259a3f5bafcSTony Luck {
260a3f5bafcSTony Luck return m->flags & MEMBLOCK_MIRROR;
261a3f5bafcSTony Luck }
262a3f5bafcSTony Luck
memblock_is_nomap(struct memblock_region * m)263bf3d3cc5SArd Biesheuvel static inline bool memblock_is_nomap(struct memblock_region *m)
264bf3d3cc5SArd Biesheuvel {
265bf3d3cc5SArd Biesheuvel return m->flags & MEMBLOCK_NOMAP;
266bf3d3cc5SArd Biesheuvel }
267bf3d3cc5SArd Biesheuvel
memblock_is_reserved_noinit(struct memblock_region * m)26877e6c43eSUsama Arif static inline bool memblock_is_reserved_noinit(struct memblock_region *m)
26977e6c43eSUsama Arif {
27077e6c43eSUsama Arif return m->flags & MEMBLOCK_RSRV_NOINIT;
27177e6c43eSUsama Arif }
27277e6c43eSUsama Arif
memblock_is_driver_managed(struct memblock_region * m)273f7892d8eSDavid Hildenbrand static inline bool memblock_is_driver_managed(struct memblock_region *m)
274f7892d8eSDavid Hildenbrand {
275f7892d8eSDavid Hildenbrand return m->flags & MEMBLOCK_DRIVER_MANAGED;
276f7892d8eSDavid Hildenbrand }
277f7892d8eSDavid Hildenbrand
278e76b63f8SYinghai Lu int memblock_search_pfn_nid(unsigned long pfn, unsigned long *start_pfn,
279e76b63f8SYinghai Lu unsigned long *end_pfn);
2800ee332c1STejun Heo void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn,
2810ee332c1STejun Heo unsigned long *out_end_pfn, int *out_nid);
2820ee332c1STejun Heo
2830ee332c1STejun Heo /**
2840ee332c1STejun Heo * for_each_mem_pfn_range - early memory pfn range iterator
2850ee332c1STejun Heo * @i: an integer used as loop variable
2860ee332c1STejun Heo * @nid: node selector, %MAX_NUMNODES for all nodes
2870ee332c1STejun Heo * @p_start: ptr to ulong for start pfn of the range, can be %NULL
2880ee332c1STejun Heo * @p_end: ptr to ulong for end pfn of the range, can be %NULL
2890ee332c1STejun Heo * @p_nid: ptr to int for nid of the range, can be %NULL
2900ee332c1STejun Heo *
291f2d52fe5SWanpeng Li * Walks over configured memory ranges.
2920ee332c1STejun Heo */
2930ee332c1STejun Heo #define for_each_mem_pfn_range(i, nid, p_start, p_end, p_nid) \
2940ee332c1STejun Heo for (i = -1, __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid); \
2950ee332c1STejun Heo i >= 0; __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid))
2960ee332c1STejun Heo
297837566e7SAlexander Duyck #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
298837566e7SAlexander Duyck void __next_mem_pfn_range_in_zone(u64 *idx, struct zone *zone,
299837566e7SAlexander Duyck unsigned long *out_spfn,
300837566e7SAlexander Duyck unsigned long *out_epfn);
3010e56acaeSAlexander Duyck
3020e56acaeSAlexander Duyck /**
303909782adSMauro Carvalho Chehab * for_each_free_mem_pfn_range_in_zone_from - iterate through zone specific
3040e56acaeSAlexander Duyck * free memblock areas from a given point
3050e56acaeSAlexander Duyck * @i: u64 used as loop variable
3060e56acaeSAlexander Duyck * @zone: zone in which all of the memory blocks reside
3070e56acaeSAlexander Duyck * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
3080e56acaeSAlexander Duyck * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
3090e56acaeSAlexander Duyck *
3100e56acaeSAlexander Duyck * Walks over free (memory && !reserved) areas of memblock in a specific
3110e56acaeSAlexander Duyck * zone, continuing from current position. Available as soon as memblock is
3120e56acaeSAlexander Duyck * initialized.
3130e56acaeSAlexander Duyck */
3140e56acaeSAlexander Duyck #define for_each_free_mem_pfn_range_in_zone_from(i, zone, p_start, p_end) \
3150e56acaeSAlexander Duyck for (; i != U64_MAX; \
3160e56acaeSAlexander Duyck __next_mem_pfn_range_in_zone(&i, zone, p_start, p_end))
317ecd09650SDaniel Jordan
318837566e7SAlexander Duyck #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
319837566e7SAlexander Duyck
32035fd0808STejun Heo /**
32135fd0808STejun Heo * for_each_free_mem_range - iterate through free memblock areas
32235fd0808STejun Heo * @i: u64 used as loop variable
323b1154233SGrygorii Strashko * @nid: node selector, %NUMA_NO_NODE for all nodes
324d30b5545SFlorian Fainelli * @flags: pick from blocks based on memory attributes
32535fd0808STejun Heo * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
32635fd0808STejun Heo * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
32735fd0808STejun Heo * @p_nid: ptr to int for nid of the range, can be %NULL
32835fd0808STejun Heo *
32935fd0808STejun Heo * Walks over free (memory && !reserved) areas of memblock. Available as
33035fd0808STejun Heo * soon as memblock is initialized.
33135fd0808STejun Heo */
332fc6daaf9STony Luck #define for_each_free_mem_range(i, nid, flags, p_start, p_end, p_nid) \
3336e245ad4SMike Rapoport __for_each_mem_range(i, &memblock.memory, &memblock.reserved, \
334fc6daaf9STony Luck nid, flags, p_start, p_end, p_nid)
3357bd0b0f0STejun Heo
3367bd0b0f0STejun Heo /**
3377bd0b0f0STejun Heo * for_each_free_mem_range_reverse - rev-iterate through free memblock areas
3387bd0b0f0STejun Heo * @i: u64 used as loop variable
339b1154233SGrygorii Strashko * @nid: node selector, %NUMA_NO_NODE for all nodes
340d30b5545SFlorian Fainelli * @flags: pick from blocks based on memory attributes
3417bd0b0f0STejun Heo * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
3427bd0b0f0STejun Heo * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
3437bd0b0f0STejun Heo * @p_nid: ptr to int for nid of the range, can be %NULL
3447bd0b0f0STejun Heo *
3457bd0b0f0STejun Heo * Walks over free (memory && !reserved) areas of memblock in reverse
3467bd0b0f0STejun Heo * order. Available as soon as memblock is initialized.
3477bd0b0f0STejun Heo */
348fc6daaf9STony Luck #define for_each_free_mem_range_reverse(i, nid, flags, p_start, p_end, \
349fc6daaf9STony Luck p_nid) \
3506e245ad4SMike Rapoport __for_each_mem_range_rev(i, &memblock.memory, &memblock.reserved, \
351fc6daaf9STony Luck nid, flags, p_start, p_end, p_nid)
3527bd0b0f0STejun Heo
353e7e8de59STang Chen int memblock_set_node(phys_addr_t base, phys_addr_t size,
354e7e8de59STang Chen struct memblock_type *type, int nid);
3557c0caeb8STejun Heo
356a9ee6cf5SMike Rapoport #ifdef CONFIG_NUMA
/* Record the NUMA node id for region @r (CONFIG_NUMA=y variant). */
static inline void memblock_set_region_node(struct memblock_region *r, int nid)
{
	r->nid = nid;
}
3617c0caeb8STejun Heo
/* Return the NUMA node id recorded for region @r (CONFIG_NUMA=y variant). */
static inline int memblock_get_region_node(const struct memblock_region *r)
{
	return r->nid;
}
3667c0caeb8STejun Heo #else
/* !CONFIG_NUMA stub: node ids are not tracked, so this is a no-op. */
static inline void memblock_set_region_node(struct memblock_region *r, int nid)
{
}
3707c0caeb8STejun Heo
/* !CONFIG_NUMA stub: all memory is treated as node 0. */
static inline int memblock_get_region_node(const struct memblock_region *r)
{
	return 0;
}
375a9ee6cf5SMike Rapoport #endif /* CONFIG_NUMA */
3767c0caeb8STejun Heo
37757c8a661SMike Rapoport /* Flags for memblock allocation APIs */
37857c8a661SMike Rapoport #define MEMBLOCK_ALLOC_ANYWHERE (~(phys_addr_t)0)
37957c8a661SMike Rapoport #define MEMBLOCK_ALLOC_ACCESSIBLE 0
380b2aad24bSGuo Weikang /*
381b2aad24bSGuo Weikang * MEMBLOCK_ALLOC_NOLEAKTRACE avoids kmemleak tracing. It implies
382b2aad24bSGuo Weikang * MEMBLOCK_ALLOC_ACCESSIBLE
383b2aad24bSGuo Weikang */
384c6975d7cSQian Cai #define MEMBLOCK_ALLOC_NOLEAKTRACE 1
38557c8a661SMike Rapoport
38657c8a661SMike Rapoport /* We are using top down, so it is safe to use 0 here */
38757c8a661SMike Rapoport #define MEMBLOCK_LOW_LIMIT 0
38857c8a661SMike Rapoport
38957c8a661SMike Rapoport #ifndef ARCH_LOW_ADDRESS_LIMIT
39057c8a661SMike Rapoport #define ARCH_LOW_ADDRESS_LIMIT 0xffffffffUL
39157c8a661SMike Rapoport #endif
39257c8a661SMike Rapoport
3938a770c2aSMike Rapoport phys_addr_t memblock_phys_alloc_range(phys_addr_t size, phys_addr_t align,
3948a770c2aSMike Rapoport phys_addr_t start, phys_addr_t end);
3958676af1fSAslan Bakirov phys_addr_t memblock_alloc_range_nid(phys_addr_t size,
3968676af1fSAslan Bakirov phys_addr_t align, phys_addr_t start,
3978676af1fSAslan Bakirov phys_addr_t end, int nid, bool exact_nid);
3989a8dd708SMike Rapoport phys_addr_t memblock_phys_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid);
3999d1e2492SBenjamin Herrenschmidt
/*
 * Allocate @size bytes of physical memory with @align alignment from
 * anywhere currently accessible (range [0, MEMBLOCK_ALLOC_ACCESSIBLE)).
 * Returns the physical address of the allocation; presumably 0 on
 * failure — see memblock_phys_alloc_range().
 */
static __always_inline phys_addr_t memblock_phys_alloc(phys_addr_t size,
						       phys_addr_t align)
{
	return memblock_phys_alloc_range(size, align, 0,
					 MEMBLOCK_ALLOC_ACCESSIBLE);
}
406e63075a3SBenjamin Herrenschmidt
4070ac398b1SYunfeng Ye void *memblock_alloc_exact_nid_raw(phys_addr_t size, phys_addr_t align,
4080ac398b1SYunfeng Ye phys_addr_t min_addr, phys_addr_t max_addr,
4090ac398b1SYunfeng Ye int nid);
41057c8a661SMike Rapoport void *memblock_alloc_try_nid_raw(phys_addr_t size, phys_addr_t align,
41157c8a661SMike Rapoport phys_addr_t min_addr, phys_addr_t max_addr,
41257c8a661SMike Rapoport int nid);
41357c8a661SMike Rapoport void *memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align,
41457c8a661SMike Rapoport phys_addr_t min_addr, phys_addr_t max_addr,
41557c8a661SMike Rapoport int nid);
41657c8a661SMike Rapoport
/*
 * Allocate @size bytes with @align alignment from any node, anywhere in
 * the accessible range, returning a virtual address (NULL presumably on
 * failure — see memblock_alloc_try_nid()). Contrast with
 * memblock_alloc_raw(), which uses the "raw" backend.
 */
static __always_inline void *memblock_alloc(phys_addr_t size, phys_addr_t align)
{
	return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
				      MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
}
42257c8a661SMike Rapoport
423*c6f23979SGuo Weikang void *__memblock_alloc_or_panic(phys_addr_t size, phys_addr_t align,
424*c6f23979SGuo Weikang const char *func);
425*c6f23979SGuo Weikang
426*c6f23979SGuo Weikang #define memblock_alloc_or_panic(size, align) \
427*c6f23979SGuo Weikang __memblock_alloc_or_panic(size, align, __func__)
428*c6f23979SGuo Weikang
/*
 * Like memblock_alloc(), but routed through memblock_alloc_try_nid_raw().
 * The "raw" variant presumably skips memory initialization — confirm
 * against memblock_alloc_try_nid_raw()'s definition.
 */
static inline void *memblock_alloc_raw(phys_addr_t size,
				       phys_addr_t align)
{
	return memblock_alloc_try_nid_raw(size, align, MEMBLOCK_LOW_LIMIT,
					  MEMBLOCK_ALLOC_ACCESSIBLE,
					  NUMA_NO_NODE);
}
43657c8a661SMike Rapoport
/*
 * Like memblock_alloc(), but only consider memory at or above @min_addr.
 */
static inline void *memblock_alloc_from(phys_addr_t size,
					phys_addr_t align,
					phys_addr_t min_addr)
{
	return memblock_alloc_try_nid(size, align, min_addr,
				      MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
}
44457c8a661SMike Rapoport
/*
 * Like memblock_alloc(), but restrict the allocation to below
 * ARCH_LOW_ADDRESS_LIMIT (0xffffffffUL unless the arch overrides it).
 */
static inline void *memblock_alloc_low(phys_addr_t size,
				       phys_addr_t align)
{
	return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
				      ARCH_LOW_ADDRESS_LIMIT, NUMA_NO_NODE);
}
45157c8a661SMike Rapoport
/*
 * Like memblock_alloc(), but pass @nid to prefer memory on that NUMA node
 * (see memblock_alloc_try_nid() for the exact node-fallback semantics).
 */
static inline void *memblock_alloc_node(phys_addr_t size,
					phys_addr_t align, int nid)
{
	return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
				      MEMBLOCK_ALLOC_ACCESSIBLE, nid);
}
45857c8a661SMike Rapoport
45979442ed1STang Chen /*
46079442ed1STang Chen * Set the allocation direction to bottom-up or top-down.
46179442ed1STang Chen */
static inline __init_memblock void memblock_set_bottom_up(bool enable)
{
	/* true => allocate from low addresses upwards */
	memblock.bottom_up = enable;
}
46679442ed1STang Chen
46779442ed1STang Chen /*
46879442ed1STang Chen * Check if the allocation direction is bottom-up or not.
46979442ed1STang Chen * if this is true, that said, memblock will allocate memory
47079442ed1STang Chen * in bottom-up direction.
47179442ed1STang Chen */
/* Report the current allocation direction; see memblock_set_bottom_up(). */
static inline __init_memblock bool memblock_bottom_up(void)
{
	return memblock.bottom_up;
}
47679442ed1STang Chen
477581adcbeSTejun Heo phys_addr_t memblock_phys_mem_size(void);
4788907de5dSSrikar Dronamraju phys_addr_t memblock_reserved_size(void);
479d0f8a897SWei Yang unsigned long memblock_estimated_nr_free_pages(void);
480581adcbeSTejun Heo phys_addr_t memblock_start_of_DRAM(void);
481581adcbeSTejun Heo phys_addr_t memblock_end_of_DRAM(void);
482581adcbeSTejun Heo void memblock_enforce_memory_limit(phys_addr_t memory_limit);
483c9ca9b4eSAKASHI Takahiro void memblock_cap_memory_range(phys_addr_t base, phys_addr_t size);
484a571d4ebSDennis Chen void memblock_mem_limit_remove_map(phys_addr_t limit);
485b4ad0c7eSYaowei Bai bool memblock_is_memory(phys_addr_t addr);
486937f0c26SYaowei Bai bool memblock_is_map_memory(phys_addr_t addr);
487937f0c26SYaowei Bai bool memblock_is_region_memory(phys_addr_t base, phys_addr_t size);
488b4ad0c7eSYaowei Bai bool memblock_is_reserved(phys_addr_t addr);
489c5c5c9d1STang Chen bool memblock_is_region_reserved(phys_addr_t base, phys_addr_t size);
49095f72d1eSYinghai Lu
49187c55870SMike Rapoport void memblock_dump_all(void);
49295f72d1eSYinghai Lu
493e63075a3SBenjamin Herrenschmidt /**
494e63075a3SBenjamin Herrenschmidt * memblock_set_current_limit - Set the current allocation limit to allow
495e63075a3SBenjamin Herrenschmidt * limiting allocations to what is currently
496e63075a3SBenjamin Herrenschmidt * accessible during boot
497e63075a3SBenjamin Herrenschmidt * @limit: New limit value (physical address)
498e63075a3SBenjamin Herrenschmidt */
499581adcbeSTejun Heo void memblock_set_current_limit(phys_addr_t limit);
500e63075a3SBenjamin Herrenschmidt
50135a1f0bdSBenjamin Herrenschmidt
502fec51014SLaura Abbott phys_addr_t memblock_get_current_limit(void);
503fec51014SLaura Abbott
5045b385f25SBenjamin Herrenschmidt /*
5055b385f25SBenjamin Herrenschmidt * pfn conversion functions
5065b385f25SBenjamin Herrenschmidt *
5075b385f25SBenjamin Herrenschmidt * While the memory MEMBLOCKs should always be page aligned, the reserved
5085b385f25SBenjamin Herrenschmidt * MEMBLOCKs may not be. This accessor attempt to provide a very clear
5095b385f25SBenjamin Herrenschmidt * idea of what they return for such non aligned MEMBLOCKs.
5105b385f25SBenjamin Herrenschmidt */
5115b385f25SBenjamin Herrenschmidt
5125b385f25SBenjamin Herrenschmidt /**
51347cec443SMike Rapoport * memblock_region_memory_base_pfn - get the lowest pfn of the memory region
5145b385f25SBenjamin Herrenschmidt * @reg: memblock_region structure
51547cec443SMike Rapoport *
51647cec443SMike Rapoport * Return: the lowest pfn intersecting with the memory region
5175b385f25SBenjamin Herrenschmidt */
static inline unsigned long memblock_region_memory_base_pfn(const struct memblock_region *reg)
{
	/* Memory regions should be page aligned; round up in case not. */
	return PFN_UP(reg->base);
}
5225b385f25SBenjamin Herrenschmidt
/**
 * memblock_region_memory_end_pfn - get the end pfn of the memory region
 * @reg: memblock_region structure
 *
 * Return: the end_pfn of the memory region
 */
static inline unsigned long memblock_region_memory_end_pfn(const struct memblock_region *reg)
{
	/* Round down: a partially covered trailing page is not fully memory. */
	return PFN_DOWN(reg->base + reg->size);
}
5335b385f25SBenjamin Herrenschmidt
5345b385f25SBenjamin Herrenschmidt /**
53547cec443SMike Rapoport * memblock_region_reserved_base_pfn - get the lowest pfn of the reserved region
5365b385f25SBenjamin Herrenschmidt * @reg: memblock_region structure
53747cec443SMike Rapoport *
53847cec443SMike Rapoport * Return: the lowest pfn intersecting with the reserved region
5395b385f25SBenjamin Herrenschmidt */
memblock_region_reserved_base_pfn(const struct memblock_region * reg)540c7fc2de0SYinghai Lu static inline unsigned long memblock_region_reserved_base_pfn(const struct memblock_region *reg)
5415b385f25SBenjamin Herrenschmidt {
542c7fc2de0SYinghai Lu return PFN_DOWN(reg->base);
5435b385f25SBenjamin Herrenschmidt }
5445b385f25SBenjamin Herrenschmidt
5455b385f25SBenjamin Herrenschmidt /**
54647cec443SMike Rapoport * memblock_region_reserved_end_pfn - get the end pfn of the reserved region
5475b385f25SBenjamin Herrenschmidt * @reg: memblock_region structure
54847cec443SMike Rapoport *
54947cec443SMike Rapoport * Return: the end_pfn of the reserved region
5505b385f25SBenjamin Herrenschmidt */
memblock_region_reserved_end_pfn(const struct memblock_region * reg)551c7fc2de0SYinghai Lu static inline unsigned long memblock_region_reserved_end_pfn(const struct memblock_region *reg)
5525b385f25SBenjamin Herrenschmidt {
553c7fc2de0SYinghai Lu return PFN_UP(reg->base + reg->size);
5545b385f25SBenjamin Herrenschmidt }
5555b385f25SBenjamin Herrenschmidt
/**
 * for_each_mem_region - iterate over memory regions
 * @region: loop variable (struct memblock_region *)
 */
#define for_each_mem_region(region)					\
	for (region = memblock.memory.regions;				\
	     region < (memblock.memory.regions + memblock.memory.cnt);	\
	     region++)
564cc6de168SMike Rapoport
/**
 * for_each_reserved_mem_region - iterate over reserved memory regions
 * @region: loop variable (struct memblock_region *)
 */
#define for_each_reserved_mem_region(region)				\
	for (region = memblock.reserved.regions;			\
	     region < (memblock.reserved.regions + memblock.reserved.cnt); \
	     region++)
5735b385f25SBenjamin Herrenschmidt
57457c8a661SMike Rapoport extern void *alloc_large_system_hash(const char *tablename,
57557c8a661SMike Rapoport unsigned long bucketsize,
57657c8a661SMike Rapoport unsigned long numentries,
57757c8a661SMike Rapoport int scale,
57857c8a661SMike Rapoport int flags,
57957c8a661SMike Rapoport unsigned int *_hash_shift,
58057c8a661SMike Rapoport unsigned int *_hash_mask,
58157c8a661SMike Rapoport unsigned long low_limit,
58257c8a661SMike Rapoport unsigned long high_limit);
58357c8a661SMike Rapoport
/* Flag bits for alloc_large_system_hash() */
#define HASH_EARLY	0x00000001	/* Allocating during early boot? */
#define HASH_ZERO	0x00000002	/* Zero allocated hash table */

/* Only NUMA needs hash distribution. 64bit NUMA architectures have
 * sufficient vmalloc space.
 */
#ifdef CONFIG_NUMA
#define HASHDIST_DEFAULT IS_ENABLED(CONFIG_64BIT)
extern int hashdist;		/* Distribute hashes across NUMA nodes? */
#else
#define hashdist (0)
#endif
59657c8a661SMike Rapoport
#ifdef CONFIG_MEMTEST
void early_memtest(phys_addr_t start, phys_addr_t end);
void memtest_report_meminfo(struct seq_file *m);
#else
/* Empty stubs so callers need no CONFIG_MEMTEST conditionals of their own. */
static inline void early_memtest(phys_addr_t start, phys_addr_t end) { }
static inline void memtest_report_meminfo(struct seq_file *m) { }
#endif
604f0b37fadSYinghai Lu
60595f72d1eSYinghai Lu
60695f72d1eSYinghai Lu #endif /* _LINUX_MEMBLOCK_H */
607