xref: /linux-6.15/include/linux/gfp.h (revision f90b474a)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_GFP_H
#define __LINUX_GFP_H

#include <linux/gfp_types.h>

#include <linux/mmzone.h>
#include <linux/topology.h>
#include <linux/alloc_tag.h>
#include <linux/sched.h>

struct vm_area_struct;
struct mempolicy;

/* Convert GFP flags to their corresponding migrate type */
#define GFP_MOVABLE_MASK (__GFP_RECLAIMABLE|__GFP_MOVABLE)
#define GFP_MOVABLE_SHIFT 3

static inline int gfp_migratetype(const gfp_t gfp_flags)
{
	VM_WARN_ON((gfp_flags & GFP_MOVABLE_MASK) == GFP_MOVABLE_MASK);
	BUILD_BUG_ON((1UL << GFP_MOVABLE_SHIFT) != ___GFP_MOVABLE);
	BUILD_BUG_ON((___GFP_MOVABLE >> GFP_MOVABLE_SHIFT) != MIGRATE_MOVABLE);
	BUILD_BUG_ON((___GFP_RECLAIMABLE >> GFP_MOVABLE_SHIFT) != MIGRATE_RECLAIMABLE);
	BUILD_BUG_ON(((___GFP_MOVABLE | ___GFP_RECLAIMABLE) >>
		      GFP_MOVABLE_SHIFT) != MIGRATE_HIGHATOMIC);

	if (unlikely(page_group_by_mobility_disabled))
		return MIGRATE_UNMOVABLE;

	/* Group based on mobility */
	return (__force unsigned long)(gfp_flags & GFP_MOVABLE_MASK) >> GFP_MOVABLE_SHIFT;
}
#undef GFP_MOVABLE_MASK
#undef GFP_MOVABLE_SHIFT

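/*
 * Illustrative mapping (a sketch, assuming page_group_by_mobility_disabled
 * is clear; the values follow directly from the BUILD_BUG_ON()s above):
 *
 *	gfp_migratetype(GFP_KERNEL)                     == MIGRATE_UNMOVABLE
 *	gfp_migratetype(GFP_HIGHUSER_MOVABLE)           == MIGRATE_MOVABLE
 *	gfp_migratetype(GFP_KERNEL | __GFP_RECLAIMABLE) == MIGRATE_RECLAIMABLE
 */
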
static inline bool gfpflags_allow_blocking(const gfp_t gfp_flags)
{
	return !!(gfp_flags & __GFP_DIRECT_RECLAIM);
}

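/*
 * Usage sketch: only requests that may enter direct reclaim are allowed to
 * block, e.g.
 *
 *	gfpflags_allow_blocking(GFP_KERNEL)	-> true
 *	gfpflags_allow_blocking(GFP_ATOMIC)	-> false
 *	gfpflags_allow_blocking(GFP_NOWAIT)	-> false
 */
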
static inline bool gfpflags_allow_spinning(const gfp_t gfp_flags)
{
	/*
	 * !__GFP_DIRECT_RECLAIM -> direct reclaim is not allowed.
	 * !__GFP_KSWAPD_RECLAIM -> it's not safe to wake up kswapd.
	 * All GFP_* flags including GFP_NOWAIT use one or both flags.
	 * try_alloc_pages() is the only API that doesn't specify either flag.
	 *
	 * This is stronger than GFP_NOWAIT or GFP_ATOMIC because
	 * those are guaranteed to never block on a sleeping lock.
	 * Here we are enforcing that the allocation doesn't ever spin
	 * on any locks (i.e. only trylocks). There is no high level
	 * GFP_$FOO flag for this use in try_alloc_pages() as the
	 * regular page allocator doesn't fully support this
	 * allocation mode.
	 */
	return !!(gfp_flags & __GFP_RECLAIM);
}

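/*
 * Sketch of the resulting behaviour: any request carrying a reclaim hint is
 * allowed to spin on allocator locks, while the flag-less try_alloc_pages()
 * style mask is restricted to trylocks:
 *
 *	gfpflags_allow_spinning(GFP_KERNEL)	-> true
 *	gfpflags_allow_spinning(GFP_NOWAIT)	-> true
 *	gfpflags_allow_spinning(0)		-> false  (try_alloc_pages() style)
 */
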
#ifdef CONFIG_HIGHMEM
#define OPT_ZONE_HIGHMEM ZONE_HIGHMEM
#else
#define OPT_ZONE_HIGHMEM ZONE_NORMAL
#endif

#ifdef CONFIG_ZONE_DMA
#define OPT_ZONE_DMA ZONE_DMA
#else
#define OPT_ZONE_DMA ZONE_NORMAL
#endif

#ifdef CONFIG_ZONE_DMA32
#define OPT_ZONE_DMA32 ZONE_DMA32
#else
#define OPT_ZONE_DMA32 ZONE_NORMAL
#endif

/*
 * GFP_ZONE_TABLE is a word size bitstring that is used for looking up the
 * zone to use given the lowest 4 bits of gfp_t. Entries are GFP_ZONES_SHIFT
 * bits long and there are 16 of them to cover all possible combinations of
 * __GFP_DMA, __GFP_DMA32, __GFP_MOVABLE and __GFP_HIGHMEM.
 *
 * The zone fallback order is MOVABLE=>HIGHMEM=>NORMAL=>DMA32=>DMA.
 * But GFP_MOVABLE is not only a zone specifier but also an allocation
 * policy. Therefore __GFP_MOVABLE plus another zone selector is valid.
 * Only 1 bit of the lowest 3 bits (DMA, DMA32, HIGHMEM) can be set to "1".
 *
 *       bit       result
 *       =================
 *       0x0    => NORMAL
 *       0x1    => DMA or NORMAL
 *       0x2    => HIGHMEM or NORMAL
 *       0x3    => BAD (DMA+HIGHMEM)
 *       0x4    => DMA32 or NORMAL
 *       0x5    => BAD (DMA+DMA32)
 *       0x6    => BAD (HIGHMEM+DMA32)
 *       0x7    => BAD (HIGHMEM+DMA32+DMA)
 *       0x8    => NORMAL (MOVABLE+0)
 *       0x9    => DMA or NORMAL (MOVABLE+DMA)
 *       0xa    => MOVABLE (Movable is valid only if HIGHMEM is set too)
 *       0xb    => BAD (MOVABLE+HIGHMEM+DMA)
 *       0xc    => DMA32 or NORMAL (MOVABLE+DMA32)
 *       0xd    => BAD (MOVABLE+DMA32+DMA)
 *       0xe    => BAD (MOVABLE+DMA32+HIGHMEM)
 *       0xf    => BAD (MOVABLE+DMA32+HIGHMEM+DMA)
 *
 * GFP_ZONES_SHIFT must be <= 2 on 32 bit platforms.
 */

#if defined(CONFIG_ZONE_DEVICE) && (MAX_NR_ZONES-1) <= 4
/* ZONE_DEVICE is not a valid GFP zone specifier */
#define GFP_ZONES_SHIFT 2
#else
#define GFP_ZONES_SHIFT ZONES_SHIFT
#endif

#if 16 * GFP_ZONES_SHIFT > BITS_PER_LONG
#error GFP_ZONES_SHIFT too large to create GFP_ZONE_TABLE integer
#endif

#define GFP_ZONE_TABLE ( \
	(ZONE_NORMAL << 0 * GFP_ZONES_SHIFT)				       \
	| (OPT_ZONE_DMA << ___GFP_DMA * GFP_ZONES_SHIFT)		       \
	| (OPT_ZONE_HIGHMEM << ___GFP_HIGHMEM * GFP_ZONES_SHIFT)	       \
	| (OPT_ZONE_DMA32 << ___GFP_DMA32 * GFP_ZONES_SHIFT)		       \
	| (ZONE_NORMAL << ___GFP_MOVABLE * GFP_ZONES_SHIFT)		       \
	| (OPT_ZONE_DMA << (___GFP_MOVABLE | ___GFP_DMA) * GFP_ZONES_SHIFT)    \
	| (ZONE_MOVABLE << (___GFP_MOVABLE | ___GFP_HIGHMEM) * GFP_ZONES_SHIFT)\
	| (OPT_ZONE_DMA32 << (___GFP_MOVABLE | ___GFP_DMA32) * GFP_ZONES_SHIFT)\
)

/*
 * GFP_ZONE_BAD is a bitmap for all combinations of __GFP_DMA, __GFP_DMA32,
 * __GFP_HIGHMEM and __GFP_MOVABLE that are not permitted. One flag per
 * entry starting with bit 0. Bit is set if the combination is not
 * allowed.
 */
#define GFP_ZONE_BAD ( \
	1 << (___GFP_DMA | ___GFP_HIGHMEM)				      \
	| 1 << (___GFP_DMA | ___GFP_DMA32)				      \
	| 1 << (___GFP_DMA32 | ___GFP_HIGHMEM)				      \
	| 1 << (___GFP_DMA | ___GFP_DMA32 | ___GFP_HIGHMEM)		      \
	| 1 << (___GFP_MOVABLE | ___GFP_HIGHMEM | ___GFP_DMA)		      \
	| 1 << (___GFP_MOVABLE | ___GFP_DMA32 | ___GFP_DMA)		      \
	| 1 << (___GFP_MOVABLE | ___GFP_DMA32 | ___GFP_HIGHMEM)		      \
	| 1 << (___GFP_MOVABLE | ___GFP_DMA32 | ___GFP_DMA | ___GFP_HIGHMEM)  \
)

static inline enum zone_type gfp_zone(gfp_t flags)
{
	enum zone_type z;
	int bit = (__force int) (flags & GFP_ZONEMASK);

	z = (GFP_ZONE_TABLE >> (bit * GFP_ZONES_SHIFT)) &
					 ((1 << GFP_ZONES_SHIFT) - 1);
	VM_BUG_ON((GFP_ZONE_BAD >> bit) & 1);
	return z;
}

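/*
 * Worked examples of the table lookup (a sketch; the DMA and HIGHMEM/MOVABLE
 * results assume CONFIG_ZONE_DMA=y and CONFIG_HIGHMEM=y respectively,
 * otherwise the OPT_ZONE_* macros fall back to ZONE_NORMAL):
 *
 *	gfp_zone(GFP_KERNEL)		 -> ZONE_NORMAL		(bit pattern 0x0)
 *	gfp_zone(GFP_KERNEL | __GFP_DMA) -> ZONE_DMA		(0x1)
 *	gfp_zone(GFP_HIGHUSER)		 -> ZONE_HIGHMEM	(0x2)
 *	gfp_zone(GFP_HIGHUSER_MOVABLE)	 -> ZONE_MOVABLE	(0xa)
 */
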
/*
 * There is only one page-allocator function, and two main namespaces to
 * it. The alloc_page*() variants return 'struct page *' and as such
 * can allocate highmem pages; the *get*page*() variants return
 * virtual kernel addresses to the allocated page(s).
 */

static inline int gfp_zonelist(gfp_t flags)
{
#ifdef CONFIG_NUMA
	if (unlikely(flags & __GFP_THISNODE))
		return ZONELIST_NOFALLBACK;
#endif
	return ZONELIST_FALLBACK;
}

/*
 * gfp flag masking for nested internal allocations.
 *
 * For code that needs to do allocations inside the public allocation API (e.g.
 * memory allocation tracking code) the allocations need to obey the caller's
 * allocation context constraints to prevent allocation context mismatches
 * (e.g. GFP_KERNEL allocations in GFP_NOFS contexts) from causing potential
 * deadlocks.
 *
 * It is also assumed that these nested allocations are for internal kernel
 * object storage purposes only and are not going to be used for DMA, etc. Hence
 * we strip out all the zone information and leave just the context information
 * intact.
 *
 * Further, internal allocations must fail before the higher level allocation
 * can fail, so we must make them fail faster and fail silently. We also don't
 * want them to deplete emergency reserves. Hence callers of nested allocations
 * must be prepared for them to fail.
 */
static inline gfp_t gfp_nested_mask(gfp_t flags)
{
	return ((flags & (GFP_KERNEL | GFP_ATOMIC | __GFP_NOLOCKDEP)) |
		(__GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN));
}

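/*
 * Sketch of the masking: a nested allocation made from a GFP_NOFS context
 * keeps the reclaim and IO bits but never regains __GFP_FS, zone specifiers
 * are dropped, and the fail-fast/silent modifiers are always added:
 *
 *	gfp_nested_mask(GFP_NOFS)
 *		== GFP_NOFS | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN
 *	gfp_nested_mask(GFP_KERNEL | __GFP_DMA)
 *		== GFP_KERNEL | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN
 */
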
/*
 * We get the zone list from the current node and the gfp_mask.
 * This zone list contains a maximum of MAX_NUMNODES*MAX_NR_ZONES zones.
 * There are two zonelists per node, one for all zones with memory and
 * one containing just zones from the node the zonelist belongs to.
 *
 * For the case of non-NUMA systems the NODE_DATA() gets optimized to
 * &contig_page_data at compile-time.
 */
static inline struct zonelist *node_zonelist(int nid, gfp_t flags)
{
	return NODE_DATA(nid)->node_zonelists + gfp_zonelist(flags);
}

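/*
 * Usage sketch: an ordinary request walks the fallback zonelist of the
 * requested node, while __GFP_THISNODE confines it to that node:
 *
 *	node_zonelist(nid, GFP_KERNEL)			selects ZONELIST_FALLBACK
 *	node_zonelist(nid, GFP_KERNEL | __GFP_THISNODE)	selects ZONELIST_NOFALLBACK
 *							(NUMA builds only)
 */
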
#ifndef HAVE_ARCH_FREE_PAGE
static inline void arch_free_page(struct page *page, int order) { }
#endif
#ifndef HAVE_ARCH_ALLOC_PAGE
static inline void arch_alloc_page(struct page *page, int order) { }
#endif

struct page *__alloc_pages_noprof(gfp_t gfp, unsigned int order, int preferred_nid,
		nodemask_t *nodemask);
#define __alloc_pages(...)			alloc_hooks(__alloc_pages_noprof(__VA_ARGS__))

struct folio *__folio_alloc_noprof(gfp_t gfp, unsigned int order, int preferred_nid,
		nodemask_t *nodemask);
#define __folio_alloc(...)			alloc_hooks(__folio_alloc_noprof(__VA_ARGS__))

unsigned long alloc_pages_bulk_noprof(gfp_t gfp, int preferred_nid,
				nodemask_t *nodemask, int nr_pages,
				struct page **page_array);
#define __alloc_pages_bulk(...)			alloc_hooks(alloc_pages_bulk_noprof(__VA_ARGS__))

unsigned long alloc_pages_bulk_mempolicy_noprof(gfp_t gfp,
				unsigned long nr_pages,
				struct page **page_array);
#define  alloc_pages_bulk_mempolicy(...)				\
	alloc_hooks(alloc_pages_bulk_mempolicy_noprof(__VA_ARGS__))

/* Bulk allocate order-0 pages */
#define alloc_pages_bulk(_gfp, _nr_pages, _page_array)		\
	__alloc_pages_bulk(_gfp, numa_mem_id(), NULL, _nr_pages, _page_array)

static inline unsigned long
alloc_pages_bulk_node_noprof(gfp_t gfp, int nid, unsigned long nr_pages,
				   struct page **page_array)
{
	if (nid == NUMA_NO_NODE)
		nid = numa_mem_id();

	return alloc_pages_bulk_noprof(gfp, nid, NULL, nr_pages, page_array);
}

#define alloc_pages_bulk_node(...)				\
	alloc_hooks(alloc_pages_bulk_node_noprof(__VA_ARGS__))

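/*
 * Bulk allocation sketch: the bulk API fills page_array with up to nr_pages
 * order-0 pages and returns how many it actually placed there, which may be
 * fewer than requested, so callers should handle a short return:
 *
 *	struct page *pages[16];
 *	unsigned long filled;
 *
 *	filled = alloc_pages_bulk(GFP_KERNEL, 16, pages);
 *	... use pages[0..filled-1], free each one with __free_page() ...
 */
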
static inline void warn_if_node_offline(int this_node, gfp_t gfp_mask)
{
	gfp_t warn_gfp = gfp_mask & (__GFP_THISNODE|__GFP_NOWARN);

	if (warn_gfp != (__GFP_THISNODE|__GFP_NOWARN))
		return;

	if (node_online(this_node))
		return;

	pr_warn("%pGg allocation from offline node %d\n", &gfp_mask, this_node);
	dump_stack();
}

/*
 * Allocate pages, preferring the node given as nid. The node must be valid and
 * online. For a more general interface, see alloc_pages_node().
 */
static inline struct page *
__alloc_pages_node_noprof(int nid, gfp_t gfp_mask, unsigned int order)
{
	VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES);
	warn_if_node_offline(nid, gfp_mask);

	return __alloc_pages_noprof(gfp_mask, order, nid, NULL);
}

#define  __alloc_pages_node(...)		alloc_hooks(__alloc_pages_node_noprof(__VA_ARGS__))

static inline
struct folio *__folio_alloc_node_noprof(gfp_t gfp, unsigned int order, int nid)
{
	VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES);
	warn_if_node_offline(nid, gfp);

	return __folio_alloc_noprof(gfp, order, nid, NULL);
}

#define  __folio_alloc_node(...)		alloc_hooks(__folio_alloc_node_noprof(__VA_ARGS__))

/*
 * Allocate pages, preferring the node given as nid. When nid == NUMA_NO_NODE,
 * prefer the current CPU's closest node. Otherwise, the node must be valid and
 * online.
 */
static inline struct page *alloc_pages_node_noprof(int nid, gfp_t gfp_mask,
						   unsigned int order)
{
	if (nid == NUMA_NO_NODE)
		nid = numa_mem_id();

	return __alloc_pages_node_noprof(nid, gfp_mask, order);
}

#define  alloc_pages_node(...)			alloc_hooks(alloc_pages_node_noprof(__VA_ARGS__))

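/*
 * Node-aware allocation sketch: NUMA_NO_NODE means "whatever node is closest
 * to the calling CPU", so both calls below request a single page near the
 * caller:
 *
 *	page = alloc_pages_node(NUMA_NO_NODE, GFP_KERNEL, 0);
 *	page = alloc_pages_node(numa_mem_id(), GFP_KERNEL, 0);
 */
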
#ifdef CONFIG_NUMA
struct page *alloc_pages_noprof(gfp_t gfp, unsigned int order);
struct folio *folio_alloc_noprof(gfp_t gfp, unsigned int order);
struct folio *folio_alloc_mpol_noprof(gfp_t gfp, unsigned int order,
		struct mempolicy *mpol, pgoff_t ilx, int nid);
struct folio *vma_alloc_folio_noprof(gfp_t gfp, int order, struct vm_area_struct *vma,
		unsigned long addr);
#else
static inline struct page *alloc_pages_noprof(gfp_t gfp_mask, unsigned int order)
{
	return alloc_pages_node_noprof(numa_node_id(), gfp_mask, order);
}
static inline struct folio *folio_alloc_noprof(gfp_t gfp, unsigned int order)
{
	return __folio_alloc_node_noprof(gfp, order, numa_node_id());
}
static inline struct folio *folio_alloc_mpol_noprof(gfp_t gfp, unsigned int order,
		struct mempolicy *mpol, pgoff_t ilx, int nid)
{
	return folio_alloc_noprof(gfp, order);
}
#define vma_alloc_folio_noprof(gfp, order, vma, addr)		\
	folio_alloc_noprof(gfp, order)
#endif

#define alloc_pages(...)			alloc_hooks(alloc_pages_noprof(__VA_ARGS__))
#define folio_alloc(...)			alloc_hooks(folio_alloc_noprof(__VA_ARGS__))
#define folio_alloc_mpol(...)			alloc_hooks(folio_alloc_mpol_noprof(__VA_ARGS__))
#define vma_alloc_folio(...)			alloc_hooks(vma_alloc_folio_noprof(__VA_ARGS__))

#define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0)

static inline struct page *alloc_page_vma_noprof(gfp_t gfp,
		struct vm_area_struct *vma, unsigned long addr)
{
	struct folio *folio = vma_alloc_folio_noprof(gfp, 0, vma, addr);

	return &folio->page;
}
#define alloc_page_vma(...)			alloc_hooks(alloc_page_vma_noprof(__VA_ARGS__))

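/*
 * Core allocation/free sketch: alloc_pages() hands back a struct page * for
 * 2^order contiguous pages (NULL on failure) and is paired with
 * __free_pages() at the same order:
 *
 *	struct page *page = alloc_pages(GFP_KERNEL, 2);	// four pages
 *
 *	if (page) {
 *		void *addr = page_address(page);	// lowmem pages only
 *		...
 *		__free_pages(page, 2);
 *	}
 */
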
struct page *try_alloc_pages_noprof(int nid, unsigned int order);
#define try_alloc_pages(...)			alloc_hooks(try_alloc_pages_noprof(__VA_ARGS__))

extern unsigned long get_free_pages_noprof(gfp_t gfp_mask, unsigned int order);
#define __get_free_pages(...)			alloc_hooks(get_free_pages_noprof(__VA_ARGS__))

extern unsigned long get_zeroed_page_noprof(gfp_t gfp_mask);
#define get_zeroed_page(...)			alloc_hooks(get_zeroed_page_noprof(__VA_ARGS__))

void *alloc_pages_exact_noprof(size_t size, gfp_t gfp_mask) __alloc_size(1);
#define alloc_pages_exact(...)			alloc_hooks(alloc_pages_exact_noprof(__VA_ARGS__))

void free_pages_exact(void *virt, size_t size);

__meminit void *alloc_pages_exact_nid_noprof(int nid, size_t size, gfp_t gfp_mask) __alloc_size(2);
#define alloc_pages_exact_nid(...)					\
	alloc_hooks(alloc_pages_exact_nid_noprof(__VA_ARGS__))

#define __get_free_page(gfp_mask)					\
	__get_free_pages((gfp_mask), 0)

#define __get_dma_pages(gfp_mask, order)				\
	__get_free_pages((gfp_mask) | GFP_DMA, (order))

extern void __free_pages(struct page *page, unsigned int order);
extern void free_pages_nolock(struct page *page, unsigned int order);
extern void free_pages(unsigned long addr, unsigned int order);

#define __free_page(page) __free_pages((page), 0)
#define free_page(addr) free_pages((addr), 0)

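/*
 * Address-based variants, sketched: the *get*page*() family returns a kernel
 * virtual address (0 on failure) rather than a struct page *, and the pages
 * are freed by address:
 *
 *	unsigned long addr = __get_free_page(GFP_KERNEL);
 *	unsigned long zeroed = get_zeroed_page(GFP_KERNEL);
 *
 *	if (addr)
 *		free_page(addr);
 *	if (zeroed)
 *		free_page(zeroed);
 */
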
void page_alloc_init_cpuhp(void);
int decay_pcp_high(struct zone *zone, struct per_cpu_pages *pcp);
void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp);
void drain_all_pages(struct zone *zone);
void drain_local_pages(struct zone *zone);

void page_alloc_init_late(void);
void setup_pcp_cacheinfo(unsigned int cpu);

/*
 * gfp_allowed_mask is set to GFP_BOOT_MASK during early boot to restrict what
 * GFP flags are used before interrupts are enabled. Once interrupts are
 * enabled, it is set to __GFP_BITS_MASK while the system is running. During
 * hibernation, it is used by PM to avoid I/O during memory allocation while
 * devices are suspended.
 */
extern gfp_t gfp_allowed_mask;

/* Returns true if the gfp_mask allows use of ALLOC_NO_WATERMARK */
bool gfp_pfmemalloc_allowed(gfp_t gfp_mask);

static inline bool gfp_has_io_fs(gfp_t gfp)
{
	return (gfp & (__GFP_IO | __GFP_FS)) == (__GFP_IO | __GFP_FS);
}

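/*
 * Sketch: gfp_has_io_fs() only reports true when both I/O and filesystem
 * callbacks are permitted, so the common restricted contexts fail the check:
 *
 *	gfp_has_io_fs(GFP_KERNEL)	-> true   (__GFP_IO and __GFP_FS set)
 *	gfp_has_io_fs(GFP_NOFS)		-> false  (__GFP_FS cleared)
 *	gfp_has_io_fs(GFP_NOIO)		-> false  (both cleared)
 */
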
/*
 * Check if the gfp flags allow compaction - GFP_NOIO is a really
 * tricky context because the migration might require IO.
 */
static inline bool gfp_compaction_allowed(gfp_t gfp_mask)
{
	return IS_ENABLED(CONFIG_COMPACTION) && (gfp_mask & __GFP_IO);
}

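/*
 * Sketch (assuming CONFIG_COMPACTION=y): compaction is permitted only when
 * the request may perform I/O for page migration:
 *
 *	gfp_compaction_allowed(GFP_KERNEL)	-> true
 *	gfp_compaction_allowed(GFP_NOIO)	-> false
 */
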
extern gfp_t vma_thp_gfp_mask(struct vm_area_struct *vma);

#ifdef CONFIG_CONTIG_ALLOC
/* The below functions must be run on a range from a single zone. */
extern int alloc_contig_range_noprof(unsigned long start, unsigned long end,
			      unsigned migratetype, gfp_t gfp_mask);
#define alloc_contig_range(...)			alloc_hooks(alloc_contig_range_noprof(__VA_ARGS__))

extern struct page *alloc_contig_pages_noprof(unsigned long nr_pages, gfp_t gfp_mask,
					      int nid, nodemask_t *nodemask);
#define alloc_contig_pages(...)			alloc_hooks(alloc_contig_pages_noprof(__VA_ARGS__))

#endif
void free_contig_range(unsigned long pfn, unsigned long nr_pages);

#ifdef CONFIG_CONTIG_ALLOC
static inline struct folio *folio_alloc_gigantic_noprof(int order, gfp_t gfp,
							int nid, nodemask_t *node)
{
	struct page *page;

	if (WARN_ON(!order || !(gfp & __GFP_COMP)))
		return NULL;

	page = alloc_contig_pages_noprof(1 << order, gfp, nid, node);

	return page ? page_folio(page) : NULL;
}
#else
static inline struct folio *folio_alloc_gigantic_noprof(int order, gfp_t gfp,
							int nid, nodemask_t *node)
{
	return NULL;
}
#endif
/* This should be paired with folio_put() rather than free_contig_range(). */
#define folio_alloc_gigantic(...) alloc_hooks(folio_alloc_gigantic_noprof(__VA_ARGS__))

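/*
 * Gigantic folio sketch (assumes CONFIG_CONTIG_ALLOC=y; 'order' is a
 * caller-chosen non-zero order and 'nid' a valid, online node): __GFP_COMP
 * is mandatory, and the folio is released with folio_put() rather than
 * free_contig_range():
 *
 *	struct folio *folio;
 *
 *	folio = folio_alloc_gigantic(order, GFP_KERNEL | __GFP_COMP,
 *				     nid, NULL);
 *	if (folio)
 *		folio_put(folio);
 */
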
#endif /* __LINUX_GFP_H */