#ifndef __LINUX_GFP_H
#define __LINUX_GFP_H

#include <linux/mmdebug.h>
#include <linux/mmzone.h>
#include <linux/stddef.h>
#include <linux/linkage.h>
#include <linux/topology.h>

struct vm_area_struct;

/* Plain integer GFP bitmasks. Do not use this directly. */
#define ___GFP_DMA		0x01u
#define ___GFP_HIGHMEM		0x02u
#define ___GFP_DMA32		0x04u
#define ___GFP_MOVABLE		0x08u
#define ___GFP_RECLAIMABLE	0x10u
#define ___GFP_HIGH		0x20u
#define ___GFP_IO		0x40u
#define ___GFP_FS		0x80u
#define ___GFP_COLD		0x100u
#define ___GFP_NOWARN		0x200u
#define ___GFP_REPEAT		0x400u
#define ___GFP_NOFAIL		0x800u
#define ___GFP_NORETRY		0x1000u
#define ___GFP_MEMALLOC		0x2000u
#define ___GFP_COMP		0x4000u
#define ___GFP_ZERO		0x8000u
#define ___GFP_NOMEMALLOC	0x10000u
#define ___GFP_HARDWALL		0x20000u
#define ___GFP_THISNODE		0x40000u
#define ___GFP_ATOMIC		0x80000u
#define ___GFP_NOACCOUNT	0x100000u
#define ___GFP_NOTRACK		0x200000u
#define ___GFP_DIRECT_RECLAIM	0x400000u
#define ___GFP_OTHER_NODE	0x800000u
#define ___GFP_WRITE		0x1000000u
#define ___GFP_KSWAPD_RECLAIM	0x2000000u
/* If the above are modified, __GFP_BITS_SHIFT may need updating */

/*
 * Physical address zone modifiers (see linux/mmzone.h - low four bits)
 *
 * Do not put any conditional on these. If necessary modify the definitions
 * without the underscores and use them consistently. The definitions here may
 * be used in bit comparisons.
 */
#define __GFP_DMA	((__force gfp_t)___GFP_DMA)
#define __GFP_HIGHMEM	((__force gfp_t)___GFP_HIGHMEM)
#define __GFP_DMA32	((__force gfp_t)___GFP_DMA32)
#define __GFP_MOVABLE	((__force gfp_t)___GFP_MOVABLE) /* Page is movable; ZONE_MOVABLE allowed */
#define GFP_ZONEMASK	(__GFP_DMA|__GFP_HIGHMEM|__GFP_DMA32|__GFP_MOVABLE)

/*
 * Page mobility and placement hints
 *
 * These flags provide hints about how mobile the page is. Pages with similar
 * mobility are placed within the same pageblocks to minimise problems due
 * to external fragmentation.
 *
 * __GFP_MOVABLE (also a zone modifier) indicates that the page can be
 *   moved by page migration during memory compaction or can be reclaimed.
 *
 * __GFP_RECLAIMABLE is used for slab allocations that specify
 *   SLAB_RECLAIM_ACCOUNT and whose pages can be freed via shrinkers.
 *
 * __GFP_WRITE indicates the caller intends to dirty the page. Where possible,
 *   these pages will be spread between local zones to avoid all the dirty
 *   pages being in one zone (fair zone allocation policy).
 *
 * __GFP_HARDWALL enforces the cpuset memory allocation policy.
 *
 * __GFP_THISNODE forces the allocation to be satisfied from the requested
 *   node with no fallbacks or placement policy enforcements.
 */
#define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE)
#define __GFP_WRITE	((__force gfp_t)___GFP_WRITE)
#define __GFP_HARDWALL	((__force gfp_t)___GFP_HARDWALL)
#define __GFP_THISNODE	((__force gfp_t)___GFP_THISNODE)
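/*
 * Illustrative sketch (not part of this header): a caller that needs a
 * node-local page and prefers failure over falling back to a remote node
 * might combine the placement hints above with alloc_pages_node(), which
 * is declared later in this file:
 *
 *	page = alloc_pages_node(nid, GFP_KERNEL | __GFP_THISNODE, 0);
 */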
/*
 * Watermark modifiers -- controls access to emergency reserves
 *
 * __GFP_HIGH indicates that the caller is high-priority and that granting
 *   the request is necessary before the system can make forward progress.
 *   For example, creating an IO context to clean pages.
 *
 * __GFP_ATOMIC indicates that the caller cannot reclaim or sleep and is
 *   high priority. Users are typically interrupt handlers. This may be
 *   used in conjunction with __GFP_HIGH.
 *
 * __GFP_MEMALLOC allows access to all memory. This should only be used when
 *   the caller guarantees the allocation will allow more memory to be freed
 *   very shortly, e.g. process exiting or swapping. Users should either
 *   be the MM or co-ordinating closely with the VM (e.g. swap over NFS).
 *
 * __GFP_NOMEMALLOC is used to explicitly forbid access to emergency reserves.
 *   This takes precedence over the __GFP_MEMALLOC flag if both are set.
 *
 * __GFP_NOACCOUNT ignores the accounting for kmemcg limit enforcement.
 */
#define __GFP_ATOMIC	((__force gfp_t)___GFP_ATOMIC)
#define __GFP_HIGH	((__force gfp_t)___GFP_HIGH)
#define __GFP_MEMALLOC	((__force gfp_t)___GFP_MEMALLOC)
#define __GFP_NOMEMALLOC ((__force gfp_t)___GFP_NOMEMALLOC)
#define __GFP_NOACCOUNT	((__force gfp_t)___GFP_NOACCOUNT)

/*
 * Reclaim modifiers
 *
 * __GFP_IO can start physical IO.
 *
 * __GFP_FS can call down to the low-level FS. Clearing the flag avoids the
 *   allocator recursing into the filesystem which might already be holding
 *   locks.
 *
 * __GFP_DIRECT_RECLAIM indicates that the caller may enter direct reclaim.
 *   This flag can be cleared to avoid unnecessary delays when a fallback
 *   option is available.
 *
 * __GFP_KSWAPD_RECLAIM indicates that the caller wants to wake kswapd when
 *   the low watermark is reached and have it reclaim pages until the high
 *   watermark is reached. A caller may wish to clear this flag when fallback
 *   options are available and the reclaim is likely to disrupt the system. The
 *   canonical example is THP allocation where a fallback is cheap but
 *   reclaim/compaction may cause indirect stalls.
 *
 * __GFP_RECLAIM is shorthand to allow/forbid both direct and kswapd reclaim.
 *
 * __GFP_REPEAT: Try hard to allocate the memory, but the allocation attempt
 *   _might_ fail. This depends upon the particular VM implementation.
 *
 * __GFP_NOFAIL: The VM implementation _must_ retry infinitely: the caller
 *   cannot handle allocation failures. New users should be evaluated carefully
 *   (and the flag should be used only when there is no reasonable failure
 *   policy), but it is definitely preferable to use the flag rather than
 *   open-code an endless loop around the allocator.
 *
 * __GFP_NORETRY: The VM implementation must not retry indefinitely and will
 *   return NULL when direct reclaim and memory compaction have failed to allow
 *   the allocation to succeed. The OOM killer is not called with the current
 *   implementation.
 */
#define __GFP_IO	((__force gfp_t)___GFP_IO)
#define __GFP_FS	((__force gfp_t)___GFP_FS)
#define __GFP_DIRECT_RECLAIM	((__force gfp_t)___GFP_DIRECT_RECLAIM) /* Caller can reclaim */
#define __GFP_KSWAPD_RECLAIM	((__force gfp_t)___GFP_KSWAPD_RECLAIM) /* kswapd can wake */
#define __GFP_RECLAIM ((__force gfp_t)(___GFP_DIRECT_RECLAIM|___GFP_KSWAPD_RECLAIM))
#define __GFP_REPEAT	((__force gfp_t)___GFP_REPEAT)
#define __GFP_NOFAIL	((__force gfp_t)___GFP_NOFAIL)
#define __GFP_NORETRY	((__force gfp_t)___GFP_NORETRY)
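/*
 * Illustrative sketch (hypothetical caller with a cheap fallback): clearing
 * __GFP_DIRECT_RECLAIM keeps the attempt cheap, and __GFP_NOWARN suppresses
 * the failure report since failure is expected and handled. do_fallback()
 * stands in for the caller's own fallback path:
 *
 *	page = alloc_page((GFP_KERNEL & ~__GFP_DIRECT_RECLAIM) | __GFP_NOWARN);
 *	if (!page)
 *		return do_fallback();
 */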
/*
 * Action modifiers
 *
 * __GFP_COLD indicates that the caller does not expect the page to be used
 *   in the near future. Where possible, a cache-cold page will be returned.
 *
 * __GFP_NOWARN suppresses allocation failure reports.
 *
 * __GFP_COMP sets up compound page metadata.
 *
 * __GFP_ZERO returns a zeroed page on success.
 *
 * __GFP_NOTRACK avoids tracking with kmemcheck.
 *
 * __GFP_NOTRACK_FALSE_POSITIVE is an alias of __GFP_NOTRACK. It's a means of
 *   distinguishing in the source between false positives and allocations that
 *   cannot be supported (e.g. page tables).
 *
 * __GFP_OTHER_NODE is for allocations that are on a remote node but that
 *   should not be accounted for as a remote allocation in vmstat. A
 *   typical user would be khugepaged collapsing a huge page on a remote
 *   node.
 */
#define __GFP_COLD	((__force gfp_t)___GFP_COLD)
#define __GFP_NOWARN	((__force gfp_t)___GFP_NOWARN)
#define __GFP_COMP	((__force gfp_t)___GFP_COMP)
#define __GFP_ZERO	((__force gfp_t)___GFP_ZERO)
#define __GFP_NOTRACK	((__force gfp_t)___GFP_NOTRACK)
#define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK)
#define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE)

/* Room for 26 __GFP_FOO bits */
#define __GFP_BITS_SHIFT 26
#define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
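/*
 * Illustrative sketch: __GFP_ZERO spares the caller a memset() on success.
 * The context structure is hypothetical; __get_free_page() is declared
 * later in this file:
 *
 *	ctx = (struct foo_ctx *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
 *	if (!ctx)
 *		return -ENOMEM;
 */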
/*
 * Useful GFP flag combinations that are commonly used. It is recommended
 * that subsystems start with one of these combinations and then set/clear
 * __GFP_FOO flags as necessary.
 *
 * GFP_ATOMIC users cannot sleep and need the allocation to succeed. A lower
 *   watermark is applied to allow access to "atomic reserves".
 *
 * GFP_KERNEL is typical for kernel-internal allocations. The caller requires
 *   ZONE_NORMAL or a lower zone for direct access but can enter direct
 *   reclaim.
 *
 * GFP_NOWAIT is for kernel allocations that should not stall for direct
 *   reclaim, start physical IO or use any filesystem callback.
 *
 * GFP_NOIO will use direct reclaim to discard clean pages or slab pages
 *   that do not require the starting of any physical IO.
 *
 * GFP_NOFS will use direct reclaim but will not use any filesystem interfaces.
 *
 * GFP_USER is for userspace allocations that also need to be directly
 *   accessible by the kernel or hardware. It is typically used by hardware
 *   for buffers that are mapped to userspace (e.g. graphics) that hardware
 *   still must DMA to. cpuset limits are enforced for these allocations.
 *
 * GFP_DMA exists for historical reasons and should be avoided where possible.
 *   The flag indicates that the caller requires that the lowest zone be
 *   used (ZONE_DMA or 16M on x86-64). Ideally, this would be removed but
 *   it would require careful auditing as some users really require it and
 *   others use the flag to avoid lowmem reserves in ZONE_DMA and treat the
 *   lowest zone as a type of emergency reserve.
 *
 * GFP_DMA32 is similar to GFP_DMA except that the caller requires a 32-bit
 *   address.
 *
 * GFP_HIGHUSER is for userspace allocations that may be mapped to userspace,
 *   do not need to be directly accessible by the kernel but that cannot
 *   move once in use. An example may be a hardware allocation that maps
 *   data directly into userspace but has no addressing limitations.
 *
 * GFP_HIGHUSER_MOVABLE is for userspace allocations that the kernel does not
 *   need direct access to but can use kmap() when access is required. They
 *   are expected to be movable via page reclaim or page migration. Typically,
 *   pages on the LRU would also be allocated with GFP_HIGHUSER_MOVABLE.
 *
 * GFP_TRANSHUGE is used for THP allocations. They are compound allocations
 *   that will fail quickly if memory is not available and will not wake
 *   kswapd on failure.
 */
#define GFP_ATOMIC	(__GFP_HIGH|__GFP_ATOMIC|__GFP_KSWAPD_RECLAIM)
#define GFP_KERNEL	(__GFP_RECLAIM | __GFP_IO | __GFP_FS)
#define GFP_NOWAIT	(__GFP_KSWAPD_RECLAIM)
#define GFP_NOIO	(__GFP_RECLAIM)
#define GFP_NOFS	(__GFP_RECLAIM | __GFP_IO)
#define GFP_TEMPORARY	(__GFP_RECLAIM | __GFP_IO | __GFP_FS | \
			 __GFP_RECLAIMABLE)
#define GFP_USER	(__GFP_RECLAIM | __GFP_IO | __GFP_FS | __GFP_HARDWALL)
#define GFP_DMA		__GFP_DMA
#define GFP_DMA32	__GFP_DMA32
#define GFP_HIGHUSER	(GFP_USER | __GFP_HIGHMEM)
#define GFP_HIGHUSER_MOVABLE	(GFP_HIGHUSER | __GFP_MOVABLE)
#define GFP_TRANSHUGE	((GFP_HIGHUSER_MOVABLE | __GFP_COMP | \
			 __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN) & \
			 ~__GFP_KSWAPD_RECLAIM)

/* Convert GFP flags to their corresponding migrate type */
#define GFP_MOVABLE_MASK (__GFP_RECLAIMABLE|__GFP_MOVABLE)
#define GFP_MOVABLE_SHIFT 3

static inline int gfpflags_to_migratetype(const gfp_t gfp_flags)
{
	VM_WARN_ON((gfp_flags & GFP_MOVABLE_MASK) == GFP_MOVABLE_MASK);
	BUILD_BUG_ON((1UL << GFP_MOVABLE_SHIFT) != ___GFP_MOVABLE);
	BUILD_BUG_ON((___GFP_MOVABLE >> GFP_MOVABLE_SHIFT) != MIGRATE_MOVABLE);

	if (unlikely(page_group_by_mobility_disabled))
		return MIGRATE_UNMOVABLE;

	/* Group based on mobility */
	return (gfp_flags & GFP_MOVABLE_MASK) >> GFP_MOVABLE_SHIFT;
}
#undef GFP_MOVABLE_MASK
#undef GFP_MOVABLE_SHIFT

static inline bool gfpflags_allow_blocking(const gfp_t gfp_flags)
{
	return (bool __force)(gfp_flags & __GFP_DIRECT_RECLAIM);
}
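/*
 * Illustrative sketch (hypothetical caller): code shared between sleepable
 * and atomic contexts can key its behaviour off the gfp mask. dev->lock is
 * a stand-in for the caller's own lock:
 *
 *	if (gfpflags_allow_blocking(gfp_mask))
 *		mutex_lock(&dev->lock);
 *	else if (!mutex_trylock(&dev->lock))
 *		return -EAGAIN;
 */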
#ifdef CONFIG_HIGHMEM
#define OPT_ZONE_HIGHMEM ZONE_HIGHMEM
#else
#define OPT_ZONE_HIGHMEM ZONE_NORMAL
#endif

#ifdef CONFIG_ZONE_DMA
#define OPT_ZONE_DMA ZONE_DMA
#else
#define OPT_ZONE_DMA ZONE_NORMAL
#endif

#ifdef CONFIG_ZONE_DMA32
#define OPT_ZONE_DMA32 ZONE_DMA32
#else
#define OPT_ZONE_DMA32 ZONE_NORMAL
#endif

/*
 * GFP_ZONE_TABLE is a word size bitstring that is used for looking up the
 * zone to use given the lowest 4 bits of gfp_t. Entries are ZONES_SHIFT
 * long and there are 16 of them to cover all possible combinations of
 * __GFP_DMA, __GFP_DMA32, __GFP_MOVABLE and __GFP_HIGHMEM.
 *
 * The zone fallback order is MOVABLE=>HIGHMEM=>NORMAL=>DMA32=>DMA.
 * But GFP_MOVABLE is not only a zone specifier but also an allocation
 * policy. Therefore __GFP_MOVABLE plus another zone selector is valid.
 * Only 1 bit of the lowest 3 bits (DMA,DMA32,HIGHMEM) can be set to "1".
 *
 *       bit       result
 *       =================
 *       0x0    => NORMAL
 *       0x1    => DMA or NORMAL
 *       0x2    => HIGHMEM or NORMAL
 *       0x3    => BAD (DMA+HIGHMEM)
 *       0x4    => DMA32 or DMA or NORMAL
 *       0x5    => BAD (DMA+DMA32)
 *       0x6    => BAD (HIGHMEM+DMA32)
 *       0x7    => BAD (HIGHMEM+DMA32+DMA)
 *       0x8    => NORMAL (MOVABLE+0)
 *       0x9    => DMA or NORMAL (MOVABLE+DMA)
 *       0xa    => MOVABLE (Movable is valid only if HIGHMEM is set too)
 *       0xb    => BAD (MOVABLE+HIGHMEM+DMA)
 *       0xc    => DMA32 (MOVABLE+DMA32)
 *       0xd    => BAD (MOVABLE+DMA32+DMA)
 *       0xe    => BAD (MOVABLE+DMA32+HIGHMEM)
 *       0xf    => BAD (MOVABLE+DMA32+HIGHMEM+DMA)
 *
 * ZONES_SHIFT must be <= 2 on 32 bit platforms.
 */

#if 16 * ZONES_SHIFT > BITS_PER_LONG
#error ZONES_SHIFT too large to create GFP_ZONE_TABLE integer
#endif

#define GFP_ZONE_TABLE ( \
	(ZONE_NORMAL << 0 * ZONES_SHIFT)				       \
	| (OPT_ZONE_DMA << ___GFP_DMA * ZONES_SHIFT)			       \
	| (OPT_ZONE_HIGHMEM << ___GFP_HIGHMEM * ZONES_SHIFT)		       \
	| (OPT_ZONE_DMA32 << ___GFP_DMA32 * ZONES_SHIFT)		       \
	| (ZONE_NORMAL << ___GFP_MOVABLE * ZONES_SHIFT)			       \
	| (OPT_ZONE_DMA << (___GFP_MOVABLE | ___GFP_DMA) * ZONES_SHIFT)       \
	| (ZONE_MOVABLE << (___GFP_MOVABLE | ___GFP_HIGHMEM) * ZONES_SHIFT)   \
	| (OPT_ZONE_DMA32 << (___GFP_MOVABLE | ___GFP_DMA32) * ZONES_SHIFT)   \
)

/*
 * GFP_ZONE_BAD is a bitmap for all combinations of __GFP_DMA, __GFP_DMA32,
 * __GFP_HIGHMEM and __GFP_MOVABLE that are not permitted. One flag per
 * entry starting with bit 0. Bit is set if the combination is not
 * allowed.
 */
#define GFP_ZONE_BAD ( \
	1 << (___GFP_DMA | ___GFP_HIGHMEM)				       \
	| 1 << (___GFP_DMA | ___GFP_DMA32)				       \
	| 1 << (___GFP_DMA32 | ___GFP_HIGHMEM)				       \
	| 1 << (___GFP_DMA | ___GFP_DMA32 | ___GFP_HIGHMEM)		       \
	| 1 << (___GFP_MOVABLE | ___GFP_HIGHMEM | ___GFP_DMA)		       \
	| 1 << (___GFP_MOVABLE | ___GFP_DMA32 | ___GFP_DMA)		       \
	| 1 << (___GFP_MOVABLE | ___GFP_DMA32 | ___GFP_HIGHMEM)	       \
	| 1 << (___GFP_MOVABLE | ___GFP_DMA32 | ___GFP_DMA | ___GFP_HIGHMEM)  \
)

static inline enum zone_type gfp_zone(gfp_t flags)
{
	enum zone_type z;
	int bit = (__force int) (flags & GFP_ZONEMASK);

	z = (GFP_ZONE_TABLE >> (bit * ZONES_SHIFT)) &
					 ((1 << ZONES_SHIFT) - 1);
	VM_BUG_ON((GFP_ZONE_BAD >> bit) & 1);
	return z;
}
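/*
 * Worked example (illustrative): for flags containing
 * __GFP_HIGHMEM | __GFP_MOVABLE, bit is 0xa, so gfp_zone() extracts entry
 * 10 of GFP_ZONE_TABLE and returns ZONE_MOVABLE, matching the table above.
 * GFP_ZONE_BAD has bit 10 clear, so the VM_BUG_ON() does not fire.
 */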
/*
 * There is only one page-allocator function, and two main namespaces to
 * it. The alloc_page*() variants return 'struct page *' and as such
 * can allocate highmem pages, the *get*page*() variants return
 * virtual kernel addresses to the allocated page(s).
 */

static inline int gfp_zonelist(gfp_t flags)
{
	if (IS_ENABLED(CONFIG_NUMA) && unlikely(flags & __GFP_THISNODE))
		return 1;

	return 0;
}

/*
 * We get the zone list from the current node and the gfp_mask.
 * This zone list contains a maximum of MAX_NUMNODES*MAX_NR_ZONES zones.
 * There are two zonelists per node, one for all zones with memory and
 * one containing just zones from the node the zonelist belongs to.
 *
 * For the normal case of non-DISCONTIGMEM systems the NODE_DATA() gets
 * optimized to &contig_page_data at compile-time.
 */
static inline struct zonelist *node_zonelist(int nid, gfp_t flags)
{
	return NODE_DATA(nid)->node_zonelists + gfp_zonelist(flags);
}

#ifndef HAVE_ARCH_FREE_PAGE
static inline void arch_free_page(struct page *page, int order) { }
#endif
#ifndef HAVE_ARCH_ALLOC_PAGE
static inline void arch_alloc_page(struct page *page, int order) { }
#endif

struct page *
__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
		       struct zonelist *zonelist, nodemask_t *nodemask);

static inline struct page *
__alloc_pages(gfp_t gfp_mask, unsigned int order,
		struct zonelist *zonelist)
{
	return __alloc_pages_nodemask(gfp_mask, order, zonelist, NULL);
}

/*
 * Allocate pages, preferring the node given as nid. The node must be valid and
 * online. For a more general interface, see alloc_pages_node().
 */
static inline struct page *
__alloc_pages_node(int nid, gfp_t gfp_mask, unsigned int order)
{
	VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES);
	VM_WARN_ON(!node_online(nid));

	return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask));
}

/*
 * Allocate pages, preferring the node given as nid. When nid == NUMA_NO_NODE,
 * prefer the current CPU's closest node. Otherwise the node must be valid and
 * online.
 */
static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask,
						unsigned int order)
{
	if (nid == NUMA_NO_NODE)
		nid = numa_mem_id();

	return __alloc_pages_node(nid, gfp_mask, order);
}

#ifdef CONFIG_NUMA
extern struct page *alloc_pages_current(gfp_t gfp_mask, unsigned order);

static inline struct page *
alloc_pages(gfp_t gfp_mask, unsigned int order)
{
	return alloc_pages_current(gfp_mask, order);
}
extern struct page *alloc_pages_vma(gfp_t gfp_mask, int order,
			struct vm_area_struct *vma, unsigned long addr,
			int node, bool hugepage);
#define alloc_hugepage_vma(gfp_mask, vma, addr, order)	\
	alloc_pages_vma(gfp_mask, order, vma, addr, numa_node_id(), true)
#else
#define alloc_pages(gfp_mask, order) \
		alloc_pages_node(numa_node_id(), gfp_mask, order)
#define alloc_pages_vma(gfp_mask, order, vma, addr, node, hugepage) \
	alloc_pages(gfp_mask, order)
#define alloc_hugepage_vma(gfp_mask, vma, addr, order)	\
	alloc_pages(gfp_mask, order)
#endif
#define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0)
#define alloc_page_vma(gfp_mask, vma, addr)			\
	alloc_pages_vma(gfp_mask, 0, vma, addr, numa_node_id(), false)
#define alloc_page_vma_node(gfp_mask, vma, addr, node)		\
	alloc_pages_vma(gfp_mask, 0, vma, addr, node, false)

extern struct page *alloc_kmem_pages(gfp_t gfp_mask, unsigned int order);
extern struct page *alloc_kmem_pages_node(int nid, gfp_t gfp_mask,
					  unsigned int order);

extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order);
extern unsigned long get_zeroed_page(gfp_t gfp_mask);

void *alloc_pages_exact(size_t size, gfp_t gfp_mask);
void free_pages_exact(void *virt, size_t size);
void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask);

#define __get_free_page(gfp_mask) \
		__get_free_pages((gfp_mask), 0)

#define __get_dma_pages(gfp_mask, order) \
		__get_free_pages((gfp_mask) | GFP_DMA, (order))

extern void __free_pages(struct page *page, unsigned int order);
extern void free_pages(unsigned long addr, unsigned int order);
extern void free_hot_cold_page(struct page *page, bool cold);
extern void free_hot_cold_page_list(struct list_head *list, bool cold);

struct page_frag_cache;
extern void *__alloc_page_frag(struct page_frag_cache *nc,
			       unsigned int fragsz, gfp_t gfp_mask);
extern void __free_page_frag(void *addr);

extern void __free_kmem_pages(struct page *page, unsigned int order);
extern void free_kmem_pages(unsigned long addr, unsigned int order);

#define __free_page(page) __free_pages((page), 0)
#define free_page(addr) free_pages((addr), 0)
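/*
 * Illustrative sketch (hypothetical driver): allocate a block of four
 * contiguous pages (order 2) and free it with the matching order:
 *
 *	struct page *page = alloc_pages(GFP_KERNEL, 2);
 *	if (!page)
 *		return -ENOMEM;
 *	...
 *	__free_pages(page, 2);
 */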
void page_alloc_init(void);
void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp);
void drain_all_pages(struct zone *zone);
void drain_local_pages(struct zone *zone);

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
void page_alloc_init_late(void);
#else
static inline void page_alloc_init_late(void)
{
}
#endif

/*
 * gfp_allowed_mask is set to GFP_BOOT_MASK during early boot to restrict what
 * GFP flags are used before interrupts are enabled. Once interrupts are
 * enabled, it is set to __GFP_BITS_MASK while the system is running. During
 * hibernation, it is used by PM to avoid I/O during memory allocation while
 * devices are suspended.
 */
extern gfp_t gfp_allowed_mask;

/* Returns true if the gfp_mask allows use of ALLOC_NO_WATERMARK */
bool gfp_pfmemalloc_allowed(gfp_t gfp_mask);

extern void pm_restrict_gfp_mask(void);
extern void pm_restore_gfp_mask(void);

#ifdef CONFIG_PM_SLEEP
extern bool pm_suspended_storage(void);
#else
static inline bool pm_suspended_storage(void)
{
	return false;
}
#endif /* CONFIG_PM_SLEEP */

#ifdef CONFIG_CMA

/* The below functions must be run on a range from a single zone. */
extern int alloc_contig_range(unsigned long start, unsigned long end,
			      unsigned migratetype);
extern void free_contig_range(unsigned long pfn, unsigned nr_pages);

/* CMA stuff */
extern void init_cma_reserved_pageblock(struct page *page);

#endif

#endif /* __LINUX_GFP_H */