/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_GFP_TYPES_H
#define __LINUX_GFP_TYPES_H

#include <linux/bits.h>

/* The typedef is in types.h but we want the documentation here */
#if 0
/**
 * typedef gfp_t - Memory allocation flags.
 *
 * GFP flags are commonly used throughout Linux to indicate how memory
 * should be allocated. The GFP acronym stands for get_free_pages(),
 * the underlying memory allocation function. Not every GFP flag is
 * supported by every function which may allocate memory. Most users
 * will want to use a plain ``GFP_KERNEL``.
 */
typedef unsigned int __bitwise gfp_t;
#endif

/*
 * In case of changes, please don't forget to update
 * include/trace/events/mmflags.h and tools/perf/builtin-kmem.c
 */

enum {
	___GFP_DMA_BIT,
	___GFP_HIGHMEM_BIT,
	___GFP_DMA32_BIT,
	___GFP_MOVABLE_BIT,
	___GFP_RECLAIMABLE_BIT,
	___GFP_HIGH_BIT,
	___GFP_IO_BIT,
	___GFP_FS_BIT,
	___GFP_ZERO_BIT,
	___GFP_UNUSED_BIT,	/* 0x200u unused */
	___GFP_DIRECT_RECLAIM_BIT,
	___GFP_KSWAPD_RECLAIM_BIT,
	___GFP_WRITE_BIT,
	___GFP_NOWARN_BIT,
	___GFP_RETRY_MAYFAIL_BIT,
	___GFP_NOFAIL_BIT,
	___GFP_NORETRY_BIT,
	___GFP_MEMALLOC_BIT,
	___GFP_COMP_BIT,
	___GFP_NOMEMALLOC_BIT,
	___GFP_HARDWALL_BIT,
	___GFP_THISNODE_BIT,
	___GFP_ACCOUNT_BIT,
	___GFP_ZEROTAGS_BIT,
#ifdef CONFIG_KASAN_HW_TAGS
	___GFP_SKIP_ZERO_BIT,
	___GFP_SKIP_KASAN_BIT,
#endif
#ifdef CONFIG_LOCKDEP
	___GFP_NOLOCKDEP_BIT,
#endif
#ifdef CONFIG_SLAB_OBJ_EXT
	___GFP_NO_OBJ_EXT_BIT,
#endif
	___GFP_LAST_BIT
};
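
/*
 * Illustrative only: the bit numbers above exist solely to build the gfp_t
 * masks defined below. As the gfp_t documentation notes, most callers simply
 * pass one of the ready-made combinations, e.g.:
 *
 *	ptr = kmalloc(size, GFP_KERNEL);
 */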

/* Plain integer GFP bitmasks. Do not use this directly. */
#define ___GFP_DMA		BIT(___GFP_DMA_BIT)
#define ___GFP_HIGHMEM		BIT(___GFP_HIGHMEM_BIT)
#define ___GFP_DMA32		BIT(___GFP_DMA32_BIT)
#define ___GFP_MOVABLE		BIT(___GFP_MOVABLE_BIT)
#define ___GFP_RECLAIMABLE	BIT(___GFP_RECLAIMABLE_BIT)
#define ___GFP_HIGH		BIT(___GFP_HIGH_BIT)
#define ___GFP_IO		BIT(___GFP_IO_BIT)
#define ___GFP_FS		BIT(___GFP_FS_BIT)
#define ___GFP_ZERO		BIT(___GFP_ZERO_BIT)
/* 0x200u unused */
#define ___GFP_DIRECT_RECLAIM	BIT(___GFP_DIRECT_RECLAIM_BIT)
#define ___GFP_KSWAPD_RECLAIM	BIT(___GFP_KSWAPD_RECLAIM_BIT)
#define ___GFP_WRITE		BIT(___GFP_WRITE_BIT)
#define ___GFP_NOWARN		BIT(___GFP_NOWARN_BIT)
#define ___GFP_RETRY_MAYFAIL	BIT(___GFP_RETRY_MAYFAIL_BIT)
#define ___GFP_NOFAIL		BIT(___GFP_NOFAIL_BIT)
#define ___GFP_NORETRY		BIT(___GFP_NORETRY_BIT)
#define ___GFP_MEMALLOC		BIT(___GFP_MEMALLOC_BIT)
#define ___GFP_COMP		BIT(___GFP_COMP_BIT)
#define ___GFP_NOMEMALLOC	BIT(___GFP_NOMEMALLOC_BIT)
#define ___GFP_HARDWALL		BIT(___GFP_HARDWALL_BIT)
#define ___GFP_THISNODE		BIT(___GFP_THISNODE_BIT)
#define ___GFP_ACCOUNT		BIT(___GFP_ACCOUNT_BIT)
#define ___GFP_ZEROTAGS		BIT(___GFP_ZEROTAGS_BIT)
#ifdef CONFIG_KASAN_HW_TAGS
#define ___GFP_SKIP_ZERO	BIT(___GFP_SKIP_ZERO_BIT)
#define ___GFP_SKIP_KASAN	BIT(___GFP_SKIP_KASAN_BIT)
#else
#define ___GFP_SKIP_ZERO	0
#define ___GFP_SKIP_KASAN	0
#endif
#ifdef CONFIG_LOCKDEP
#define ___GFP_NOLOCKDEP	BIT(___GFP_NOLOCKDEP_BIT)
#else
#define ___GFP_NOLOCKDEP	0
#endif
#ifdef CONFIG_SLAB_OBJ_EXT
#define ___GFP_NO_OBJ_EXT	BIT(___GFP_NO_OBJ_EXT_BIT)
#else
#define ___GFP_NO_OBJ_EXT	0
#endif

/*
 * Physical address zone modifiers (see linux/mmzone.h - low four bits)
 *
 * Do not put any conditional on these. If necessary modify the definitions
 * without the underscores and use them consistently. The definitions here may
 * be used in bit comparisons.
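 *
 * For example, a bit comparison of this kind (illustrative only):
 *
 *	bool wants_dma = (gfp & __GFP_DMA) != 0;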
 */
#define __GFP_DMA	((__force gfp_t)___GFP_DMA)
#define __GFP_HIGHMEM	((__force gfp_t)___GFP_HIGHMEM)
#define __GFP_DMA32	((__force gfp_t)___GFP_DMA32)
#define __GFP_MOVABLE	((__force gfp_t)___GFP_MOVABLE)  /* ZONE_MOVABLE allowed */
#define GFP_ZONEMASK	(__GFP_DMA|__GFP_HIGHMEM|__GFP_DMA32|__GFP_MOVABLE)

/**
 * DOC: Page mobility and placement hints
 *
 * Page mobility and placement hints
 * ---------------------------------
 *
 * These flags provide hints about how mobile the page is. Pages with similar
 * mobility are placed within the same pageblocks to minimise problems due
 * to external fragmentation.
 *
 * %__GFP_MOVABLE (also a zone modifier) indicates that the page can be
 * moved by page migration during memory compaction or can be reclaimed.
 *
 * %__GFP_RECLAIMABLE is used for slab allocations that specify
 * SLAB_RECLAIM_ACCOUNT and whose pages can be freed via shrinkers.
 *
 * %__GFP_WRITE indicates the caller intends to dirty the page. Where possible,
 * these pages will be spread between local zones to avoid all the dirty
 * pages being in one zone (fair zone allocation policy).
 *
 * %__GFP_HARDWALL enforces the cpuset memory allocation policy.
 *
 * %__GFP_THISNODE forces the allocation to be satisfied from the requested
 * node with no fallbacks or placement policy enforcements.
 *
 * %__GFP_ACCOUNT causes the allocation to be accounted to kmemcg.
 *
 * %__GFP_NO_OBJ_EXT causes slab allocation to have no object extension.
 */
#define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE)
#define __GFP_WRITE	((__force gfp_t)___GFP_WRITE)
#define __GFP_HARDWALL	((__force gfp_t)___GFP_HARDWALL)
#define __GFP_THISNODE	((__force gfp_t)___GFP_THISNODE)
#define __GFP_ACCOUNT	((__force gfp_t)___GFP_ACCOUNT)
#define __GFP_NO_OBJ_EXT ((__force gfp_t)___GFP_NO_OBJ_EXT)

/**
 * DOC: Watermark modifiers
 *
 * Watermark modifiers -- control access to emergency reserves
 * ------------------------------------------------------------
 *
 * %__GFP_HIGH indicates that the caller is high-priority and that granting
 * the request is necessary before the system can make forward progress.
 * For example, creating an IO context to clean pages, or requests made
 * from atomic context.
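 *
 * A minimal sketch: an interrupt handler that cannot sleep might use
 * %GFP_ATOMIC (defined below as %__GFP_HIGH | %__GFP_KSWAPD_RECLAIM) and
 * must still be prepared for failure::
 *
 *	buf = kmalloc(len, GFP_ATOMIC);
 *	if (!buf)
 *		goto drop;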
 *
 * %__GFP_MEMALLOC allows access to all memory. This should only be used when
 * the caller guarantees the allocation will allow more memory to be freed
 * very shortly, e.g. when a process is exiting or swapping. Users should
 * either be the MM itself or coordinate closely with the VM (e.g. swap over
 * NFS). Users of this flag have to be extremely careful not to deplete the
 * reserve completely and must implement a throttling mechanism which controls
 * the consumption of the reserve based on the amount of freed memory.
 * Usage of a pre-allocated pool (e.g. mempool) should always be considered
 * before using this flag.
 *
 * %__GFP_NOMEMALLOC is used to explicitly forbid access to emergency reserves.
 * This takes precedence over the %__GFP_MEMALLOC flag if both are set.
 */
#define __GFP_HIGH	((__force gfp_t)___GFP_HIGH)
#define __GFP_MEMALLOC	((__force gfp_t)___GFP_MEMALLOC)
#define __GFP_NOMEMALLOC ((__force gfp_t)___GFP_NOMEMALLOC)

/**
 * DOC: Reclaim modifiers
 *
 * Reclaim modifiers
 * -----------------
 * Please note that all the following flags are only applicable to sleepable
 * allocations (e.g. %GFP_NOWAIT and %GFP_ATOMIC will ignore them).
 *
 * %__GFP_IO can start physical IO.
 *
 * %__GFP_FS can call down to the low-level FS. Clearing the flag avoids the
 * allocator recursing into the filesystem which might already be holding
 * locks.
 *
 * %__GFP_DIRECT_RECLAIM indicates that the caller may enter direct reclaim.
 * This flag can be cleared to avoid unnecessary delays when a fallback
 * option is available.
 *
 * %__GFP_KSWAPD_RECLAIM indicates that the caller wants to wake kswapd when
 * the low watermark is reached and have it reclaim pages until the high
 * watermark is reached. A caller may wish to clear this flag when fallback
 * options are available and the reclaim is likely to disrupt the system. The
 * canonical example is THP allocation where a fallback is cheap but
 * reclaim/compaction may cause indirect stalls.
 *
 * %__GFP_RECLAIM is shorthand to allow/forbid both direct and kswapd reclaim.
 *
 * The default allocator behavior depends on the request size. We have a concept
 * of so-called costly allocations (with order > %PAGE_ALLOC_COSTLY_ORDER).
 * !costly allocations are too essential to fail, so they are implicitly
 * non-failing by default (with some exceptions, e.g. OOM victims might still
 * fail, so the caller has to check for failures), while costly requests try
 * not to be disruptive and back off even without invoking the OOM killer.
 * The following three modifiers might be used to override some of these
 * implicit rules. Please note that all of them must be used along with the
 * %__GFP_DIRECT_RECLAIM flag.
 *
 * %__GFP_NORETRY: The VM implementation will try only very lightweight
 * memory direct reclaim to get some memory under memory pressure (thus
 * it can sleep). It will avoid disruptive actions like the OOM killer. The
 * caller must handle the failure which is quite likely to happen under
 * heavy memory pressure. The flag is suitable when failure can easily be
 * handled at small cost, such as reduced throughput.
 *
 * %__GFP_RETRY_MAYFAIL: The VM implementation will retry memory reclaim
 * procedures that have previously failed if there is some indication
 * that progress has been made elsewhere. It can wait for other
 * tasks to attempt high-level approaches to freeing memory such as
 * compaction (which removes fragmentation) and page-out.
 * There is still a definite limit to the number of retries, but it is
 * a larger limit than with %__GFP_NORETRY.
 * Allocations with this flag may fail, but only when there is
 * genuinely little unused memory. While these allocations do not
 * directly trigger the OOM killer, their failure indicates that
 * the system is likely to need to use the OOM killer soon. The
 * caller must handle failure, but can reasonably do so by failing
 * a higher-level request, or completing it only in a much less
 * efficient manner.
 * If the allocation does fail, and the caller is in a position to
 * free some non-essential memory, doing so could benefit the system
 * as a whole.
 *
 * %__GFP_NOFAIL: The VM implementation _must_ retry infinitely: the caller
 * cannot handle allocation failures. The allocation could block
 * indefinitely but will never return with failure. Testing for
 * failure is pointless.
 * It _must_ be blockable and used together with __GFP_DIRECT_RECLAIM.
 * It should _never_ be used in non-sleepable contexts.
 * New users should be evaluated carefully (and the flag should be
 * used only when there is no reasonable failure policy) but it is
 * definitely preferable to use the flag rather than open-code an endless
 * loop around the allocator.
 * Allocating pages from the buddy with __GFP_NOFAIL and order > 1 is
 * not supported. Please consider using kvmalloc() instead.
 */
#define __GFP_IO	((__force gfp_t)___GFP_IO)
#define __GFP_FS	((__force gfp_t)___GFP_FS)
#define __GFP_DIRECT_RECLAIM	((__force gfp_t)___GFP_DIRECT_RECLAIM) /* Caller can reclaim */
#define __GFP_KSWAPD_RECLAIM	((__force gfp_t)___GFP_KSWAPD_RECLAIM) /* kswapd can wake */
#define __GFP_RECLAIM ((__force gfp_t)(___GFP_DIRECT_RECLAIM|___GFP_KSWAPD_RECLAIM))
#define __GFP_RETRY_MAYFAIL	((__force gfp_t)___GFP_RETRY_MAYFAIL)
#define __GFP_NOFAIL	((__force gfp_t)___GFP_NOFAIL)
#define __GFP_NORETRY	((__force gfp_t)___GFP_NORETRY)
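
/*
 * Illustrative sketch only: an optimistic large allocation that prefers to
 * fail fast and fall back rather than disturb the system, roughly the
 * approach kvmalloc() takes internally for costly requests:
 *
 *	buf = kmalloc(size, GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);
 *	if (!buf)
 *		buf = vmalloc(size);
 */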

/**
 * DOC: Action modifiers
 *
 * Action modifiers
 * ----------------
 *
 * %__GFP_NOWARN suppresses allocation failure reports.
 *
 * %__GFP_COMP requests that the pages be returned as a compound page (i.e.
 * with compound page metadata set up).
 *
 * %__GFP_ZERO returns a zeroed page on success.
 *
 * %__GFP_ZEROTAGS zeroes memory tags at allocation time if the memory itself
 * is being zeroed (either via __GFP_ZERO or via init_on_alloc, provided that
 * __GFP_SKIP_ZERO is not set). This flag is intended for optimization: setting
 * memory tags at the same time as zeroing memory has minimal additional
 * performance impact.
 *
 * %__GFP_SKIP_KASAN makes KASAN skip unpoisoning on page allocation.
 * Used for userspace and vmalloc pages; the latter are unpoisoned by
 * kasan_unpoison_vmalloc instead. For userspace pages, results in
 * poisoning being skipped as well, see should_skip_kasan_poison for
 * details. Only effective in HW_TAGS mode.
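 *
 * As an illustration, requesting zeroed memory with %__GFP_ZERO::
 *
 *	p = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
 *
 * is effectively what kzalloc(size, GFP_KERNEL) does.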
 */
#define __GFP_NOWARN	((__force gfp_t)___GFP_NOWARN)
#define __GFP_COMP	((__force gfp_t)___GFP_COMP)
#define __GFP_ZERO	((__force gfp_t)___GFP_ZERO)
#define __GFP_ZEROTAGS	((__force gfp_t)___GFP_ZEROTAGS)
#define __GFP_SKIP_ZERO	((__force gfp_t)___GFP_SKIP_ZERO)
#define __GFP_SKIP_KASAN ((__force gfp_t)___GFP_SKIP_KASAN)

/* Disable lockdep for GFP context tracking */
#define __GFP_NOLOCKDEP ((__force gfp_t)___GFP_NOLOCKDEP)

/* Room for N __GFP_FOO bits */
#define __GFP_BITS_SHIFT ___GFP_LAST_BIT
#define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))

/**
 * DOC: Useful GFP flag combinations
 *
 * Useful GFP flag combinations
 * ----------------------------
 *
 * Useful GFP flag combinations that are commonly used. It is recommended
 * that subsystems start with one of these combinations and then set/clear
 * %__GFP_FOO flags as necessary.
 *
 * %GFP_ATOMIC users cannot sleep and need the allocation to succeed. A lower
 * watermark is applied to allow access to "atomic reserves".
 * The current implementation doesn't support NMI and a few other strict
 * non-preemptive contexts (e.g. raw_spin_lock). The same applies to %GFP_NOWAIT.
 *
 * %GFP_KERNEL is typical for kernel-internal allocations. The caller requires
 * %ZONE_NORMAL or a lower zone for direct access but can direct reclaim.
 *
 * %GFP_KERNEL_ACCOUNT is the same as GFP_KERNEL, except the allocation is
 * accounted to kmemcg.
 *
 * %GFP_NOWAIT is for kernel allocations that should not stall for direct
 * reclaim, start physical IO or use any filesystem callback. It is very
 * likely to fail to allocate memory, even for very small allocations.
 *
 * %GFP_NOIO will use direct reclaim to discard clean pages or slab pages
 * that do not require the starting of any physical IO.
 * Please try to avoid using this flag directly and instead use
 * memalloc_noio_{save,restore} to mark the whole scope which cannot
 * perform any IO with a short explanation why. All allocation requests
 * will inherit GFP_NOIO implicitly.
 *
 * %GFP_NOFS will use direct reclaim but will not use any filesystem interfaces.
 * Please try to avoid using this flag directly and instead use
 * memalloc_nofs_{save,restore} to mark the whole scope which cannot/shouldn't
 * recurse into the FS layer with a short explanation why. All allocation
 * requests will inherit GFP_NOFS implicitly.
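 *
 * A minimal sketch of the recommended scope API (memalloc_nofs_save() and
 * memalloc_nofs_restore() live in linux/sched/mm.h; do_fs_work() is just a
 * placeholder for the caller's code)::
 *
 *	unsigned int nofs_flags = memalloc_nofs_save();
 *
 *	err = do_fs_work();
 *	memalloc_nofs_restore(nofs_flags);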
 *
 * %GFP_USER is for userspace allocations that also need to be directly
 * accessible by the kernel or hardware. It is typically used by hardware
 * for buffers that are mapped to userspace (e.g. graphics) and that the
 * hardware still must DMA to. cpuset limits are enforced for these
 * allocations.
 *
 * %GFP_DMA exists for historical reasons and should be avoided where possible.
 * This flag indicates that the caller requires that the lowest zone be
 * used (%ZONE_DMA or 16M on x86-64). Ideally, this would be removed but
 * it would require careful auditing as some users really require it and
 * others use the flag to avoid lowmem reserves in %ZONE_DMA and treat the
 * lowest zone as a type of emergency reserve.
 *
 * %GFP_DMA32 is similar to %GFP_DMA except that the caller requires a 32-bit
 * address. Note that kmalloc(..., GFP_DMA32) does not return DMA32 memory
 * because the DMA32 kmalloc cache array is not implemented.
 * (Reason: there is no such user in the kernel).
 *
 * %GFP_HIGHUSER is for userspace allocations that may be mapped to userspace,
 * do not need to be directly accessible by the kernel but that cannot
 * move once in use. An example may be a hardware allocation that maps
 * data directly into userspace but has no addressing limitations.
 *
 * %GFP_HIGHUSER_MOVABLE is for userspace allocations that the kernel does not
 * need direct access to but can use kmap() when access is required. They
 * are expected to be movable via page reclaim or page migration. Typically,
 * pages on the LRU would also be allocated with %GFP_HIGHUSER_MOVABLE.
 *
 * %GFP_TRANSHUGE and %GFP_TRANSHUGE_LIGHT are used for THP allocations. They
 * are compound allocations that will generally fail quickly if memory is not
 * available and will not wake kswapd/kcompactd on failure. The _LIGHT
 * version does not attempt reclaim/compaction at all and is used by default
 * in the page fault path, while the non-light version is used by khugepaged.
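 *
 * As an illustration of the "start from a combination, then adjust" advice
 * above, an allocation that must not stall but can be retried later might
 * do (illustrative only; failure handling is mandatory with %GFP_NOWAIT)::
 *
 *	obj = kmem_cache_alloc(cache, GFP_NOWAIT);
 *	if (!obj)
 *		return -EAGAIN;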
 */
#define GFP_ATOMIC	(__GFP_HIGH|__GFP_KSWAPD_RECLAIM)
#define GFP_KERNEL	(__GFP_RECLAIM | __GFP_IO | __GFP_FS)
#define GFP_KERNEL_ACCOUNT (GFP_KERNEL | __GFP_ACCOUNT)
#define GFP_NOWAIT	(__GFP_KSWAPD_RECLAIM | __GFP_NOWARN)
#define GFP_NOIO	(__GFP_RECLAIM)
#define GFP_NOFS	(__GFP_RECLAIM | __GFP_IO)
#define GFP_USER	(__GFP_RECLAIM | __GFP_IO | __GFP_FS | __GFP_HARDWALL)
#define GFP_DMA		__GFP_DMA
#define GFP_DMA32	__GFP_DMA32
#define GFP_HIGHUSER	(GFP_USER | __GFP_HIGHMEM)
#define GFP_HIGHUSER_MOVABLE	(GFP_HIGHUSER | __GFP_MOVABLE | __GFP_SKIP_KASAN)
#define GFP_TRANSHUGE_LIGHT	((GFP_HIGHUSER_MOVABLE | __GFP_COMP | \
			 __GFP_NOMEMALLOC | __GFP_NOWARN) & ~__GFP_RECLAIM)
#define GFP_TRANSHUGE	(GFP_TRANSHUGE_LIGHT | __GFP_DIRECT_RECLAIM)

#endif /* __LINUX_GFP_TYPES_H */