/linux-6.15/Documentation/translations/zh_CN/core-api/gfp_mask-from-fs-io.rst
    29: The traditional way to avoid this deadlock problem is to clear __GFP_FS and __GFP_IO from the gfp mask when calling the allocator
    41: critical section. Any allocation from that scope will drop __GFP_FS and __GFP_IO from the given mask, so
/linux-6.15/include/linux/gfp_types.h
    260: #define __GFP_FS ((__force gfp_t)___GFP_FS)
    378: #define GFP_KERNEL (__GFP_RECLAIM | __GFP_IO | __GFP_FS)
    383: #define GFP_USER (__GFP_RECLAIM | __GFP_IO | __GFP_FS | __GFP_HARDWALL)
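For orientation: __GFP_FS is exactly what separates GFP_KERNEL from the restricted masks. A sketch of the relationship (GFP_NOFS and GFP_NOIO are not among the hits above; their definitions are quoted from memory and may differ in minor detail from gfp_types.h):

    /* Reclaim may do anything, including calling back into filesystems. */
    #define GFP_KERNEL  (__GFP_RECLAIM | __GFP_IO | __GFP_FS)
    /* Reclaim may submit I/O, but must not re-enter filesystem code. */
    #define GFP_NOFS    (__GFP_RECLAIM | __GFP_IO)
    /* Reclaim must not submit any I/O at all. */
    #define GFP_NOIO    (__GFP_RECLAIM)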
/linux-6.15/include/linux/gfp.h
    411: return (gfp & (__GFP_IO | __GFP_FS)) == (__GFP_IO | __GFP_FS);    [gfp_has_io_fs()]
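The gfp.h hit is the body of a small predicate; reconstructed around that return statement, the helper presumably reads:

    /* True only if reclaim for this allocation may both submit I/O
     * (__GFP_IO) and call back into filesystems (__GFP_FS). */
    static inline bool gfp_has_io_fs(gfp_t gfp)
    {
            return (gfp & (__GFP_IO | __GFP_FS)) == (__GFP_IO | __GFP_FS);
    }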
/linux-6.15/include/linux/buffer_head.h
    368: gfp = mapping_gfp_constraint(bdev->bd_mapping, ~__GFP_FS);    [getblk_unmovable()]
    379: gfp = mapping_gfp_constraint(bdev->bd_mapping, ~__GFP_FS);    [__getblk()]
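Both buffer_head.h hits use the same pattern: derive the allocation mask from the block device's mapping, then strip __GFP_FS so reclaim triggered by the buffer-cache allocation cannot recurse into a filesystem. A minimal sketch of that pattern (the wrapper name is made up for illustration; mapping_gfp_constraint() just ANDs the mapping's default mask with the constraint):

    #include <linux/pagemap.h>

    /* Allocation mask for a folio of @mapping with FS reclaim forbidden. */
    static gfp_t nofs_mapping_gfp(struct address_space *mapping)
    {
            return mapping_gfp_constraint(mapping, ~__GFP_FS);
    }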
/linux-6.15/drivers/gpu/drm/xe/xe_shrinker.c
    94:  bool can_backup = !!(sc->gfp_mask & __GFP_FS);    [xe_shrinker_count()]
    167: bool can_backup = !!(sc->gfp_mask & __GFP_FS);    [xe_shrinker_scan()]
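The xe shrinker hits gate its work on whether the reclaiming context allows filesystem activity. A hedged sketch of a scan callback built around the same test (everything except struct shrink_control, SHRINK_STOP and __GFP_FS is illustrative):

    #include <linux/shrinker.h>

    static unsigned long example_scan(struct shrinker *shrink,
                                      struct shrink_control *sc)
    {
            /* Backing objects up may touch a filesystem (e.g. shmem), so it
             * is only safe when the caller allows __GFP_FS reclaim. */
            bool can_backup = !!(sc->gfp_mask & __GFP_FS);

            if (!can_backup)
                    return SHRINK_STOP;

            /* ... evict objects, return the number of pages freed ... */
            return 0;
    }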
/linux-6.15/include/linux/sched/mm.h
    260: flags &= ~(__GFP_IO | __GFP_FS);    [current_gfp_context()]
    262: flags &= ~__GFP_FS;    [current_gfp_context()]
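These two lines are the core of current_gfp_context(): a task-level memalloc scope retroactively strips __GFP_IO/__GFP_FS from whatever mask the allocation site passed. A rough sketch of the logic they imply (assuming the PF_MEMALLOC_NOIO and PF_MEMALLOC_NOFS task flags; the real helper covers additional flags):

    static inline gfp_t example_gfp_context(gfp_t flags)
    {
            unsigned int pflags = READ_ONCE(current->flags);

            if (pflags & PF_MEMALLOC_NOIO)
                    flags &= ~(__GFP_IO | __GFP_FS);   /* no I/O of any kind */
            else if (pflags & PF_MEMALLOC_NOFS)
                    flags &= ~__GFP_FS;                /* no FS re-entry */

            return flags;
    }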
/linux-6.15/Documentation/core-api/gfp_mask-from-fs-io.rst
    19: The traditional way to avoid this deadlock problem is to clear __GFP_FS
    35: scope will inherently drop __GFP_FS respectively __GFP_IO from the given
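The document these lines come from recommends the scope API over hand-clearing flags: mark the FS-critical section once and every nested allocation behaves as GFP_NOFS. A minimal usage sketch (the wrapper function is hypothetical):

    #include <linux/sched/mm.h>
    #include <linux/slab.h>

    static void *alloc_inside_fs_critical_section(size_t size)
    {
            unsigned int nofs_flags = memalloc_nofs_save();

            /* GFP_KERNEL is transparently treated as GFP_NOFS here, so
             * reclaim from this allocation cannot re-enter the filesystem. */
            void *p = kmalloc(size, GFP_KERNEL);

            memalloc_nofs_restore(nofs_flags);
            return p;
    }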
/linux-6.15/fs/nfs/fscache.h
    105: if (current_is_kswapd() || !(gfp & __GFP_FS))    [nfs_fscache_release_folio()]
/linux-6.15/fs/netfs/misc.c
    308: if (current_is_kswapd() || !(gfp & __GFP_FS))    [netfs_release_folio()]
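The nfs and netfs hits above follow the same ->release_folio convention: refuse to drop cached state when running from kswapd or from a context that did not pass __GFP_FS, since tearing the folio down could require filesystem I/O. A hedged sketch of that shape (all details other than the quoted test are illustrative):

    static bool example_release_folio(struct folio *folio, gfp_t gfp)
    {
            if (folio_test_dirty(folio))
                    return false;           /* data still needs writing back */

            /* Releasing may block on filesystem I/O, which is not allowed
             * from kswapd or from a !__GFP_FS reclaim context. */
            if (current_is_kswapd() || !(gfp & __GFP_FS))
                    return false;

            /* ... detach private state and allow the folio to be freed ... */
            return true;
    }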
/linux-6.15/mm/vmpressure.c
    268: if (!(gfp & (__GFP_HIGHMEM | __GFP_MOVABLE | __GFP_IO | __GFP_FS)))    [vmpressure()]
/linux-6.15/mm/internal.h
    74: #define GFP_RECLAIM_MASK (__GFP_RECLAIM|__GFP_HIGH|__GFP_IO|__GFP_FS|\
    80: #define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_RECLAIM|__GFP_IO|__GFP_FS))
/linux-6.15/mm/oom_kill.c
    1142: if (!(oc->gfp_mask & __GFP_FS) && !is_memcg_oom(oc))    [out_of_memory()]
/linux-6.15/mm/vmalloc.c
    3708: if ((gfp_mask & (__GFP_FS | __GFP_IO)) == __GFP_IO)    [__vmalloc_area_node()]
    3710: else if ((gfp_mask & (__GFP_FS | __GFP_IO)) == 0)    [__vmalloc_area_node()]
    3720: if ((gfp_mask & (__GFP_FS | __GFP_IO)) == __GFP_IO)    [__vmalloc_area_node()]
    3722: else if ((gfp_mask & (__GFP_FS | __GFP_IO)) == 0)    [__vmalloc_area_node()]
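vmalloc cannot push the caller's restricted gfp mask into every nested page-table allocation, so __vmalloc_area_node() translates the mask into a memalloc scope instead, as the repeated pairs of lines show. A hypothetical wrapper illustrating the same translation:

    #include <linux/sched/mm.h>

    static void run_under_gfp_scope(gfp_t gfp_mask, void (*fn)(void *), void *arg)
    {
            gfp_t io_fs = gfp_mask & (__GFP_FS | __GFP_IO);
            unsigned int flags = 0;

            if (io_fs == __GFP_IO)                  /* GFP_NOFS-style caller */
                    flags = memalloc_nofs_save();
            else if (io_fs == 0)                    /* GFP_NOIO-style caller */
                    flags = memalloc_noio_save();

            fn(arg);        /* nested GFP_KERNEL allocations inherit the limit */

            if (io_fs == __GFP_IO)
                    memalloc_nofs_restore(flags);
            else if (io_fs == 0)
                    memalloc_noio_restore(flags);
    }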
/linux-6.15/mm/compaction.c
    814:  if (cc->gfp_mask & __GFP_FS) {    [too_many_isolated()]
    1127: if (!(cc->gfp_mask & __GFP_FS) && mapping)    [isolate_migratepages_block()]
/linux-6.15/fs/btrfs/verity.c
    744: folio = filemap_alloc_folio(mapping_gfp_constraint(inode->i_mapping, ~__GFP_FS),    [btrfs_read_merkle_tree_page()]
/linux-6.15/fs/btrfs/fs.h
    910: return mapping_gfp_constraint(mapping, ~__GFP_FS);    [btrfs_alloc_write_mask()]
/linux-6.15/fs/btrfs/compression.c
    478: ~__GFP_FS), 0);    [add_ra_bio_pages()]
/linux-6.15/fs/nilfs2/inode.c
    313: mapping_gfp_constraint(inode->i_mapping, ~__GFP_FS));    [nilfs_new_inode()]
    486: mapping_gfp_constraint(inode->i_mapping, ~__GFP_FS));    [__nilfs_read_inode()]
/linux-6.15/fs/nilfs2/namei.c
    167: ~__GFP_FS));    [nilfs_symlink()]
/linux-6.15/kernel/power/main.c
    49: gfp_allowed_mask &= ~(__GFP_IO | __GFP_FS);    [pm_restrict_gfp_mask()]
/linux-6.15/fs/xfs/xfs_qm.c
    570: if ((sc->gfp_mask & (__GFP_FS|__GFP_DIRECT_RECLAIM)) != (__GFP_FS|__GFP_DIRECT_RECLAIM))    [xfs_qm_shrink_scan()]
/linux-6.15/fs/xfs/xfs_iops.c
    1395: mapping_set_gfp_mask(inode->i_mapping, (gfp_mask & ~(__GFP_FS)));    [xfs_setup_inode()]
/linux-6.15/drivers/staging/media/atomisp/pci/hmm/hmm_bo.c
    624: const gfp_t gfp = __GFP_NOWARN | __GFP_RECLAIM | __GFP_FS;    [alloc_private_pages()]
/linux-6.15/fs/ceph/addr.c
    1999: mapping_gfp_constraint(mapping, ~__GFP_FS));    [ceph_filemap_fault()]
    2150: ~__GFP_FS));    [ceph_fill_inline_data()]
/linux-6.15/fs/ntfs3/file.c
    297: mapping_gfp_constraint(mapping, ~__GFP_FS));    [ntfs_zero_range()]