Search results for folio_order() in linux-6.15:

/linux-6.15/include/linux/

folio_queue.h
    238  folioq->orders[slot] = folio_order(folio);  in folioq_append()
    260  folioq->orders[slot] = folio_order(folio);  in folioq_append_mark()
|
huge_mm.h
    474  return folio_order(folio) >= HPAGE_PMD_ORDER;  in folio_test_pmd_mappable()
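The huge_mm.h hit is a pure order comparison. A small sketch, assuming 4 KiB
base pages where a PMD maps 512 pages, i.e. HPAGE_PMD_ORDER == 9; the kernel
derives the constant from PMD_SHIFT - PAGE_SHIFT rather than hard-coding it:

    /* Hypothetical standalone version of the order check above. */
    #define HPAGE_PMD_ORDER 9                  /* assumed: 4 KiB pages, 2 MiB PMD */

    static inline int pmd_mappable_sketch(unsigned int order)
    {
            return order >= HPAGE_PMD_ORDER;   /* order 9 == 512 pages == 2 MiB */
    }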
|
mm.h
    1214  static inline unsigned int folio_order(const struct folio *folio)  in folio_order() (definition)
    2011  return folio_order(folio) > 1;  in folio_has_pincount()
    2229  return PAGE_SHIFT + folio_order(folio);  in folio_shift()
    2242  return PAGE_SIZE << folio_order(folio);  in folio_size()
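The mm.h helpers are all shift arithmetic on the order returned by
folio_order(). A minimal, runnable userspace sketch of the same
relationships; the struct layout is hypothetical (a real folio encodes its
order in page metadata, not a plain field):

    #include <stdio.h>

    #define PAGE_SHIFT 12                      /* assume 4 KiB base pages */
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    struct folio { unsigned int order; };      /* hypothetical layout */

    static unsigned int folio_order(const struct folio *folio)
    {
            return folio->order;
    }

    static unsigned int folio_shift(const struct folio *folio)
    {
            return PAGE_SHIFT + folio_order(folio);    /* mm.h:2229 */
    }

    static unsigned long folio_size(const struct folio *folio)
    {
            return PAGE_SIZE << folio_order(folio);    /* mm.h:2242 */
    }

    int main(void)
    {
            struct folio f = { .order = 2 };           /* 4-page folio */
            printf("order=%u shift=%u size=%lu\n",
                   folio_order(&f), folio_shift(&f), folio_size(&f));
            /* prints: order=2 shift=14 size=16384 */
            return 0;
    }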
|
/linux-6.15/mm/

page_io.c
    279  count_mthp_stat(folio_order(folio), MTHP_STAT_ZSWPOUT);  in swap_writepage()
    300  count_mthp_stat(folio_order(folio), MTHP_STAT_SWPOUT);  in count_swpout_vm_event()
    497  count_mthp_stat(folio_order(folio), MTHP_STAT_SWPIN);  in sio_read_complete()
    592  count_mthp_stat(folio_order(folio), MTHP_STAT_SWPIN);  in swap_read_folio_bdev_sync()
    609  count_mthp_stat(folio_order(folio), MTHP_STAT_SWPIN);  in swap_read_folio_bdev_async()
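Every page_io.c hit feeds folio_order() into a per-order statistics bucket,
so swap-in/out events are counted separately for each folio size. A hedged
sketch of such a scheme; the flat array and names are illustrative, and the
kernel's count_mthp_stat() uses per-CPU counters instead:

    #define NR_ORDERS 11                       /* assume orders 0..10 */

    enum stat_item_sketch { STAT_SWPIN, STAT_SWPOUT, NR_STAT_ITEMS };

    static unsigned long order_stats[NR_ORDERS][NR_STAT_ITEMS];

    static void count_stat_sketch(unsigned int order, enum stat_item_sketch item)
    {
            order_stats[order][item]++;        /* bucketed by folio_order() */
    }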
|
huge_memory.c
    3474  int order = folio_order(folio);  in __split_unmapped_folio()
    3507  int old_order = folio_order(folio);  in __split_unmapped_folio()
    3564  mod_mthp_stat(folio_order(release),  in __split_unmapped_folio()
    3736  int order = folio_order(folio);  in __folio_split()
    3747  if (new_order >= folio_order(folio))  in __folio_split()
    3867  if (folio_order(folio) > 1 &&  in __folio_split()
    3872  mod_mthp_stat(folio_order(folio),  in __folio_split()
    4064  mod_mthp_stat(folio_order(folio),  in __folio_unqueue_deferred_split()
    4088  if (folio_order(folio) <= 1)  in deferred_split_folio()
    4201  mod_mthp_stat(folio_order(folio),  in deferred_split_scan()
    [all …]
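Two order guards recur in these split-path hits: the target order of a split
must be strictly lower than the folio's current order, and only folios of
order > 1 can sit on the deferred-split queue (smaller folios lack the tail
page that holds the list linkage). A simplified sketch of just those guards,
not the kernel's actual control flow:

    /* Guards distilled from the huge_memory.c hits above. */
    static int split_order_valid(unsigned int cur_order, unsigned int new_order)
    {
            return new_order < cur_order;      /* __folio_split(): >= rejected */
    }

    static int can_defer_split(unsigned int order)
    {
            return order > 1;                  /* deferred_split_folio(): <= 1 skipped */
    }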
|
migrate.c
    493   mod_mthp_stat(folio_order(folio), MTHP_STAT_NR_ANON, 1);  in __folio_migrate_mapping()
    519   mod_mthp_stat(folio_order(folio), MTHP_STAT_NR_ANON, 1);  in __folio_migrate_mapping()
    2181  order = folio_order(src);  in alloc_migration_target()
    2627  int order = folio_order(src);  in alloc_misplaced_dst_folio()
    2690  folio_order(folio), ZONE_MOVABLE);  in migrate_misplaced_folio_prepare()
|
debug.c
    88  folio_order(folio),  in __dump_folio()
|
slab.h
    221  return folio_order(slab_folio(slab));  in slab_order()
|
readahead.c
    632  unsigned int order = folio_order(folio);  in page_cache_async_ra()
|
shmem.c
    886   XA_STATE_ORDER(xas, &mapping->i_pages, index, folio_order(folio));  in shmem_add_to_page_cache()
    1913  count_mthp_stat(folio_order(folio), MTHP_STAT_SHMEM_FALLBACK);  in shmem_alloc_and_add_folio()
    1914  count_mthp_stat(folio_order(folio), MTHP_STAT_SHMEM_FALLBACK_CHARGE);  in shmem_alloc_and_add_folio()
    2062  new = shmem_alloc_folio(gfp, folio_order(old), info, index);  in shmem_replace_folio()
    2329  } else if (order != folio_order(folio)) {  in shmem_swapin_folio()
    2360  xa_get_order(&mapping->i_pages, index) != folio_order(folio)) {  in shmem_swapin_folio()
    2526  count_mthp_stat(folio_order(folio), MTHP_STAT_SHMEM_ALLOC);  in shmem_get_folio_gfp()
|
swap.c
    112  free_frozen_pages(&folio->page, folio_order(folio));  in __folio_put()
|
khugepaged.c
    1546  if (folio_order(folio) != HPAGE_PMD_ORDER) {  in collapse_pte_mapped_thp()
    1981  if (folio_order(folio) == HPAGE_PMD_ORDER &&  in collapse_file()
    2298  if (folio_order(folio) == HPAGE_PMD_ORDER &&  in hpage_collapse_scan_file()
|
compaction.c
    1221  if (unlikely(skip_isolation_on_order(folio_order(folio),  in isolate_migratepages_block()
    1837  int order = folio_order(src);  in compaction_alloc_noprof()
    1894  int order = folio_order(dst);  in compaction_free()
|
filemap.c
    136  xas_set_order(&xas, folio->index, folio_order(folio));  in page_cache_delete()
    860  XA_STATE_ORDER(xas, &mapping->i_pages, index, folio_order(folio));  in __filemap_add_folio()
    863  unsigned int forder = folio_order(folio);  in __filemap_add_folio()
    867  VM_BUG_ON_FOLIO(folio_order(folio) < mapping_min_folio_order(mapping),  in __filemap_add_folio()
|
swap_state.c
    94  XA_STATE_ORDER(xas, &address_space->i_pages, idx, folio_order(folio));  in add_to_swap_cache()
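The XA_STATE_ORDER() pattern here, and in the shmem.c and filemap.c hits
above, stores the folio as one multi-order XArray entry, so a single large
folio covers 1 << order consecutive page-cache indices. A hedged sketch
using the in-kernel XArray API, with retry logic and accounting stripped:

    #include <linux/pagemap.h>
    #include <linux/xarray.h>

    /* Insert a folio as one entry spanning its whole index range. */
    static int add_folio_entry_sketch(struct address_space *mapping,
                                      struct folio *folio, pgoff_t index)
    {
            XA_STATE_ORDER(xas, &mapping->i_pages, index, folio_order(folio));

            xas_lock_irq(&xas);
            xas_store(&xas, folio);    /* one entry for 1 << order slots */
            xas_unlock_irq(&xas);
            return xas_error(&xas);
    }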
|
mempolicy.c
    1255  order = folio_order(src);  in alloc_migration_target_by_mpol()
    1412  order = folio_order(folio);  in do_mbind()
    2809  pol = get_vma_policy(vma, addr, folio_order(folio), &ilx);  in mpol_misplaced()
|
/linux-6.15/virt/kvm/

guest_memfd.c
    34   int rc = kvm_arch_gmem_prepare(kvm, gfn, pfn, folio_order(folio));  in __kvm_gmem_prepare_folio()
    80   WARN_ON(!IS_ALIGNED(slot->gmem.pgoff, 1 << folio_order(folio)));  in kvm_gmem_prepare_folio()
    82   index = ALIGN_DOWN(index, 1 << folio_order(folio));  in kvm_gmem_prepare_folio()
    370  int order = folio_order(folio);  in kvm_gmem_free_folio()
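The guest_memfd.c hits use 1 << folio_order(folio) as the folio's span in
pages, both to assert alignment and to round a file index down to the
folio's first page. Because the span is a power of two, ALIGN_DOWN() is a
mask operation; a standalone sketch:

    /* Round a page index down to the start of the folio covering it. */
    static unsigned long folio_start_index(unsigned long index, unsigned int order)
    {
            unsigned long nr_pages = 1UL << order;

            return index & ~(nr_pages - 1);    /* e.g. index 13, order 2 -> 12 */
    }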
|
/linux-6.15/fs/bcachefs/

fs-io-pagecache.h
    26  return PAGE_SECTORS << folio_order(folio);  in folio_sectors()
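folio_sectors() is a single shift: with 4 KiB pages and 512-byte sectors,
PAGE_SECTORS is 8, so an order-0 folio is 8 sectors and an order-2 folio is
32. A sketch under those assumptions:

    #define PAGE_SECTORS_SKETCH 8              /* assumed: 4 KiB pages, 512 B sectors */

    static inline unsigned int folio_sectors_sketch(unsigned int order)
    {
            return PAGE_SECTORS_SKETCH << order;   /* order 2 -> 32 sectors */
    }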
|
/linux-6.15/fs/netfs/

rolling_buffer.c
    137  unsigned int order = folio_order(folio);  in rolling_buffer_load_from_ra()
|
/linux-6.15/include/trace/events/

filemap.h
    38  __entry->order = folio_order(folio);
|
/linux-6.15/fs/btrfs/

extent_io.h
    303  if (folio_order(eb->folios[0]))  in num_extent_folios()
|
subpage.c
    185  ASSERT(folio_order(folio) == 0);  in btrfs_subpage_assert()
|
bio.c
    172  ASSERT(folio_order(page_folio(bv->bv_page)) == 0);  in btrfs_end_repair_bio()
|
/linux-6.15/fs/

dax.c
    396  order = folio_order(folio);  in dax_folio_put()
    432  WARN_ON_ONCE(folio_order(folio));  in dax_folio_init()
    461  WARN_ON_ONCE(dax_entry_order(entry) != folio_order(folio));  in dax_associate_entry()
|
/linux-6.15/fs/nfs/

internal.h
    860  pgoff_t index = folio->index >> folio_order(folio);  in nfs_folio_length()
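The nfs hit rescales a page-cache index into units of the folio's own size
by shifting out the order bits: with order 2, pages 0..3 land in unit 0 and
pages 4..7 in unit 1. An illustrative sketch:

    /* Convert a page index into folio-sized units. */
    static unsigned long folio_unit_index(unsigned long page_index,
                                          unsigned int order)
    {
            return page_index >> order;        /* page 6, order 2 -> unit 1 */
    }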
|