/linux-6.15/mm/percpu-km.c
    65:  pages = alloc_pages(gfp, order_base_2(nr_pages));  in pcpu_create_chunk()
   109:  size_t nr_pages, alloc_pages;  in pcpu_verify_alloc_info()  (local)
   118:  alloc_pages = roundup_pow_of_two(nr_pages);  in pcpu_verify_alloc_info()
   120:  if (alloc_pages > nr_pages)  in pcpu_verify_alloc_info()
   122:  alloc_pages - nr_pages);  in pcpu_verify_alloc_info()
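The percpu-km hits show why the waste check exists: alloc_pages() only hands out power-of-two blocks, so a chunk of nr_pages pages is rounded up with order_base_2()/roundup_pow_of_two() and the surplus sits unused. A minimal sketch of that rounding, not taken from the tree (demo_alloc_chunk is a hypothetical name):

    #include <linux/gfp.h>
    #include <linux/log2.h>
    #include <linux/mm.h>
    #include <linux/printk.h>

    /* Allocate nr_pages physically contiguous pages; the buddy allocator
     * works in power-of-two orders, so round up and report the surplus. */
    static struct page *demo_alloc_chunk(unsigned int nr_pages, gfp_t gfp)
    {
            unsigned int order = order_base_2(nr_pages); /* ceil(log2(nr_pages)) */
            struct page *pages = alloc_pages(gfp, order);

            if (pages && (1U << order) > nr_pages)
                    pr_info("chunk wastes %u page(s)\n", (1U << order) - nr_pages);

            return pages; /* caller frees with __free_pages(pages, order) */
    }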
/linux-6.15/tools/testing/selftests/bpf/progs/verifier_arena_large.c
    79:  __noinline int alloc_pages(int page_cnt, int pages_atonce, bool first_pass,  in alloc_pages()  (function)
   119:  err = alloc_pages(PAGE_CNT, 1, true, PAGE_CNT, 2);  in big_alloc2()
   134:  err = alloc_pages(PAGE_CNT / 2, 1, false, PAGE_CNT, 4);  in big_alloc2()
   150:  err = alloc_pages(PAGE_CNT / 4, 2, false, PAGE_CNT, 6);  in big_alloc2()
/linux-6.15/drivers/xen/unpopulated-alloc.c
    39:  unsigned int i, alloc_pages = round_up(nr_pages, PAGES_PER_SECTION);  in fill_list()  (local)
    53:  alloc_pages * PAGE_SIZE, mhp_range.start, mhp_range.end,  in fill_list()
   111:  for (i = 0; i < alloc_pages; i++) {  in fill_list()
   128:  for (i = 0; i < alloc_pages; i++) {  in fill_list()
/linux-6.15/lib/tests/fortify_kunit.c
   283:  vmalloc((alloc_pages) * PAGE_SIZE), vfree(p)); \
   285:  vzalloc((alloc_pages) * PAGE_SIZE), vfree(p)); \
   287:  __vmalloc((alloc_pages) * PAGE_SIZE, gfp), vfree(p)); \
   298:  kvmalloc((alloc_pages) * PAGE_SIZE, gfp), \
   301:  kvmalloc_node((alloc_pages) * PAGE_SIZE, gfp, NUMA_NO_NODE), \
   304:  kvzalloc((alloc_pages) * PAGE_SIZE, gfp), \
   310:  kvcalloc(1, (alloc_pages) * PAGE_SIZE, gfp), \
   313:  kvcalloc((alloc_pages) * PAGE_SIZE, 1, gfp), \
   316:  kvmalloc_array(1, (alloc_pages) * PAGE_SIZE, gfp), \
   319:  kvmalloc_array((alloc_pages) * PAGE_SIZE, 1, gfp), \
   [all …]
/linux-6.15/rust/helpers/page.c
     8:  return alloc_pages(gfp_mask, order);  in rust_helper_alloc_pages()
/linux-6.15/arch/riscv/kernel/unaligned_access_speed.c
   154:  bufs[cpu] = alloc_pages(GFP_KERNEL, MISALIGNED_BUFFER_ORDER);  in check_unaligned_access_speed_all_cpus()
   252:  buf = alloc_pages(GFP_KERNEL, MISALIGNED_BUFFER_ORDER);  in riscv_online_cpu()
   293:  page = alloc_pages(GFP_KERNEL, MISALIGNED_BUFFER_ORDER);  in check_vector_unaligned_access()
/linux-6.15/drivers/iommu/iommu-pages.h
    60:  page = alloc_pages(gfp | __GFP_ZERO, order);  in __iommu_alloc_pages()
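The iommu-pages.h hit passes __GFP_ZERO so the page allocator returns memory that is already cleared, sparing the caller an explicit memset(). A short illustrative sketch (demo_alloc_zeroed is a made-up helper, not the driver's code):

    #include <linux/gfp.h>
    #include <linux/mm.h>

    /* Get 2^order pages pre-zeroed by the allocator itself. */
    static void *demo_alloc_zeroed(unsigned int order)
    {
            struct page *page = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);

            if (!page)
                    return NULL;

            return page_address(page); /* free via free_pages((unsigned long)addr, order) */
    }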
/linux-6.15/drivers/infiniband/hw/vmw_pvrdma/pvrdma_misc.c
    53:  u64 npages, bool alloc_pages)  in pvrdma_page_dir_init()  (argument)
    83:  if (alloc_pages) {  in pvrdma_page_dir_init()
/linux-6.15/Documentation/translations/zh_CN/core-api/memory-allocation.rst
    21:  You can request pages directly from the page allocator with alloc_pages. More specialized allocators can also be used,
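That documentation sentence summarizes what every caller in this listing does: request 2^order contiguous pages from the buddy allocator and hand them back at the same order. A minimal, self-contained sketch of that basic pattern (demo_direct_request is hypothetical):

    #include <linux/errno.h>
    #include <linux/gfp.h>
    #include <linux/mm.h>
    #include <linux/string.h>

    static int demo_direct_request(void)
    {
            unsigned int order = 2; /* 2^2 = 4 contiguous pages */
            struct page *page = alloc_pages(GFP_KERNEL, order);

            if (!page)
                    return -ENOMEM;

            memset(page_address(page), 0, PAGE_SIZE << order); /* use the buffer */
            __free_pages(page, order); /* must be freed at the same order */
            return 0;
    }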
/linux-6.15/drivers/net/ethernet/qlogic/qed/qed_chain.c
   273:  goto alloc_pages;  in qed_chain_alloc_pbl()
   288:  alloc_pages:  in qed_chain_alloc_pbl()
/linux-6.15/mm/kmsan/hooks.c
   164:  shadow = alloc_pages(gfp_mask, 1);  in kmsan_ioremap_page_range()
   165:  origin = alloc_pages(gfp_mask, 1);  in kmsan_ioremap_page_range()
/linux-6.15/arch/riscv/kvm/nacl.c
   141:  shmem_page = alloc_pages(GFP_KERNEL | __GFP_ZERO,  in kvm_riscv_nacl_init()
/linux-6.15/rust/kernel/page.rs
    75:  let page = unsafe { bindings::alloc_pages(flags.as_raw(), 0) };  in alloc_page()
/linux-6.15/include/linux/gfp.h
   341:  #define alloc_pages(...) alloc_hooks(alloc_pages_noprof(__VA_ARGS__))  (macro)
   346:  #define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0)
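The gfp.h entry is the definition all the other hits expand: alloc_pages() wraps alloc_pages_noprof() in alloc_hooks() so the allocation can be attributed to its call site, and alloc_page() is simply the order-0 case. A trivial illustration of that shorthand (demo_one_page is hypothetical):

    #include <linux/gfp.h>
    #include <linux/mm.h>

    /* alloc_page(gfp) expands to alloc_pages(gfp, 0): one order-0 page. */
    static struct page *demo_one_page(gfp_t gfp)
    {
            struct page *page = alloc_page(gfp); /* same as alloc_pages(gfp, 0) */

            return page; /* free with __free_page(page) */
    }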
/linux-6.15/lib/test_meminit.c
    69:  page = alloc_pages(GFP_KERNEL, order);  in do_alloc_pages_order()
    76:  page = alloc_pages(GFP_KERNEL, order);  in do_alloc_pages_order()
/linux-6.15/arch/powerpc/include/asm/book3s/64/pgalloc.h
    31:  page = alloc_pages(pgtable_gfp_flags(mm, PGALLOC_GFP | __GFP_RETRY_MAYFAIL),  in radix__pgd_alloc()
/linux-6.15/drivers/gpu/drm/i915/gem/i915_gem_internal.c
    75:  page = alloc_pages(gfp | (order ? QUIET : MAYFAIL),  in i915_gem_object_get_pages_internal()
/linux-6.15/drivers/media/pci/intel/ipu6/ipu6-dma.c
    74:  pages[i] = alloc_pages(gfp, order);  in __alloc_buffer()
    76:  pages[i] = alloc_pages(gfp, --order);  in __alloc_buffer()
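The ipu6-dma hit (retrying with --order) is the usual fallback loop for high-order allocations: ask for the biggest block first and step down when the buddy allocator cannot provide it. A simplified sketch of that loop; the function name and the __GFP_NORETRY | __GFP_NOWARN modifiers for the opportunistic large orders are assumptions, not copied from the driver:

    #include <linux/gfp.h>
    #include <linux/mm.h>

    /* Allocate the largest block available up to 2^max_order pages,
     * reporting the order actually obtained through *got_order. */
    static struct page *demo_alloc_highest(unsigned int max_order,
                                           unsigned int *got_order)
    {
            unsigned int order = max_order;

            for (;;) {
                    gfp_t gfp = GFP_KERNEL;
                    struct page *page;

                    /* Large orders are best-effort: no retries, no warnings. */
                    if (order)
                            gfp |= __GFP_NORETRY | __GFP_NOWARN;

                    page = alloc_pages(gfp, order);
                    if (page) {
                            *got_order = order;
                            return page;
                    }
                    if (!order)
                            return NULL; /* even a single page failed */
                    order--;
            }
    }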
/linux-6.15/fs/ramfs/file-nommu.c
    84:  pages = alloc_pages(gfp, order);  in ramfs_nommu_expand_for_mapping()
/linux-6.15/arch/x86/kernel/kvmclock.c
   215:  p = alloc_pages(GFP_KERNEL, order);  in kvmclock_init_mem()
/linux-6.15/drivers/staging/media/ipu3/ipu3-dmamap.c
    58:  page = alloc_pages((order_mask - order_size) ?  in imgu_dmamap_alloc_buffer()
/linux-6.15/arch/x86/platform/efi/memmap.c
    25:  struct page *p = alloc_pages(GFP_KERNEL, order);  in __efi_memmap_alloc_late()
/linux-6.15/kernel/dma/pool.c
    96:  page = alloc_pages(gfp, order);  in atomic_pool_expand()
/linux-6.15/drivers/net/wireless/intel/iwlwifi/fw/paging.c
    72:  block = alloc_pages(GFP_KERNEL, order);  in iwl_alloc_fw_paging_mem()
/linux-6.15/mm/kasan/kasan_test_c.c
   322:  pages = alloc_pages(GFP_KERNEL, order);  in page_alloc_oob_right()
   336:  pages = alloc_pages(GFP_KERNEL, order);  in page_alloc_uaf()
  1802:  p_page = alloc_pages(GFP_KERNEL, 1);  in vmap_tags()
  1843:  page = alloc_pages(GFP_KERNEL, 1);  in vm_map_ram_tags()
  1886:  pages = alloc_pages(GFP_KERNEL, order);  in match_all_not_assigned()
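The kasan_test_c.c hits use alloc_pages() to get a raw buddy-allocator buffer and then touch it out of bounds (page_alloc_oob_right) or after freeing it (page_alloc_uaf) to check that KASAN reports the bug. A trimmed sketch of the out-of-bounds case, assuming the test file's KUNIT_EXPECT_KASAN_FAIL() helper and not reproducing the exact test:

    #include <kunit/test.h>
    #include <linux/gfp.h>
    #include <linux/mm.h>

    static void demo_page_alloc_oob(struct kunit *test)
    {
            unsigned int order = 4;
            struct page *pages = alloc_pages(GFP_KERNEL, order);
            char *ptr;

            KUNIT_ASSERT_NOT_ERR_OR_NULL(test, pages);
            ptr = page_address(pages);

            /* One byte past the 2^order-page buffer: KASAN should flag it. */
            KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[PAGE_SIZE << order]);

            __free_pages(pages, order);
    }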