/f-stack/freebsd/vm/
vm_phys.c
    492 u_long npages; in vm_phys_init() local
    502 npages = 0; in vm_phys_init()
    545 npages = 0; in vm_phys_init()
    699 m, npages)); in vm_phys_enq_range()
    710 npages -= n; in vm_phys_enq_range()
    711 } while (npages > 0); in vm_phys_enq_range()
    760 return (npages); in vm_phys_alloc_npages()
    762 return (npages); in vm_phys_alloc_npages()
    1162 m_end = m + npages; in vm_phys_enqueue_contig()
    1208 m_end = m + npages; in vm_phys_free_contig()
    [all …]
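
The vm_phys_enq_range() hits (lines 699-711) show a run of npages free pages being consumed chunk by chunk until the count reaches zero. Below is a minimal userland sketch of that countdown, assuming each chunk is the largest power of two that divides the remaining count; the real function additionally has to respect the run's buddy alignment, so this is an illustration of the loop shape, not FreeBSD code.

    #include <stdio.h>

    int
    main(void)
    {
        unsigned long npages = 13;   /* hypothetical run length */
        unsigned long n;

        do {
            n = npages & -npages;    /* largest power of two dividing npages */
            printf("enqueue chunk of %lu page(s)\n", n);
            npages -= n;             /* cf. "npages -= n; } while (npages > 0)" */
        } while (npages > 0);
        return (0);
    }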
|
vm_phys.h
    62 vm_page_t vm_phys_alloc_contig(int domain, u_long npages, vm_paddr_t low,
    66 int vm_phys_alloc_npages(int domain, int pool, int npages, vm_page_t ma[]);
    69 void vm_phys_enqueue_contig(vm_page_t m, u_long npages);
    74 void vm_phys_free_contig(vm_page_t m, u_long npages);
    80 vm_page_t vm_phys_scan_contig(int domain, u_long npages, vm_paddr_t low,
|
sg_pager.c
    76 vm_pindex_t npages, pindex; in sg_pager_alloc() local
    89 npages = 0; in sg_pager_alloc()
    95 npages += sg->sg_segs[i].ss_len / PAGE_SIZE; in sg_pager_alloc()
    104 if (pindex > npages || pindex < OFF_TO_IDX(foff) || in sg_pager_alloc()
    116 object = vm_object_allocate(OBJT_SG, npages); in sg_pager_alloc()
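
Line 95 accumulates the pager object's size in pages by walking the scatter/gather list one segment at a time. A self-contained sketch of that accounting follows; the segment type is a hypothetical stand-in for FreeBSD's sglist segment (paddr plus a PAGE_SIZE-multiple length), not the real structure.

    #include <stddef.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096

    /* Hypothetical stand-in for a scatter/gather segment. */
    struct seg {
        unsigned long ss_paddr;
        size_t ss_len;           /* assumed to be a multiple of PAGE_SIZE */
    };

    int
    main(void)
    {
        struct seg segs[] = {
            { 0x100000, 4 * PAGE_SIZE },
            { 0x400000, 1 * PAGE_SIZE },
        };
        size_t i, npages = 0;

        /* Same accumulation as sg_pager_alloc(): total object size in pages. */
        for (i = 0; i < sizeof(segs) / sizeof(segs[0]); i++)
            npages += segs[i].ss_len / PAGE_SIZE;
        printf("object spans %zu pages\n", npages);
        return (0);
    }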
|
vm_reserv.c
    645 pindex + npages > object->size) in vm_reserv_alloc_contig()
    661 size = npages << PAGE_SHIFT; in vm_reserv_alloc_contig()
    674 if (index + npages > VM_LEVEL_0_NPAGES) in vm_reserv_alloc_contig()
    689 for (i = 0; i < npages; i++) in vm_reserv_alloc_contig()
    692 if (!vm_domain_allocate(vmd, req, npages)) in vm_reserv_alloc_contig()
    694 for (i = 0; i < npages; i++) in vm_reserv_alloc_contig()
    778 vm_domain_freecnt_inc(vmd, npages); in vm_reserv_alloc_contig()
    804 npages -= n; in vm_reserv_alloc_contig()
    1247 size = npages << PAGE_SHIFT; in vm_reserv_test_contig()
    1324 if (npages > VM_LEVEL_0_NPAGES - 1) in vm_reserv_reclaim_contig()
    [all …]
|
vm_reserv.h
    51 int domain, int req, vm_page_t mpred, u_long npages,
    62 bool vm_reserv_reclaim_contig(int domain, u_long npages,
|
vm_glue.c
    177 vm_size_t npages; in vslock() local
    185 npages = atop(end - start); in vslock()
    186 if (npages > vm_page_max_user_wired) in vslock()
    416 int npages, int req_class) in vm_thread_stack_back() argument
    424 for (n = 0; n < npages;) { in vm_thread_stack_back()
    434 &ma[n], npages - n); in vm_thread_stack_back()
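
vslock() (lines 185-186) converts the byte range into a page count with atop() and refuses to wire more than vm_page_max_user_wired pages. A small userland sketch of that computation; the page shift, the macros, and the limit are placeholder values mirroring the kernel's, not taken from it.

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT    12
    #define PAGE_SIZE     (1UL << PAGE_SHIFT)
    #define trunc_page(x) ((x) & ~(PAGE_SIZE - 1))
    #define round_page(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))
    #define atop(x)       ((x) >> PAGE_SHIFT)

    /* Hypothetical limit standing in for vm_page_max_user_wired. */
    static const uint64_t max_user_wired = 8192;

    int
    main(void)
    {
        uint64_t addr = 0x10010, len = 3 * PAGE_SIZE + 100;
        uint64_t start = trunc_page(addr);
        uint64_t end = round_page(addr + len);
        uint64_t npages = atop(end - start);   /* pages spanned by the request */

        if (npages > max_user_wired)
            printf("EAGAIN: would exceed the wiring limit\n");
        else
            printf("wiring %llu pages\n", (unsigned long long)npages);
        return (0);
    }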
|
vm_page.c
    2038 limit += npages; in _vm_domain_allocate()
    2043 new = old - npages; in _vm_domain_allocate()
    2353 vm_wire_add(npages); in vm_page_alloc_contig_domain()
    2370 vm_wire_sub(npages); in vm_page_alloc_contig_domain()
    2600 if (m + npages > m_end) in vm_page_scan_contig()
    2722 if (run_len >= npages) in vm_page_scan_contig()
    2760 m_end = m_run + npages; in vm_page_reclaim_run()
    2829 pa += ptoa(npages); in vm_page_reclaim_run()
    2995 if (count < npages + vmd->vmd_free_reserved || (count < npages + in vm_page_reclaim_contig_domain()
    3035 reclaimed += npages; in vm_page_reclaim_contig_domain()
    [all …]
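
Lines 2038 and 2043 are the core of _vm_domain_allocate(): the per-class reserve is added to the request, and the domain's free count is then decremented in a compare-and-swap loop that refuses to dip below that reserve. A self-contained C11 sketch of the same pattern; the counter, the reserve value, and the standard atomics are stand-ins for FreeBSD's vmd_free_count and atomic_fcmpset.

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical free-page counter shared by allocators. */
    static _Atomic unsigned int free_count = 1000;

    /* Refuse the allocation unless npages can be taken without dropping
     * below "reserved". */
    static bool
    domain_allocate(unsigned int npages, unsigned int reserved)
    {
        unsigned int limit, old, new;

        limit = reserved + npages;          /* cf. "limit += npages" */
        old = atomic_load(&free_count);
        do {
            if (old < limit)
                return (false);             /* would eat into the reserve */
            new = old - npages;             /* cf. "new = old - npages" */
        } while (!atomic_compare_exchange_weak(&free_count, &old, new));
        return (true);
    }

    int
    main(void)
    {
        printf("got 100: %d\n", domain_allocate(100, 64));
        printf("got 900: %d\n", domain_allocate(900, 64));  /* fails: only 900 left */
        return (0);
    }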
|
vm_kern.c
    175 int pflags, u_long npages, vm_paddr_t low, vm_paddr_t high, in kmem_alloc_contig_pages() argument
    189 npages, low, high, alignment, boundary, memattr); in kmem_alloc_contig_pages()
    194 if (!vm_page_reclaim_contig_domain(domain, pflags, npages, in kmem_alloc_contig_pages()
    297 u_long npages; in kmem_alloc_contig_domain() local
    307 npages = atop(size); in kmem_alloc_contig_domain()
    310 pflags, npages, low, high, alignment, boundary, memattr); in kmem_alloc_contig_domain()
    319 end_m = m + npages; in kmem_alloc_contig_domain()
|
vm_page.h
    606 u_long npages, vm_paddr_t low, vm_paddr_t high, u_long alignment,
    609 vm_pindex_t pindex, int domain, int req, u_long npages, vm_paddr_t low,
    649 bool vm_page_reclaim_contig(int req, u_long npages, vm_paddr_t low,
    651 bool vm_page_reclaim_contig_domain(int domain, int req, u_long npages,
    665 vm_page_t vm_page_scan_contig(u_long npages, vm_page_t m_start,
|
swap_pager.c
    793 int mpages, npages; in swp_pager_getswapspace() local
    811 if (npages == 1) in swp_pager_getswapspace()
    813 mpages = npages - 1; in swp_pager_getswapspace()
    814 npages >>= 1; in swp_pager_getswapspace()
    818 *io_npages = npages; in swp_pager_getswapspace()
    820 sp->sw_used += npages; in swp_pager_getswapspace()
    821 swap_pager_avail -= npages; in swp_pager_getswapspace()
    880 if (npages == 0) in swp_pager_freeswapspace()
    885 sp->sw_used -= npages; in swp_pager_freeswapspace()
    893 npages); in swp_pager_freeswapspace()
    [all …]
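
swp_pager_getswapspace() (lines 811-814) keeps shrinking its request when no swap device can supply a contiguous run: it halves npages and retries, giving up only when even a single page cannot be had (the mpages hit shows it will also accept a slightly shorter run). A toy illustration of that fallback, with a fake allocator in place of the real blist and no attempt to model the multi-device loop:

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical stand-in for the swap block allocator: pretend only runs
     * of up to 4 contiguous blocks are available. */
    static bool
    try_alloc_run(int npages)
    {
        return (npages <= 4);
    }

    int
    main(void)
    {
        int npages = 32;    /* caller would like 32 contiguous blocks */

        /* Halve the request whenever the allocator comes up empty. */
        while (!try_alloc_run(npages)) {
            if (npages == 1) {
                printf("no swap space at all\n");
                return (1);
            }
            npages >>= 1;
        }
        printf("allocated a run of %d blocks\n", npages);
        return (0);
    }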
|
device_pager.c
    434 unsigned int npages; in old_dev_pager_ctor() local
    451 npages = OFF_TO_IDX(size); in old_dev_pager_ctor()
    453 for (off = foff; npages--; off += PAGE_SIZE) { in old_dev_pager_ctor()
|
vm_fault.c
    422 int bdry_idx, i, npages, psind, rv; in vm_fault_populate() local
    541 pidx += npages, m = vm_page_next(&m[npages - 1])) { in vm_fault_populate()
    554 npages = atop(pagesizes[psind]); in vm_fault_populate()
    555 for (i = 0; i < npages; i++) { in vm_fault_populate()
    564 for (i = 0; i < npages; i++) { in vm_fault_populate()
    575 for (i = 0; i < npages; i++) { in vm_fault_populate()
|
/f-stack/freebsd/kern/
kern_sendfile.c
    91 int npages; member
    400 sfio->npages); in sendfile_iodone()
    422 npages = sfio->npages; in sendfile_swapin()
    433 if (grabbed < npages) { in sendfile_swapin()
    436 npages = grabbed; in sendfile_swapin()
    550 if (i + count == npages) in sendfile_swapin()
    924 npages; in vn_sendfile()
    929 npages, rhpages); in vn_sendfile()
    937 sfio->npages = npages; in vn_sendfile()
    996 sfio->npages = i; in vn_sendfile()
    [all …]
|
kern_physio.c
    53 int error, i, npages, maxpages; in physio() local
    57 npages = 0; in physio()
    157 if ((npages = vm_fault_quick_hold_pages( in physio()
    167 pages, npages); in physio()
    171 bp->bio_ma_n = npages; in physio()
    187 pmap_qremove((vm_offset_t)sa, npages); in physio()
    188 vm_page_unhold_pages(pages, npages); in physio()
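
physio() (lines 157-188) brackets the transfer with a hold/map/unhold sequence: vm_fault_quick_hold_pages() wires the user buffer's pages into pages[], pmap_qenter()/pmap_qremove() provide a transient kernel mapping, and vm_page_unhold_pages() releases the pages afterwards. The sketch below shows only that call sequence as a hypothetical helper; it assumes kernel context, the header set is approximate, and physio()'s buffer splitting and unmapped-I/O paths are ignored.

    #include <sys/param.h>
    #include <sys/errno.h>
    #include <sys/proc.h>
    #include <vm/vm.h>
    #include <vm/pmap.h>
    #include <vm/vm_extern.h>
    #include <vm/vm_page.h>

    /* Hypothetical helper, not the real physio(): wire a user buffer, map it
     * into a preallocated KVA window, run the transfer, and undo everything. */
    static int
    user_buf_io(struct thread *td, vm_offset_t uva, vm_size_t len,
        vm_offset_t kva, vm_page_t *pages, int maxpages)
    {
        int npages;

        npages = vm_fault_quick_hold_pages(&td->td_proc->p_vmspace->vm_map,
            uva, len, VM_PROT_READ | VM_PROT_WRITE, pages, maxpages);
        if (npages < 0)
            return (EFAULT);                   /* range not faultable */

        pmap_qenter(kva, pages, npages);       /* transient kernel mapping */
        /* ... perform the I/O against kva here ... */
        pmap_qremove(kva, npages);             /* unmap ... */
        vm_page_unhold_pages(pages, npages);   /* ... and drop the holds */
        return (0);
    }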
|
/f-stack/freebsd/arm/nvidia/drm2/
tegra_bo.c
    62 pmap_qremove(bo->vbase, bo->npages); in tegra_bo_destruct()
    65 for (i = 0; i < bo->npages; i++) { in tegra_bo_destruct()
    96 tegra_bo_alloc_contig(size_t npages, u_long alignment, vm_memattr_t memattr, in tegra_bo_alloc_contig() argument
    110 m = vm_page_alloc_contig(NULL, 0, pflags, npages, low, high, alignment, in tegra_bo_alloc_contig()
    114 if (!vm_page_reclaim_contig(pflags, npages, low, high, in tegra_bo_alloc_contig()
    123 for (i = 0; i < npages; i++, m++) { in tegra_bo_alloc_contig()
    148 for (i = 0; i < bo->npages; i++) { in tegra_bo_init_pager()
    168 pmap_qenter(bo->vbase, bo->m, bo->npages); in tegra_bo_init_pager()
    181 bo->npages = atop(size); in tegra_bo_alloc()
    182 bo->m = malloc(sizeof(vm_page_t *) * bo->npages, DRM_MEM_DRIVER, in tegra_bo_alloc()
    [all …]
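
Lines 110-114 show the usual pairing of vm_page_alloc_contig() with vm_page_reclaim_contig(): if no contiguous run is free, ask the reclaim code to relocate pages and try again. A sketch of that retry loop, written as a hypothetical kernel helper rather than the driver's actual function; flag choice, sleeping between retries, and error handling are left out, and the headers are approximate.

    #include <sys/param.h>
    #include <vm/vm.h>
    #include <vm/vm_page.h>

    /* Hypothetical helper: npages physically contiguous pages in [low, high),
     * or NULL once reclaim can no longer produce a candidate run. */
    static vm_page_t
    alloc_contig_or_reclaim(int pflags, u_long npages, vm_paddr_t low,
        vm_paddr_t high, u_long alignment, vm_paddr_t boundary)
    {
        vm_page_t m;

        for (;;) {
            m = vm_page_alloc_contig(NULL, 0, pflags, npages, low, high,
                alignment, boundary, VM_MEMATTR_DEFAULT);
            if (m != NULL)
                return (m);             /* got a contiguous run */
            if (!vm_page_reclaim_contig(pflags, npages, low, high,
                alignment, boundary))
                return (NULL);          /* nothing left to relocate */
            /* reclaim freed a candidate run; retry the allocation */
        }
    }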
|
tegra_drm.h
    38 size_t npages; member
|
/f-stack/dpdk/drivers/common/sfc_efx/base/
mcdi_mon.c
    274 __out_ecount(npages) uint32_t *sensor_maskp, in efx_mcdi_sensor_info()
    275 __in size_t npages) in efx_mcdi_sensor_info() argument
    285 if (npages < 1) { in efx_mcdi_sensor_info()
    290 for (page = 0; page < npages; page++) { in efx_mcdi_sensor_info()
    311 if ((page != (npages - 1)) && in efx_mcdi_sensor_info()
    319 if (sensor_maskp[npages - 1] & (1U << MC_CMD_SENSOR_PAGE0_NEXT)) { in efx_mcdi_sensor_info()
    541 uint32_t npages; in mcdi_mon_cfg_build() local
    571 npages = 0; in mcdi_mon_cfg_build()
    572 if ((rc = efx_mcdi_sensor_info_npages(enp, &npages)) != 0) in mcdi_mon_cfg_build()
    576 encp->enc_mcdi_sensor_mask_size = npages * sizeof (uint32_t); in mcdi_mon_cfg_build()
    [all …]
|
efx_mcdi.c
    2577 int npages; in efx_mcdi_init_evq() local
    2581 npages = efx_evq_nbufs(enp, nevs, flags); in efx_mcdi_init_evq()
    2582 if (npages > INIT_EVQ_MAXNBUFS) { in efx_mcdi_init_evq()
    2589 req.emr_in_length = MC_CMD_INIT_EVQ_V2_IN_LEN(npages); in efx_mcdi_init_evq()
    2702 for (i = 0; i < npages; i++) { in efx_mcdi_init_evq()
    2804 int npages = efx_rxq_nbufs(enp, ndescs); in efx_mcdi_init_rxq() local
    2902 for (i = 0; i < npages; i++) { in efx_mcdi_init_rxq()
    2983 int npages; in efx_mcdi_init_txq() local
    2996 npages = efx_txq_nbufs(enp, ndescs); in efx_mcdi_init_txq()
    3004 req.emr_in_length = MC_CMD_INIT_TXQ_IN_LEN(npages); in efx_mcdi_init_txq()
    [all …]
|
/f-stack/freebsd/i386/i386/
vm86.c
    607 for (i = 0; i < vmc->npages; i++) in vm86_getpage()
    618 for (i = 0; i < vmc->npages; i++) in vm86_addpage()
    622 if (vmc->npages == VM86_PMAPSIZE) in vm86_addpage()
    630 i = vmc->npages++; in vm86_addpage()
    738 for (i = 0; i < vmc->npages; i++) { in vm86_datacall()
    747 for (i = 0; i < vmc->npages; i++) { in vm86_datacall()
    764 for (i = 0; i < vmc->npages; i++) { in vm86_datacall()
    770 for (i = 0; i < vmc->npages; i++) { in vm86_datacall()
    789 for (i = 0; i < vmc->npages; i++) in vm86_getaddr()
    801 for (i = 0; i < vmc->npages; i++) in vm86_getptr()
|
/f-stack/freebsd/amd64/sgx/
sgxvar.h
    83 uint32_t npages; member
|
sgx.c
    1077 sc->npages = sc->epc_size / SGX_PAGE_SIZE; in sgx_get_epc_area()
    1093 sc->epc_pages = malloc(sizeof(struct epc_page) * sc->npages, in sgx_get_epc_area()
    1096 for (i = 0; i < sc->npages; i++) { in sgx_get_epc_area()
    1160 sc->epc_base, sc->epc_size, sc->npages); in sgx_load()
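
sgx_get_epc_area() (lines 1077-1096) derives npages from the size of the Enclave Page Cache and then allocates one tracking record per page. A self-contained userland sketch of that sizing; the struct epc_page name comes from line 1093, but its fields and the region size here are placeholders.

    #include <stdio.h>
    #include <stdlib.h>

    #define SGX_PAGE_SIZE 4096          /* EPC pages are 4 KiB */

    /* Hypothetical per-page bookkeeping record. */
    struct epc_page {
        unsigned long base;
        int used;
    };

    int
    main(void)
    {
        size_t epc_size = 32UL * 1024 * 1024;   /* placeholder: 32 MiB EPC */
        size_t i, npages = epc_size / SGX_PAGE_SIZE;
        struct epc_page *pages;

        /* One tracking record per EPC page, as in sgx_get_epc_area(). */
        pages = calloc(npages, sizeof(*pages));
        if (pages == NULL)
            return (1);
        for (i = 0; i < npages; i++)
            pages[i].base = i * SGX_PAGE_SIZE;
        printf("%zu EPC pages tracked\n", npages);
        free(pages);
        return (0);
    }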
|
/f-stack/freebsd/sys/
pipe.h
    81 int npages; /* number of pages */ member
|
/f-stack/app/redis-5.0.5/deps/jemalloc/include/jemalloc/internal/
extent_structs.h
    207 atomic_zu_t npages; member
|
/f-stack/app/redis-5.0.5/deps/jemalloc/src/
extent.c
    292 atomic_store_zu(&extents->npages, 0, ATOMIC_RELAXED); in extents_init()
    305 return atomic_load_zu(&extents->npages, ATOMIC_RELAXED); in extents_npages_get()
    322 size_t npages = size >> LG_PAGE; in extents_insert_locked() local
    329 atomic_load_zu(&extents->npages, ATOMIC_RELAXED); in extents_insert_locked()
    330 atomic_store_zu(&extents->npages, cur_extents_npages + npages, in extents_insert_locked()
    348 size_t npages = size >> LG_PAGE; in extents_remove_locked() local
    354 atomic_load_zu(&extents->npages, ATOMIC_RELAXED); in extents_remove_locked()
    355 assert(cur_extents_npages >= npages); in extents_remove_locked()
    356 atomic_store_zu(&extents->npages, in extents_remove_locked()
    551 size_t extents_npages = atomic_load_zu(&extents->npages, in extents_evict()
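
extent.c maintains extents->npages with relaxed load/store pairs (lines 329-330 and 354-356): the writer already holds the extents mutex, so the atomics exist only so that extents_npages_get() can read the counter without taking the lock. A self-contained C11 rendering of that pattern, with LG_PAGE hard-coded as a placeholder for the value jemalloc derives at configure time:

    #include <stdatomic.h>
    #include <stddef.h>
    #include <stdio.h>

    #define LG_PAGE 12

    /* Counter mirroring extents->npages; relaxed ordering is enough because
     * updates happen under the owner's mutex and readers only need a value
     * that is not torn. */
    static atomic_size_t npages;

    static void
    extents_insert(size_t extent_size)
    {
        size_t n = extent_size >> LG_PAGE;
        size_t cur = atomic_load_explicit(&npages, memory_order_relaxed);

        atomic_store_explicit(&npages, cur + n, memory_order_relaxed);
    }

    static void
    extents_remove(size_t extent_size)
    {
        size_t n = extent_size >> LG_PAGE;
        size_t cur = atomic_load_explicit(&npages, memory_order_relaxed);

        atomic_store_explicit(&npages, cur - n, memory_order_relaxed);
    }

    int
    main(void)
    {
        extents_insert(8 << LG_PAGE);
        extents_remove(3 << LG_PAGE);
        printf("%zu pages cached\n",
            atomic_load_explicit(&npages, memory_order_relaxed));
        return (0);
    }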
|
/f-stack/freebsd/i386/include/
vm86.h
    109 int npages; member
|