Matches for the identifier page_offset in the linux-6.15 tree, grouped by directory and file (each match shows the source line number, the matched line, and the enclosing function):

/linux-6.15/drivers/infiniband/sw/rxe/

  rxe_mr.c
    260  mr_page_size(mr) - page_offset);  in rxe_mr_copy_xarray()
    268  page_offset = 0;  in rxe_mr_copy_xarray()
    288  PAGE_SIZE - page_offset);  in rxe_mr_copy_dma()
    297  page_offset = 0;  in rxe_mr_copy_dma()
    429  unsigned int page_offset;  in rxe_flush_pmem_iova()  local
    465  page_offset = 0;  in rxe_flush_pmem_iova()
    477  unsigned int page_offset;  in rxe_mr_do_atomic_op()  local
    506  if (unlikely(page_offset & 0x7)) {  in rxe_mr_do_atomic_op()
    518  va[page_offset >> 3] = swap_add;  in rxe_mr_do_atomic_op()
    521  va[page_offset >> 3] = value;  in rxe_mr_do_atomic_op()
    [all …]

  rxe_odp.c
    113  mr->page_offset = ib_umem_offset(&umem_odp->umem);  in rxe_odp_mr_init_user()
    262  unsigned int page_offset;  in rxe_odp_do_atomic_op()  local
    281  page_offset = iova & (BIT(umem_odp->page_shift) - 1);  in rxe_odp_do_atomic_op()
    286  if (unlikely(page_offset & 0x7)) {  in rxe_odp_do_atomic_op()
    294  value = *orig_val = va[page_offset >> 3];  in rxe_odp_do_atomic_op()
    298  va[page_offset >> 3] = swap_add;  in rxe_odp_do_atomic_op()
    301  va[page_offset >> 3] = value;  in rxe_odp_do_atomic_op()
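Both rxe files apply the same guard before an RDMA atomic: the byte offset of the target IOVA within its page (in the ODP case, iova & (BIT(page_shift) - 1)) must be 8-byte aligned, after which the kernel mapping of the page is treated as an array of u64 and indexed with page_offset >> 3. A minimal sketch of that pattern follows; the helper name and parameters are illustrative rather than the rxe API, and the real functions also serialize the update with a lock:

```c
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/types.h>

/* Hypothetical helper: compare-and-swap or fetch-and-add on a 64-bit value
 * located at page_offset inside an already-mapped page, indexing va[] the
 * way rxe_mr_do_atomic_op()/rxe_odp_do_atomic_op() do above. */
static int atomic_op_in_page(void *page_va, unsigned int page_offset,
			     bool cmp_swap, u64 compare, u64 swap_add,
			     u64 *orig_val)
{
	u64 *va = page_va;
	u64 value;

	/* Reject operands that are not naturally aligned to 8 bytes. */
	if (unlikely(page_offset & 0x7))
		return -EINVAL;

	value = *orig_val = va[page_offset >> 3];

	if (cmp_swap) {
		if (value == compare)
			va[page_offset >> 3] = swap_add;
	} else {
		value += swap_add;
		va[page_offset >> 3] = value;
	}

	return 0;
}
```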
/linux-6.15/drivers/infiniband/hw/mlx5/

  mem.c
    64  u64 page_offset;  in __mlx5_umem_find_best_quantized_pgoff()  local
    77  page_offset = ib_umem_dma_offset(umem, page_size);  in __mlx5_umem_find_best_quantized_pgoff()
    78  while (page_offset & ~(u64)(page_offset_mask * (page_size / scale))) {  in __mlx5_umem_find_best_quantized_pgoff()
    80  page_offset = ib_umem_dma_offset(umem, page_size);  in __mlx5_umem_find_best_quantized_pgoff()
    91  (unsigned long)page_offset / (page_size / scale);  in __mlx5_umem_find_best_quantized_pgoff()

  srq_cmd.c
    17  u32 page_offset = in->page_offset;  in get_pas_size()  local
    21  u32 rq_sz_po = rq_sz + (page_offset * po_quanta);  in get_pas_size()
    34  MLX5_SET(wq, wq, page_offset, in->page_offset);  in set_wq()
    47  MLX5_SET(srqc, srqc, page_offset, in->page_offset);  in set_srqc()
    62  in->page_offset = MLX5_GET(wq, wq, page_offset);  in get_wq()
    75  in->page_offset = MLX5_GET(srqc, srqc, page_offset);  in get_srqc()
    111  MLX5_ADAPTER_PAGE_SHIFT, page_offset, \
    112  64, &(in)->page_offset))
/linux-6.15/drivers/net/ethernet/cavium/liquidio/

  octeon_network.h
    298  pg_info->page_offset = 0;  in recv_buffer_alloc()
    300  skb_pg_info->page_offset = 0;  in recv_buffer_alloc()
    324  skb_pg_info->page_offset = 0;  in recv_buffer_fast_alloc()
    348  pg_info->page_offset = 0;  in recv_buffer_recycle()
    353  if (pg_info->page_offset == 0)  in recv_buffer_recycle()
    354  pg_info->page_offset = LIO_RXBUFFER_SZ;  in recv_buffer_recycle()
    356  pg_info->page_offset = 0;  in recv_buffer_recycle()
    384  skb_pg_info->page_offset = pg_info->page_offset;  in recv_buffer_reuse()
    398  pg_info->page_offset = 0;  in recv_buffer_destroy()
    415  pg_info->page_offset = 0;  in recv_buffer_free()
    [all …]
/linux-6.15/drivers/gpu/drm/vmwgfx/

  vmwgfx_page_dirty.c
    381  unsigned long page_offset;  in vmw_bo_vm_mkwrite()  local
    397  if (unlikely(page_offset >= PFN_UP(bo->resource->size))) {  in vmw_bo_vm_mkwrite()
    403  !test_bit(page_offset, &vbo->dirty->bitmap[0])) {  in vmw_bo_vm_mkwrite()
    406  __set_bit(page_offset, &dirty->bitmap[0]);  in vmw_bo_vm_mkwrite()
    407  dirty->start = min(dirty->start, page_offset);  in vmw_bo_vm_mkwrite()
    408  dirty->end = max(dirty->end, page_offset + 1);  in vmw_bo_vm_mkwrite()
    435  unsigned long page_offset;  in vmw_bo_vm_fault()  local
    437  page_offset = vmf->pgoff -  in vmw_bo_vm_fault()
    439  if (page_offset >= PFN_UP(bo->resource->size) ||  in vmw_bo_vm_fault()
    440  vmw_resources_clean(vbo, page_offset,  in vmw_bo_vm_fault()
    [all …]
/linux-6.15/drivers/vfio/pci/pds/

  lm.c
    201  size_t page_offset;  in pds_vfio_save_read()  local
    207  page_offset = (*pos) % PAGE_SIZE;  in pds_vfio_save_read()
    208  page = pds_vfio_get_file_page(lm_file, *pos - page_offset);  in pds_vfio_save_read()
    215  page_len = min_t(size_t, len, PAGE_SIZE - page_offset);  in pds_vfio_save_read()
    217  err = copy_to_user(buf, from_buff + page_offset, page_len);  in pds_vfio_save_read()
    299  size_t page_offset;  in pds_vfio_restore_write()  local
    305  page_offset = (*pos) % PAGE_SIZE;  in pds_vfio_restore_write()
    306  page = pds_vfio_get_file_page(lm_file, *pos - page_offset);  in pds_vfio_restore_write()
    313  page_len = min_t(size_t, len, PAGE_SIZE - page_offset);  in pds_vfio_restore_write()
    315  err = copy_from_user(to_buff + page_offset, buf, page_len);  in pds_vfio_restore_write()
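pds_vfio_save_read() and pds_vfio_restore_write() both follow the standard recipe for serving an arbitrary file position from a set of pages: reduce *pos modulo PAGE_SIZE to get the in-page offset, look up the page by the page-aligned position, and copy at most PAGE_SIZE - page_offset bytes per iteration. A condensed sketch of the read direction, assuming a hypothetical get_file_page() lookup in place of pds_vfio_get_file_page() and omitting the locking and extra error paths the driver has:

```c
#include <linux/errno.h>
#include <linux/highmem.h>
#include <linux/minmax.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/uaccess.h>

/* Hypothetical lookup of the page backing a page-aligned file position. */
struct page *get_file_page(void *file_ctx, loff_t pos);

static ssize_t file_pages_read(void *file_ctx, char __user *buf,
			       size_t len, loff_t *pos)
{
	ssize_t done = 0;

	while (len) {
		size_t page_offset = (*pos) % PAGE_SIZE;
		size_t page_len = min_t(size_t, len, PAGE_SIZE - page_offset);
		struct page *page = get_file_page(file_ctx, *pos - page_offset);
		void *from_buff;

		if (!page)
			return done ? done : -EINVAL;

		from_buff = kmap_local_page(page);
		if (copy_to_user(buf, from_buff + page_offset, page_len)) {
			kunmap_local(from_buff);
			return -EFAULT;
		}
		kunmap_local(from_buff);

		*pos += page_len;
		buf += page_len;
		len -= page_len;
		done += page_len;
	}

	return done;
}
```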
/linux-6.15/drivers/gpu/drm/ttm/

  ttm_bo_vm.c
    85  unsigned long page_offset)  in ttm_bo_io_mem_pfn()  argument
    90  return bdev->funcs->io_mem_pfn(bo, page_offset);  in ttm_bo_io_mem_pfn()
    92  return (bo->resource->bus.offset >> PAGE_SHIFT) + page_offset;  in ttm_bo_io_mem_pfn()
    188  unsigned long page_offset;  in ttm_bo_vm_fault_reserved()  local
    210  page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +  in ttm_bo_vm_fault_reserved()
    215  if (unlikely(page_offset >= PFN_UP(bo->base.size)))  in ttm_bo_vm_fault_reserved()
    247  pfn = ttm_bo_io_mem_pfn(bo, page_offset);  in ttm_bo_vm_fault_reserved()
    249  page = ttm->pages[page_offset];  in ttm_bo_vm_fault_reserved()
    277  if (unlikely(++page_offset >= page_last))  in ttm_bo_vm_fault_reserved()
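In ttm_bo_vm_fault_reserved(), page_offset is the page index inside the buffer object: it is derived from the faulting address relative to the VMA start plus the VMA's pgoff, minus the start of the object's mmap node, then bounds-checked against the object size and finally used either to compute an I/O-memory PFN or to index ttm->pages[]. A sketch of just that arithmetic, with node_start and object_size passed in as parameters (the real code reads them from the bo and its vma_node):

```c
#include <linux/mm.h>
#include <linux/pfn.h>

/* Translate a faulting user address into a page index within the mmapped
 * object, following the computation in ttm_bo_vm_fault_reserved() above.
 * node_start is the first pgoff of the object's mmap node; object_size is
 * the object size in bytes. */
static vm_fault_t fault_to_page_index(struct vm_fault *vmf,
				      unsigned long node_start,
				      size_t object_size,
				      unsigned long *page_offset)
{
	struct vm_area_struct *vma = vmf->vma;

	*page_offset = ((vmf->address - vma->vm_start) >> PAGE_SHIFT) +
		       vma->vm_pgoff - node_start;

	/* A fault past the end of the object cannot be satisfied. */
	if (unlikely(*page_offset >= PFN_UP(object_size)))
		return VM_FAULT_SIGBUS;

	return 0;
}
```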
/linux-6.15/drivers/scsi/fnic/

  fnic_trace.c
    122  fnic_trace_entries.page_offset[rd_idx];  in fnic_get_trace_data()
    164  fnic_trace_entries.page_offset[rd_idx];  in fnic_get_trace_data()
    550  fnic_trace_entries.page_offset =  in fnic_trace_buf_init()
    552  if (!fnic_trace_entries.page_offset) {  in fnic_trace_buf_init()
    589  if (fnic_trace_entries.page_offset) {  in fnic_trace_free()
    591  fnic_trace_entries.page_offset = NULL;  in fnic_trace_free()
    631  fc_trace_entries.page_offset =  in fnic_fc_trace_init()
    633  if (!fc_trace_entries.page_offset) {  in fnic_fc_trace_init()
    670  if (fc_trace_entries.page_offset) {  in fnic_fc_trace_free()
    672  fc_trace_entries.page_offset = NULL;  in fnic_fc_trace_free()
    [all …]
/linux-6.15/drivers/net/wireless/realtek/rtw89/

  efuse_be.c
    223  u32 hdr, page_offset;  in rtw89_eeprom_parser_be()  local
    231  page_offset = u32_get_bits(efuse_block->offset, RTW89_EFUSE_BLOCK_SIZE_MASK);  in rtw89_eeprom_parser_be()
    233  min = ALIGN_DOWN(page_offset, 2);  in rtw89_eeprom_parser_be()
    234  max = ALIGN(page_offset + size, 2);  in rtw89_eeprom_parser_be()
    289  if (log_idx == min && page_offset > min) {  in rtw89_eeprom_parser_be()
    290  log_map[log_idx - page_offset + 1] = val1;  in rtw89_eeprom_parser_be()
    292  page_offset + size < max) {  in rtw89_eeprom_parser_be()
    293  log_map[log_idx - page_offset] = val0;  in rtw89_eeprom_parser_be()
    295  log_map[log_idx - page_offset] = val0;  in rtw89_eeprom_parser_be()
    296  log_map[log_idx - page_offset + 1] = val1;  in rtw89_eeprom_parser_be()
/linux-6.15/drivers/net/ethernet/google/gve/

  gve_buffer_mgmt_dqo.c
    139  buf_state->page_info.page_offset = 0;  in gve_alloc_qpl_page_dqo()
    182  buf_state->page_info.page_offset;  in gve_try_recycle_buf()
    186  buf_state->page_info.page_offset += data_buffer_size;  in gve_try_recycle_buf()
    187  buf_state->page_info.page_offset &= (PAGE_SIZE - 1);  in gve_try_recycle_buf()
    192  if (buf_state->page_info.page_offset ==  in gve_try_recycle_buf()
    225  &buf_state->page_info.page_offset,  in gve_alloc_from_page_pool()
    308  buf_state->page_info.page_offset +  in gve_alloc_buffer()
/linux-6.15/scripts/

  leaking_addresses.pl
    330  state $page_offset = get_page_offset();
    336  if (hex($match) < $page_offset) {
    346  my $page_offset;
    354  $page_offset = get_kernel_config_option('CONFIG_PAGE_OFFSET');
    355  if (!$page_offset) {
    358  return $page_offset;
/linux-6.15/fs/hfs/

  bnode.c
    25  off += node->page_offset;  in hfs_bnode_read()
    83  off += node->page_offset;  in hfs_bnode_write()
    107  off += node->page_offset;  in hfs_bnode_clear()
    122  src += src_node->page_offset;  in hfs_bnode_copy()
    123  dst += dst_node->page_offset;  in hfs_bnode_copy()
    139  src += node->page_offset;  in hfs_bnode_move()
    140  dst += node->page_offset;  in hfs_bnode_move()
    294  node->page_offset = off & ~PAGE_MASK;  in __hfs_bnode_create()
    350  node->page_offset);  in hfs_bnode_find()
    440  memzero_page(*pagep, node->page_offset,  in hfs_bnode_create()
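hfs packs several b-tree nodes into the page cache pages of the tree file, so __hfs_bnode_create() records where the node begins inside its first page (off & ~PAGE_MASK) and every accessor first adds node->page_offset to the caller's offset before splitting it into a page index and an in-page offset. A rough sketch of that read path, under the assumption that the requested range stays within one page (field and helper names are illustrative; the real hfs_bnode_read() also handles ranges that cross a page boundary):

```c
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/string.h>

struct bnode_like {
	struct page **pages;		/* pages backing the node */
	unsigned int page_offset;	/* where the node starts in pages[0] */
};

/* Read len bytes at offset off within the node; assumes the range does not
 * cross a page boundary. */
static void bnode_read_simple(struct bnode_like *node, void *buf,
			      unsigned int off, unsigned int len)
{
	unsigned int pagenum, in_page;
	void *kaddr;

	off += node->page_offset;
	pagenum = off >> PAGE_SHIFT;
	in_page = off & ~PAGE_MASK;

	kaddr = kmap_local_page(node->pages[pagenum]);
	memcpy(buf, kaddr + in_page, len);
	kunmap_local(kaddr);
}
```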
/linux-6.15/net/ceph/

  messenger.c
    739  size_t *page_offset,  in ceph_msg_data_bio_next()  argument
    745  *page_offset = bv.bv_offset;  in ceph_msg_data_bio_next()
    794  size_t *page_offset,  in ceph_msg_data_bvecs_next()  argument
    800  *page_offset = bv.bv_offset;  in ceph_msg_data_bvecs_next()
    853  size_t *page_offset, size_t *length)  in ceph_msg_data_pages_next()  argument
    862  *page_offset = cursor->page_offset;  in ceph_msg_data_pages_next()
    877  cursor->page_offset = (cursor->page_offset + bytes) & ~PAGE_MASK;  in ceph_msg_data_pages_advance()
    878  if (!bytes || cursor->page_offset)  in ceph_msg_data_pages_advance()
    921  size_t *page_offset, size_t *length)  in ceph_msg_data_pagelist_next()  argument
    993  1, page_offset);  in ceph_msg_data_iter_next()
    [all …]
/linux-6.15/drivers/mtd/tests/

  nandbiterrs.c
    45  static unsigned page_offset;  variable
    46  module_param(page_offset, uint, S_IRUGO);
    47  MODULE_PARM_DESC(page_offset, "Page number relative to dev start");
    358  offset = (loff_t)page_offset * mtd->writesize;  in mtd_nandbiterrs_init()
    362  page_offset, offset, eraseblock);  in mtd_nandbiterrs_init()
/linux-6.15/drivers/nvmem/

  rave-sp-eeprom.c
    161  const unsigned int page_offset = offset % RAVE_SP_EEPROM_PAGE_SIZE;  in rave_sp_eeprom_page_access()  local
    172  if (WARN_ON(data_len > sizeof(page.data) - page_offset))  in rave_sp_eeprom_page_access()
    187  memcpy(&page.data[page_offset], data, data_len);  in rave_sp_eeprom_page_access()
    199  memcpy(data, &page.data[page_offset], data_len);  in rave_sp_eeprom_page_access()
/linux-6.15/drivers/gpu/drm/qxl/

  qxl_image.c
    166  unsigned int page_base, page_offset, out_offset;  in qxl_image_init_helper()  local
    175  page_offset = offset_in_page(out_offset);  in qxl_image_init_helper()
    176  size = min((int)(PAGE_SIZE - page_offset), remain);  in qxl_image_init_helper()
    179  k_data = ptr + page_offset;  in qxl_image_init_helper()
/linux-6.15/virt/kvm/

  pfncache.c
    258  unsigned long page_offset;  in __kvm_gpc_refresh()  local
    284  page_offset = offset_in_page(uhva);  in __kvm_gpc_refresh()
    295  page_offset = offset_in_page(gpa);  in __kvm_gpc_refresh()
    323  gpc->uhva += page_offset;  in __kvm_gpc_refresh()
    337  gpc->khva = old_khva + page_offset;  in __kvm_gpc_refresh()
/linux-6.15/fs/hfsplus/

  bnode.c
    27  off += node->page_offset;  in hfs_bnode_read()
    84  off += node->page_offset;  in hfs_bnode_write()
    112  off += node->page_offset;  in hfs_bnode_clear()
    136  src += src_node->page_offset;  in hfs_bnode_copy()
    137  dst += dst_node->page_offset;  in hfs_bnode_copy()
    190  src += node->page_offset;  in hfs_bnode_move()
    191  dst += node->page_offset;  in hfs_bnode_move()
    444  node->page_offset = off & ~PAGE_MASK;  in __hfs_bnode_create()
    501  node->page_offset);  in hfs_bnode_find()
    591  memzero_page(*pagep, node->page_offset,  in hfs_bnode_create()
/linux-6.15/drivers/net/ethernet/sfc/falcon/

  rx.c
    59  return page_address(buf->page) + buf->page_offset;  in ef4_rx_buf_va()
    157  unsigned int page_offset;  in ef4_init_rx_buffers()  local
    188  page_offset = sizeof(struct ef4_rx_page_state);  in ef4_init_rx_buffers()
    195  rx_buf->page_offset = page_offset + efx->rx_ip_align;  in ef4_init_rx_buffers()
    201  page_offset += efx->rx_page_buf_step;  in ef4_init_rx_buffers()
    202  } while (page_offset + efx->rx_page_buf_step <= PAGE_SIZE);  in ef4_init_rx_buffers()
    454  rx_buf->page, rx_buf->page_offset,  in ef4_rx_packet_gro()
    499  rx_buf->page_offset += hdr_len;  in ef4_rx_mk_skb()
    504  rx_buf->page, rx_buf->page_offset,  in ef4_rx_mk_skb()
    586  rx_buf->page_offset += efx->rx_prefix_size;  in ef4_rx_packet()
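ef4_init_rx_buffers() carves one page into several receive buffers: the first usable offset starts just past a struct ef4_rx_page_state kept at the head of the page, each buffer additionally reserves rx_ip_align bytes, and the loop keeps stepping by rx_page_buf_step while another whole buffer still fits below PAGE_SIZE. A stripped-down sketch of that carving loop (the structures are simplified stand-ins and the descriptor-ring bookkeeping is omitted):

```c
#include <linux/mm.h>

/* Simplified stand-in for the per-page state the driver keeps at the
 * start of each RX page. */
struct rx_page_state_like {
	unsigned int refcnt;
};

struct rx_buf_like {
	struct page *page;
	unsigned int page_offset;
};

/* Carve one page into RX buffers of buf_step bytes each, skipping the
 * page-state header and reserving ip_align bytes per buffer; returns how
 * many buffers were produced. */
static unsigned int carve_rx_buffers(struct page *page,
				     struct rx_buf_like *bufs,
				     unsigned int ip_align,
				     unsigned int buf_step)
{
	unsigned int page_offset = sizeof(struct rx_page_state_like);
	unsigned int count = 0;

	do {
		bufs[count].page = page;
		bufs[count].page_offset = page_offset + ip_align;
		count++;
		page_offset += buf_step;
	} while (page_offset + buf_step <= PAGE_SIZE);

	return count;
}
```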
/linux-6.15/drivers/iommu/iommufd/

  pages.c
    395  unsigned int page_offset = 0;  in batch_from_domain()  local
    401  page_offset = area->page_offset;  in batch_from_domain()
    411  iova += PAGE_SIZE - page_offset;  in batch_from_domain()
    412  page_offset = 0;  in batch_from_domain()
    423  unsigned int page_offset = 0;  in raw_pages_from_domain()  local
    429  page_offset = area->page_offset;  in raw_pages_from_domain()
    434  page_offset = 0;  in raw_pages_from_domain()
    495  unsigned int page_offset = 0;  in batch_to_domain()  local
    504  page_offset = area->page_offset;  in batch_to_domain()
    510  page_offset);  in batch_to_domain()
    [all …]
/linux-6.15/drivers/infiniband/hw/hfi1/

  pin_system.c
    289  unsigned int page_offset;  in add_mapping_to_sdma_packet()  local
    310  page_offset = start - ALIGN_DOWN(start, PAGE_SIZE);  in add_mapping_to_sdma_packet()
    311  from_this_page = PAGE_SIZE - page_offset;  in add_mapping_to_sdma_packet()
    327  page_offset, from_this_page,  in add_mapping_to_sdma_packet()
    338  ret, page_index, page_offset, from_this_page);  in add_mapping_to_sdma_packet()
/linux-6.15/tools/testing/selftests/powerpc/primitives/

  load_unaligned_zeropad.c
    102  static int do_one_test(char *p, int page_offset)  in do_one_test()  argument
    114  …printf("offset %u load_unaligned_zeropad returned 0x%lx, should be 0x%lx\n", page_offset, got, sho…  in do_one_test()
/linux-6.15/drivers/gpu/drm/panfrost/

  panfrost_mmu.c
    449  pgoff_t page_offset;  in panfrost_mmu_map_fault_addr()  local
    468  page_offset = addr >> PAGE_SHIFT;  in panfrost_mmu_map_fault_addr()
    469  page_offset -= bomapping->mmnode.start;  in panfrost_mmu_map_fault_addr()
    495  if (pages[page_offset]) {  in panfrost_mmu_map_fault_addr()
    504  for (i = page_offset; i < page_offset + NUM_FAULT_PAGES; i++) {  in panfrost_mmu_map_fault_addr()
    520  sgt = &bo->sgts[page_offset / (SZ_2M / PAGE_SIZE)];  in panfrost_mmu_map_fault_addr()
    521  ret = sg_alloc_table_from_pages(sgt, pages + page_offset,  in panfrost_mmu_map_fault_addr()
/linux-6.15/drivers/net/ethernet/mellanox/mlx4/

  en_rx.c
    72  frags->page_offset = priv->rx_headroom;  in mlx4_en_alloc_frags()
    77  rx_desc->data[i].addr = cpu_to_be64(dma + frags->page_offset);  in mlx4_en_alloc_frags()
    458  __skb_fill_page_desc(skb, nr, page, frags->page_offset,  in mlx4_en_complete_rx_desc()
    463  frags->page_offset ^= PAGE_SIZE / 2;  in mlx4_en_complete_rx_desc()
    475  frags->page_offset += sz_align;  in mlx4_en_complete_rx_desc()
    722  va = page_address(frags[0].page) + frags[0].page_offset;  in mlx4_en_process_rx_cq()
    752  dma += frags[0].page_offset;  in mlx4_en_process_rx_cq()
    792  dma += frags[0].page_offset;  in mlx4_en_process_rx_cq()
    797  xdp_prepare_buff(&mxbuf.xdp, va - frags[0].page_offset,  in mlx4_en_process_rx_cq()
    798  frags[0].page_offset, length, true);  in mlx4_en_process_rx_cq()
    [all …]
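mlx4_en_complete_rx_desc() splits each RX page into two halves and, when a completed buffer can be recycled, flips between them with frags->page_offset ^= PAGE_SIZE / 2, so the NIC can fill one half while the stack still holds the other. A toy sketch of that flip (the structure and helper are made up for illustration; the real code also checks the page refcount and DMA state before reusing the page):

```c
#include <linux/mm.h>
#include <linux/types.h>

struct rx_frag_like {
	struct page *page;
	u32 page_offset;	/* toggles between the two halves of the page */
};

/* Return the CPU address of the half currently owned by the stack and
 * point the descriptor at the other half for the next packet. */
static void *use_and_flip_half_page(struct rx_frag_like *frag)
{
	void *va = page_address(frag->page) + frag->page_offset;

	frag->page_offset ^= PAGE_SIZE / 2;
	return va;
}
```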