/linux-6.15/net/sctp/

inqueue.c
    45  chunk->skb = chunk->head_skb;   in sctp_inq_chunk_free()
    132  if (chunk->head_skb == chunk->skb) {   in sctp_inq_pop()
    133  chunk->skb = skb_shinfo(chunk->skb)->frag_list;   in sctp_inq_pop()
    137  chunk->skb = chunk->skb->next;   in sctp_inq_pop()
    147  skb_pull(chunk->skb, chunk->chunk_end - chunk->skb->data);   in sctp_inq_pop()
    169  chunk->head_skb = chunk->skb;   in sctp_inq_pop()
    172  if (chunk->head_skb && chunk->skb->data_len == chunk->skb->len)   in sctp_inq_pop()
    173  chunk->skb = skb_shinfo(chunk->skb)->frag_list;   in sctp_inq_pop()
    201  cb->chunk = head_cb->chunk;   in sctp_inq_pop()
    217  chunk->chunk_end = skb_tail_pointer(chunk->skb);   in sctp_inq_pop()
    [all …]

chunk.c
    60  struct sctp_chunk *chunk;   in sctp_datamsg_free() local
    66  sctp_chunk_free(chunk);   in sctp_datamsg_free()
    86  sctp_chunk_put(chunk);   in sctp_datamsg_destroy()
    90  asoc = chunk->asoc;   in sctp_datamsg_destroy()
    111  sctp_chunk_put(chunk);   in sctp_datamsg_destroy()
    135  chunk->msg = msg;   in sctp_datamsg_assign()
    264  if (!chunk) {   in sctp_datamsg_from_user()
    273  chunk->shkey = shkey;   in sctp_datamsg_from_user()
    276  __skb_pull(chunk->skb, (__u8 *)chunk->chunk_hdr -   in sctp_datamsg_from_user()
    308  if (!chunk->has_tsn &&   in sctp_chunk_abandoned()
    [all …]

output.c
    124  if (chunk)   in sctp_packet_config()
    187  packet, packet->size, chunk, chunk->skb ? chunk->skb->len : -1);   in sctp_packet_transmit_chunk()
    203  chunk);   in sctp_packet_transmit_chunk()
    261  if (!chunk->auth)   in sctp_packet_bundle_auth()
    362  if (chunk->asoc)   in __sctp_packet_append_chunk()
    492  padding = SCTP_PAD4(chunk->skb->len) - chunk->skb->len;   in sctp_packet_pack()
    500  skb_put_data(nskb, chunk->skb->data, chunk->skb->len);   in sctp_packet_pack()
    503  chunk,   in sctp_packet_pack()
    506  chunk->has_tsn ? ntohl(chunk->subh.data_hdr->tsn) : 0,   in sctp_packet_pack()
    507  ntohs(chunk->chunk_hdr->length), chunk->skb->len,   in sctp_packet_pack()
    [all …]

outqueue.c
    230  sctp_chunk_free(chunk);   in __sctp_outq_teardown()
    239  sctp_chunk_free(chunk);   in __sctp_outq_teardown()
    248  sctp_chunk_free(chunk);   in __sctp_outq_teardown()
    257  sctp_chunk_free(chunk);   in __sctp_outq_teardown()
    263  sctp_chunk_free(chunk);   in __sctp_outq_teardown()
    286  chunk && chunk->chunk_hdr ?   in sctp_outq_tail()
    295  __func__, q, chunk, chunk && chunk->chunk_hdr ?   in sctp_outq_tail()
    499  if (chunk->transport)   in sctp_retransmit_mark()
    1104  __func__, ctx->q, chunk, chunk && chunk->chunk_hdr ?   in sctp_outq_flush_data()
    1107  chunk->skb ? chunk->skb->head : NULL, chunk->skb ?   in sctp_outq_flush_data()
    [all …]

sm_statefuns.c
    376  (struct sctp_init_chunk *)chunk->chunk_hdr, chunk,   in sctp_sf_do_5_1B_init()
    405  chunk->subh.init_hdr = (struct sctp_inithdr *)chunk->skb->data;   in sctp_sf_do_5_1B_init()
    426  if (!sctp_process_init(new_asoc, chunk, sctp_source(chunk),   in sctp_sf_do_5_1B_init()
    544  chunk->subh.init_hdr = (struct sctp_inithdr *)chunk->skb->data;   in sctp_sf_do_5_1C_ack()
    549  (struct sctp_init_chunk *)chunk->chunk_hdr, chunk,   in sctp_sf_do_5_1C_ack()
    749  if (!pskb_pull(chunk->skb, ntohs(chunk->chunk_hdr->length) -   in sctp_sf_do_5_1D_ce()
    935  chunk->head_skb ?: chunk->skb))   in sctp_sf_do_5_1E_ca()
    1560  (struct sctp_init_chunk *)chunk->chunk_hdr, chunk,   in sctp_sf_do_unexpected_init()
    1612  if (!sctp_process_init(new_asoc, chunk, sctp_source(chunk),   in sctp_sf_do_unexpected_init()
    2280  chunk->head_skb ?: chunk->skb)) {   in sctp_sf_do_5_2_4_dupcook()
    [all …]

sm_make_chunk.c
    594  if (chunk)   in sctp_make_cookie_echo()
    635  if (retval && chunk && chunk->transport)   in sctp_make_cookie_ack()
    693  if (chunk)   in sctp_make_cwr()
    872  if (chunk)   in sctp_make_shutdown()
    946  if (chunk && chunk->chunk_hdr &&   in sctp_make_abort()
    1000  if (chunk)   in sctp_make_abort_no_data()
    1067  chunk->chunk_end = skb_tail_pointer(chunk->skb);   in sctp_addto_param()
    1217  if (chunk)   in sctp_make_heartbeat_ack()
    1274  if (chunk)   in sctp_make_op_error_space()
    1547  chunk->chunk_end = skb_tail_pointer(chunk->skb);   in sctp_addto_chunk()
    [all …]
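The inqueue.c hits above all follow one pattern: the dequeued chunk keeps a head_skb, and the parser steps through the fragment chain hanging off skb_shinfo(head)->frag_list via ->next. A minimal, self-contained sketch of that traversal follows; the helper name is invented and this is not part of the SCTP code:

    #include <linux/skbuff.h>

    /* Count the skbs chained off an skb's frag_list, the same
     * head_skb -> frag_list -> next walk done by sctp_inq_pop() above.
     * Hypothetical helper, for illustration only. */
    static unsigned int demo_count_frag_list(struct sk_buff *head)
    {
            struct sk_buff *frag;
            unsigned int n = 0;

            for (frag = skb_shinfo(head)->frag_list; frag; frag = frag->next)
                    n++;

            return n;
    }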
/linux-6.15/net/sunrpc/xprtrdma/

svc_rdma_pcl.c
    24  kfree(chunk);   in pcl_free()
    32  chunk = kmalloc(struct_size(chunk, ch_segments, segcount), GFP_KERNEL);   in pcl_alloc_chunk()
    33  if (!chunk)   in pcl_alloc_chunk()
    40  return chunk;   in pcl_alloc_chunk()
    74  segment = &chunk->ch_segments[chunk->ch_segcount];   in pcl_set_read_segment()
    121  if (!chunk)   in pcl_alloc_call()
    174  if (!chunk) {   in pcl_alloc_read()
    176  if (!chunk)   in pcl_alloc_read()
    214  if (!chunk)   in pcl_alloc_write()
    277  if (!chunk || !chunk->ch_payload_length)   in pcl_process_nonpayloads()
    [all …]
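The pcl_alloc_chunk() hit above sizes a chunk with a trailing flexible array using the overflow-checked struct_size() helper. A generic sketch of that allocation pattern follows; the type and field names are made up and are not the real svc_rdma structures:

    #include <linux/overflow.h>
    #include <linux/slab.h>

    /* Hypothetical chunk with a trailing flexible array. */
    struct demo_chunk {
            unsigned int nr_segments;
            u32 segments[];
    };

    static struct demo_chunk *demo_chunk_alloc(unsigned int segcount)
    {
            struct demo_chunk *chunk;

            /* struct_size() checks the size arithmetic for overflow. */
            chunk = kmalloc(struct_size(chunk, segments, segcount), GFP_KERNEL);
            if (!chunk)
                    return NULL;

            chunk->nr_segments = segcount;
            return chunk;
    }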
/linux-6.15/mm/

percpu-vm.c
    17  WARN_ON(chunk->immutable);   in pcpu_chunk_page()
    229  chunk);   in pcpu_map_pages()
    339  if (!chunk)   in pcpu_create_chunk()
    349  chunk->data = vms;   in pcpu_create_chunk()
    355  return chunk;   in pcpu_create_chunk()
    360  if (!chunk)   in pcpu_destroy_chunk()
    366  if (chunk->data)   in pcpu_destroy_chunk()
    368  pcpu_free_chunk(chunk);   in pcpu_destroy_chunk()
    397  if (chunk == pcpu_first_chunk || chunk == pcpu_reserved_chunk)   in pcpu_should_reclaim_chunk()
    406  return ((chunk->isolated && chunk->nr_empty_pop_pages) ||   in pcpu_should_reclaim_chunk()
    [all …]

percpu.c
    219  if (!chunk)   in pcpu_addr_in_chunk()
    222  start_addr = chunk->base_addr + chunk->start_offset;   in pcpu_addr_in_chunk()
    601  if (chunk != pcpu_reserved_chunk && !chunk->isolated)   in pcpu_update_empty_pages()
    1390  bitmap_fill(chunk->populated, chunk->nr_pages);   in pcpu_alloc_first_chunk()
    1391  chunk->nr_populated = chunk->nr_pages;   in pcpu_alloc_first_chunk()
    1392  chunk->nr_empty_pop_pages = chunk->nr_pages;   in pcpu_alloc_first_chunk()
    1431  if (!chunk)   in pcpu_alloc_chunk()
    1466  chunk->free_bytes = chunk->nr_pages * PAGE_SIZE;   in pcpu_alloc_chunk()
    1486  if (!chunk)   in pcpu_free_chunk()
    1636  if (likely(chunk && chunk->obj_exts)) {   in pcpu_memcg_post_alloc_hook()
    [all …]

percpu-km.c
    56  struct pcpu_chunk *chunk;   in pcpu_create_chunk() local
    61  chunk = pcpu_alloc_chunk(gfp);   in pcpu_create_chunk()
    62  if (!chunk)   in pcpu_create_chunk()
    67  pcpu_free_chunk(chunk);   in pcpu_create_chunk()
    74  chunk->data = pages;   in pcpu_create_chunk()
    75  chunk->base_addr = page_address(pages);   in pcpu_create_chunk()
    78  pcpu_chunk_populated(chunk, 0, nr_pages);   in pcpu_create_chunk()
    84  return chunk;   in pcpu_create_chunk()
    91  if (!chunk)   in pcpu_destroy_chunk()
    97  if (chunk->data)   in pcpu_destroy_chunk()
    [all …]

percpu-stats.c
    35  struct pcpu_chunk *chunk;   in find_max_nr_alloc() local
    55  struct pcpu_block_md *chunk_md = &chunk->chunk_md;   in chunk_map_stats()
    69  last_alloc = find_last_bit(chunk->alloc_map,   in chunk_map_stats()
    70  pcpu_chunk_map_bits(chunk) -   in chunk_map_stats()
    88  if (test_bit(start, chunk->alloc_map)) {   in chunk_map_stats()
    121  P("nr_alloc", chunk->nr_alloc);   in chunk_map_stats()
    122  P("max_alloc_size", chunk->max_alloc_size);   in chunk_map_stats()
    125  P("free_bytes", chunk->free_bytes);   in chunk_map_stats()
    137  struct pcpu_chunk *chunk;   in percpu_stats_show() local
    207  if (chunk == pcpu_first_chunk)   in percpu_stats_show()
    [all …]
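percpu-vm.c, percpu.c, percpu-km.c and percpu-stats.c are the internals of the per-CPU allocator: each struct pcpu_chunk supplies the backing area that alloc_percpu() carves per-CPU objects out of. A small consumer-side sketch of that public API follows; the variable and function names are invented for illustration:

    #include <linux/percpu.h>
    #include <linux/cpumask.h>
    #include <linux/init.h>
    #include <linux/errno.h>

    /* Hypothetical dynamically allocated per-CPU counter. */
    static unsigned long __percpu *demo_counter;

    static int __init demo_counter_init(void)
    {
            int cpu;

            demo_counter = alloc_percpu(unsigned long); /* served from a pcpu_chunk */
            if (!demo_counter)
                    return -ENOMEM;

            for_each_possible_cpu(cpu)
                    *per_cpu_ptr(demo_counter, cpu) = 0;

            return 0;
    }

    static void demo_counter_exit(void)
    {
            free_percpu(demo_counter);
    }

Hot paths would typically bump such a counter with this_cpu_inc(*demo_counter), touching only the calling CPU's copy.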
/linux-6.15/drivers/s390/cio/

itcw.c
    183  void *chunk;   in itcw_init() local
    197  return chunk;   in itcw_init()
    198  itcw = chunk;   in itcw_init()
    213  return chunk;   in itcw_init()
    221  return chunk;   in itcw_init()
    231  return chunk;   in itcw_init()
    239  return chunk;   in itcw_init()
    245  return chunk;   in itcw_init()
    252  return chunk;   in itcw_init()
    259  return chunk;   in itcw_init()
    [all …]
/linux-6.15/kernel/trace/

pid_list.c
    25  chunk->next = NULL;   in get_lower_chunk()
    33  return chunk;   in get_lower_chunk()
    57  return chunk;   in get_upper_chunk()
    357  chunk = kzalloc(sizeof(*chunk), GFP_NOWAIT);   in pid_list_refill_irq()
    358  if (!chunk)   in pid_list_refill_irq()
    368  chunk = kzalloc(sizeof(*chunk), GFP_NOWAIT);   in pid_list_refill_irq()
    369  if (!chunk)   in pid_list_refill_irq()
    430  chunk = kzalloc(sizeof(*chunk), GFP_KERNEL);   in trace_pid_list_alloc()
    431  if (!chunk)   in trace_pid_list_alloc()
    441  chunk = kzalloc(sizeof(*chunk), GFP_KERNEL);   in trace_pid_list_alloc()
    [all …]
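pid_list.c keeps the set of traced PIDs in a sparse bitmap assembled from kzalloc'd "upper" and "lower" chunks held on free lists (get_upper_chunk()/get_lower_chunk() above): roughly, a PID's high bits walk the chunk pointers and its low bits select a bit in a lower chunk's bitmap. A purely illustrative index split of that kind; the constants below are invented and are not the tracing code's real layout:

    /* Hypothetical two-level split, for illustration only. */
    #define DEMO_LOWER_BITS 14
    #define DEMO_LOWER_MASK ((1u << DEMO_LOWER_BITS) - 1)

    static inline unsigned int demo_upper_index(unsigned int pid)
    {
            return pid >> DEMO_LOWER_BITS;  /* which chunk */
    }

    static inline unsigned int demo_lower_index(unsigned int pid)
    {
            return pid & DEMO_LOWER_MASK;   /* bit inside the chunk's bitmap */
    }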
/linux-6.15/fs/xfs/

xfs_zone_gc.c
    702  chunk->ip = ip;   in xfs_zone_gc_start_chunk()
    711  chunk->oz = oz;   in xfs_zone_gc_start_chunk()
    715  bio_add_folio_nofail(bio, chunk->scratch->folio, chunk->len,   in xfs_zone_gc_start_chunk()
    717  chunk->scratch->offset += chunk->len;   in xfs_zone_gc_start_chunk()
    749  chunk->bio.bi_iter.bi_sector = chunk->new_daddr;   in xfs_zone_gc_submit_write()
    825  bio_add_folio_nofail(&chunk->bio, chunk->scratch->folio, chunk->len,   in xfs_zone_gc_write_chunk()
    849  chunk->scratch->freed += chunk->len;   in xfs_zone_gc_finish_chunk()
    850  if (chunk->scratch->freed == chunk->scratch->offset) {   in xfs_zone_gc_finish_chunk()
    872  chunk->new_daddr = chunk->bio.bi_iter.bi_sector;   in xfs_zone_gc_finish_chunk()
    873  error = xfs_zoned_end_io(ip, chunk->offset, chunk->len,   in xfs_zone_gc_finish_chunk()
    [all …]
/linux-6.15/drivers/net/ethernet/mellanox/mlx4/

icm.c
    92  kfree(chunk);   in mlx4_free_icm()
    160  if (!chunk) {   in mlx4_alloc_icm()
    161  chunk = kzalloc_node(sizeof(*chunk),   in mlx4_alloc_icm()
    165  if (!chunk) {   in mlx4_alloc_icm()
    166  chunk = kzalloc(sizeof(*chunk),   in mlx4_alloc_icm()
    169  if (!chunk)   in mlx4_alloc_icm()
    188  &chunk->buf[chunk->npages],   in mlx4_alloc_icm()
    191  ret = mlx4_alloc_icm_pages(&chunk->sg[chunk->npages],   in mlx4_alloc_icm()
    205  ++chunk->nsg;   in mlx4_alloc_icm()
    208  chunk->sg, chunk->npages,   in mlx4_alloc_icm()
    [all …]
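mlx4 builds its ICM tables as chunks of scatterlist entries, each entry backed by a higher-order page allocation (the mlx4_alloc_icm_pages() call above). A stripped-down sketch of that one step using only generic helpers; the function name is made up and this is not the driver's code:

    #include <linux/mm.h>
    #include <linux/scatterlist.h>
    #include <linux/errno.h>

    /* Back one scatterlist entry with a 2^order page allocation. */
    static int demo_alloc_sg_pages(struct scatterlist *sg, unsigned int order,
                                   gfp_t gfp)
    {
            struct page *page;

            page = alloc_pages(gfp, order);
            if (!page)
                    return -ENOMEM;

            sg_set_page(sg, page, PAGE_SIZE << order, 0);
            return 0;
    }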
/linux-6.15/sound/soc/codecs/

ntpfw.c
    47  if (buf_size <= sizeof(*chunk)) {   in ntpfw_verify_chunk()
    52  if (chunk->step != 2 && chunk->step != 5) {   in ntpfw_verify_chunk()
    57  chunk_size = be16_to_cpu(chunk->length);   in ntpfw_verify_chunk()
    63  if (chunk_size % chunk->step) {   in ntpfw_verify_chunk()
    78  ret = i2c_master_send(i2c, &chunk->data[i], chunk->step);   in ntpfw_send_chunk()
    79  if (ret != chunk->step) {   in ntpfw_send_chunk()
    91  const struct ntpfw_chunk *chunk;   in ntpfw_load() local
    113  chunk = (struct ntpfw_chunk *)data;   in ntpfw_load()
    120  ret = ntpfw_send_chunk(i2c, chunk);   in ntpfw_load()
    124  data += be16_to_cpu(chunk->length) + sizeof(*chunk);   in ntpfw_load()
    [all …]
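ntpfw.c parses a downloaded firmware image as a sequence of chunks, each a small header (big-endian length plus a per-write step size) followed by payload bytes pushed to the codec with i2c_master_send(). A simplified sketch of that parse-and-send loop follows; the demo_fw_chunk layout is invented (it drops the step field) and is not the real struct ntpfw_chunk:

    #include <linux/firmware.h>
    #include <linux/i2c.h>
    #include <linux/errno.h>
    #include <asm/byteorder.h>

    /* Hypothetical chunk layout: big-endian payload length, then payload. */
    struct demo_fw_chunk {
            __be16 length;
            u8 data[];
    } __packed;

    static int demo_fw_push(struct i2c_client *i2c, const char *name)
    {
            const struct firmware *fw;
            const u8 *pos, *end;
            int ret;

            ret = request_firmware(&fw, name, &i2c->dev);
            if (ret)
                    return ret;

            pos = fw->data;
            end = fw->data + fw->size;

            while (pos + sizeof(struct demo_fw_chunk) <= end) {
                    const struct demo_fw_chunk *chunk = (const void *)pos;
                    u16 len = be16_to_cpu(chunk->length);

                    if (pos + sizeof(*chunk) + len > end) {
                            ret = -EINVAL;  /* truncated image */
                            break;
                    }

                    ret = i2c_master_send(i2c, chunk->data, len);
                    if (ret < 0)
                            break;
                    if (ret != len) {
                            ret = -EIO;     /* short write */
                            break;
                    }

                    ret = 0;
                    pos += sizeof(*chunk) + len;
            }

            release_firmware(fw);
            return ret;
    }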
/linux-6.15/kernel/

audit_tree.c
    136  kfree(chunk);   in free_chunk()
    194  if (!chunk)   in alloc_chunk()
    205  return chunk;   in alloc_chunk()
    286  audit_mark(mark)->chunk = chunk;   in replace_mark_chunk()
    287  if (chunk)   in replace_mark_chunk()
    402  if (!chunk) {   in create_chunk()
    410  kfree(chunk);   in create_chunk()
    417  kfree(chunk);   in create_chunk()
    489  if (!chunk) {   in tag_chunk()
    503  p = &chunk->owners[chunk->count - 1];   in tag_chunk()
    [all …]
/linux-6.15/lib/

genalloc.c
    42  return chunk->end_addr - chunk->start_addr + 1;   in chunk_size()
    196  chunk->phys_addr = phys;   in gen_pool_add_owner()
    197  chunk->start_addr = virt;   in gen_pool_add_owner()
    199  chunk->owner = owner;   in gen_pool_add_owner()
    224  if (addr >= chunk->start_addr && addr <= chunk->end_addr) {   in gen_pool_virt_to_phys()
    225  paddr = chunk->phys_addr + (addr - chunk->start_addr);   in gen_pool_virt_to_phys()
    257  vfree(chunk);   in gen_pool_destroy()
    320  *owner = chunk->owner;   in gen_pool_alloc_algo_owner()
    504  if (addr >= chunk->start_addr && addr <= chunk->end_addr) {   in gen_pool_free_owner()
    512  *owner = chunk->owner;   in gen_pool_free_owner()
    [all …]
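genalloc manages a pool as a list of gen_pool_chunk regions; each gen_pool_add() call above creates one chunk covering the added address range. A minimal setup sketch of that public API follows; the SRAM naming and the 32-byte granularity are arbitrary choices for illustration:

    #include <linux/genalloc.h>

    /* sram_virt/sram_size are assumed to describe an already-mapped
     * device-local SRAM region; names are illustrative only. */
    static struct gen_pool *demo_sram_pool_create(unsigned long sram_virt,
                                                  size_t sram_size)
    {
            struct gen_pool *pool;

            pool = gen_pool_create(5, -1);  /* 2^5 = 32-byte granularity */
            if (!pool)
                    return NULL;

            /* Creates one gen_pool_chunk for the whole region. */
            if (gen_pool_add(pool, sram_virt, sram_size, -1)) {
                    gen_pool_destroy(pool);
                    return NULL;
            }

            return pool;
    }

Allocations then come out of those chunks with gen_pool_alloc(pool, size) and are returned with gen_pool_free(pool, addr, size).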
/linux-6.15/drivers/gpu/drm/amd/amdgpu/

amdgpu_ring_mux.c
    104  if (chunk->sync_seq > last_seq && chunk->sync_seq <= seq) {   in amdgpu_mux_resubmit_chunks()
    106  chunk->sync_seq,   in amdgpu_mux_resubmit_chunks()
    108  if (chunk->sync_seq ==   in amdgpu_mux_resubmit_chunks()
    119  chunk->start,   in amdgpu_mux_resubmit_chunks()
    120  chunk->end);   in amdgpu_mux_resubmit_chunks()
    183  list_del(&chunk->entry);   in amdgpu_ring_mux_fini()
    448  if (!chunk) {   in amdgpu_ring_mux_start_ib()
    453  chunk->start = ring->wptr;   in amdgpu_ring_mux_start_ib()
    497  if (!chunk) {   in amdgpu_ring_mux_ib_mark_offset()
    530  if (!chunk) {   in amdgpu_ring_mux_end_ib()
    [all …]
/linux-6.15/drivers/gpu/drm/nouveau/

nouveau_dmem.c
    99  return chunk->drm;   in page_to_drm()
    121  chunk->callocated--;   in nouveau_dmem_page_free()
    237  chunk = kzalloc(sizeof(*chunk), GFP_KERNEL);   in nouveau_dmem_chunk_alloc()
    238  if (chunk == NULL) {   in nouveau_dmem_chunk_alloc()
    251  chunk->drm = drm;   in nouveau_dmem_chunk_alloc()
    261  &chunk->bo);   in nouveau_dmem_chunk_alloc()
    287  chunk->callocated++;   in nouveau_dmem_chunk_alloc()
    302  kfree(chunk);   in nouveau_dmem_chunk_alloc()
    319  chunk->callocated++;   in nouveau_dmem_page_alloc_locked()
    432  list_del(&chunk->list);   in nouveau_dmem_fini()
    [all …]
/linux-6.15/drivers/infiniband/hw/irdma/

pble.c
    18  struct irdma_chunk *chunk;   in irdma_destroy_pble_prm() local
    23  list_del(&chunk->list);   in irdma_destroy_pble_prm()
    27  kfree(chunk->chunkmem.va);   in irdma_destroy_pble_prm()
    90  struct irdma_chunk *chunk = info->chunk;   in add_sd_direct() local
    112  chunk->size, chunk->size, chunk->vaddr, chunk->fpm_addr);   in add_sd_direct()
    145  struct irdma_chunk *chunk = info->chunk;   in add_bp_pages() local
    160  addr = chunk->vaddr;   in add_bp_pages()
    230  chunk = chunkmem.va;   in add_pble_prm()
    233  chunk->dev = dev;   in add_pble_prm()
    240  info.chunk = chunk;   in add_pble_prm()
    [all …]
/linux-6.15/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/

dr_icm_pool.c
    84  return (u64)offset * chunk->seg;   in mlx5dr_icm_pool_get_chunk_mr_addr()
    96  return (u64)chunk->buddy_mem->icm_mr->icm_start_addr + size * chunk->seg;   in mlx5dr_icm_pool_get_chunk_icm_addr()
    102  chunk->buddy_mem->pool->icm_type);   in mlx5dr_icm_pool_get_chunk_byte_size()
    221  memset(chunk->ste_arr, 0,   in dr_icm_chunk_ste_init()
    329  chunk->seg = seg;   in dr_icm_chunk_init()
    330  chunk->size = chunk_size;   in dr_icm_chunk_init()
    331  chunk->buddy_mem = buddy_mem_pool;   in dr_icm_chunk_init()
    453  if (!chunk)   in mlx5dr_icm_alloc_chunk()
    464  return chunk;   in mlx5dr_icm_alloc_chunk()
    483  hot_chunk->seg = chunk->seg;   in mlx5dr_icm_free_chunk()
    [all …]
/linux-6.15/drivers/infiniband/hw/mthca/

mthca_memfree.c
    69  dma_unmap_sg(&dev->pdev->dev, chunk->mem, chunk->npages,   in mthca_free_icm_pages()
    101  kfree(chunk);   in mthca_free_icm()
    158  if (!chunk) {   in mthca_alloc_icm()
    159  chunk = kmalloc(sizeof *chunk,   in mthca_alloc_icm()
    161  if (!chunk)   in mthca_alloc_icm()
    175  &chunk->mem[chunk->npages],   in mthca_alloc_icm()
    178  ret = mthca_alloc_icm_pages(&chunk->mem[chunk->npages],   in mthca_alloc_icm()
    185  ++chunk->nsg;   in mthca_alloc_icm()
    187  chunk->nsg =   in mthca_alloc_icm()
    197  chunk = NULL;   in mthca_alloc_icm()
    [all …]
/linux-6.15/include/net/sctp/

sm.h
    177  const struct sctp_chunk *chunk);
    179  const struct sctp_chunk *chunk);
    182  const struct sctp_chunk *chunk);
    200  const struct sctp_chunk *chunk);
    212  const struct sctp_chunk *chunk,
    217  const struct sctp_chunk *chunk,
    221  const struct sctp_chunk *chunk);
    224  const struct sctp_chunk *chunk);
    271  struct sctp_chunk *chunk,
    321  struct sctp_chunk *chunk,
    [all …]
/linux-6.15/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/

pool.c
    138  mlx5hws_buddy_free_mem(buddy, chunk->offset, chunk->order);   in hws_pool_buddy_db_put_chunk()
    231  &chunk->resource_idx,   in hws_pool_buddy_db_get_chunk()
    232  &chunk->offset);   in hws_pool_buddy_db_get_chunk()
    235  chunk->order);   in hws_pool_buddy_db_get_chunk()
    414  &chunk->resource_idx,   in hws_pool_general_element_db_get_chunk()
    415  &chunk->offset);   in hws_pool_general_element_db_get_chunk()
    418  chunk->order);   in hws_pool_general_element_db_get_chunk()
    496  &chunk->resource_idx,   in hws_onesize_element_db_get_chunk()
    497  &chunk->offset);   in hws_onesize_element_db_get_chunk()
    500  chunk->order);   in hws_onesize_element_db_get_chunk()
    [all …]