/linux-6.15/drivers/net/ethernet/mellanox/mlx5/core/lib/
crypto.c
     23  ({ typeof(bulk) _bulk = (bulk); \
    296  bulk = kzalloc(sizeof(*bulk), GFP_KERNEL);  in mlx5_crypto_dek_bulk_create()
    297  if (!bulk)  in mlx5_crypto_dek_bulk_create()
    324  return bulk;  in mlx5_crypto_dek_bulk_create()
    329  kfree(bulk);  in mlx5_crypto_dek_bulk_create()
    379  if (bulk) {  in mlx5_crypto_dek_pool_pop()
    408  bulk->avail_start = bulk->num_deks;  in mlx5_crypto_dek_pool_pop()
    429  struct mlx5_crypto_dek_bulk *bulk = dek->bulk;  in mlx5_crypto_dek_free_locked() [local]
    443  if (!bulk->avail_deks && !bulk->in_use_deks)  in mlx5_crypto_dek_free_locked()
    572  bulk->avail_deks = bulk->num_deks;  in mlx5_crypto_dek_pool_reset_synced()
    [all …]
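The crypto.c matches sketch a DEK bulk lifecycle: allocate a bulk, hand out keys, and reclaim the bulk once nothing is free or in flight. A minimal sketch of that bookkeeping; only the field names come from the snippets above, the struct layout is assumed:

    /* Hypothetical bookkeeping behind the crypto.c matches; layout assumed. */
    struct dek_bulk_sketch {
        int num_deks;     /* total DEKs created with this bulk */
        int avail_deks;   /* still free */
        int in_use_deks;  /* handed out, not yet freed */
    };

    /* crypto.c:443: a bulk may be destroyed only when no DEK is free
     * and none is still held by a user. */
    static bool dek_bulk_reclaimable(const struct dek_bulk_sketch *bulk)
    {
        return !bulk->avail_deks && !bulk->in_use_deks;
    }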
|
/linux-6.15/drivers/net/ethernet/mellanox/mlx5/core/
fs_pool.c
     31  return bitmap_weight(bulk->bitmask, bulk->bulk_len);  in mlx5_fs_bulk_get_free_amount()
     78  struct mlx5_fs_bulk *bulk;  in mlx5_fs_pool_cleanup() [local]
     82  pool->ops->bulk_destroy(dev, bulk);  in mlx5_fs_pool_cleanup()
     84  pool->ops->bulk_destroy(dev, bulk);  in mlx5_fs_pool_cleanup()
     86  pool->ops->bulk_destroy(dev, bulk);  in mlx5_fs_pool_cleanup()
    107  fs_pool->available_units -= bulk->bulk_len;  in mlx5_fs_pool_free_bulk()
    108  fs_pool->ops->bulk_destroy(dev, bulk);  in mlx5_fs_pool_free_bulk()
    184  if (bulk_free_amount == bulk->bulk_len) {  in mlx5_fs_pool_release_index()
    185  list_del(&bulk->pool_list);  in mlx5_fs_pool_release_index()
    187  mlx5_fs_pool_free_bulk(fs_pool, bulk);  in mlx5_fs_pool_release_index()
    [all …]
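The fs_pool matches show per-bulk free accounting done with a bitmask, one bit per entry. A small sketch of that idea, assuming a simplified struct (only bitmask/bulk_len are taken from the snippet):

    #include <linux/bitmap.h>

    struct fs_bulk_sketch {
        unsigned long *bitmask; /* set bit = free slot */
        int bulk_len;           /* entries in this bulk */
    };

    /* fs_pool.c:31: the free amount is just the popcount of the mask. */
    static int fs_bulk_free_amount(const struct fs_bulk_sketch *bulk)
    {
        return bitmap_weight(bulk->bitmask, bulk->bulk_len);
    }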
|
fs_counters.c
    166  if (counter->bulk)  in mlx5_fc_release()
    417  static void mlx5_fc_init(struct mlx5_fc *counter, struct mlx5_fc_bulk *bulk,  in mlx5_fc_init() [argument]
    420  counter->bulk = bulk;  in mlx5_fc_init()
    426  return counter->bulk->base_id;  in mlx5_fc_get_base_id()
    526  struct mlx5_fs_bulk *fs_bulk = &fc->bulk->fs_bulk;  in mlx5_fc_pool_release_counter()
    531  pool_index.index = fc->id - fc->bulk->base_id;  in mlx5_fc_pool_release_counter()
    565  counter->bulk = fc_bulk;  in mlx5_fc_local_create()
    575  kfree(counter->bulk);  in mlx5_fc_local_destroy()
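Worth noting from fs_counters.c:531: counters carved out of a bulk carry consecutive hardware IDs starting at the bulk's base_id, so mapping a counter back to its slot is plain subtraction. A one-line illustration (types assumed):

    #include <linux/types.h>

    /* fs_counters.c:531: slot = counter id - bulk base id */
    static u32 fc_bulk_slot(u32 counter_id, u32 bulk_base_id)
    {
        return counter_id - bulk_base_id;
    }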
|
fs_pool.h
     23  int (*bulk_destroy)(struct mlx5_core_dev *dev, struct mlx5_fs_bulk *bulk);
     45  int mlx5_fs_bulk_get_free_amount(struct mlx5_fs_bulk *bulk);
|
/linux-6.15/drivers/gpu/drm/ttm/
ttm_resource.c
     44  cursor->bulk = NULL;  in ttm_resource_cursor_clear_bulk()
     56  if (WARN_ON_ONCE(bulk != cursor->bulk)) {  in ttm_resource_cursor_move_bulk_tail()
    125  memset(bulk, 0, sizeof(*bulk));  in ttm_lru_bulk_move_init()
    126  INIT_LIST_HEAD(&bulk->cursor_list);  in ttm_lru_bulk_move_init()
    139  struct ttm_lru_bulk_move *bulk)  in ttm_lru_bulk_move_fini() [argument]
    142  ttm_bulk_move_drop_cursors(bulk);  in ttm_lru_bulk_move_fini()
    159  ttm_bulk_move_adjust_cursors(bulk);  in ttm_lru_bulk_move_tail()
    621  bulk = bo->bulk_move;  in ttm_resource_cursor_check_bulk()
    623  if (cursor->bulk != bulk) {  in ttm_resource_cursor_check_bulk()
    624  if (bulk) {  in ttm_resource_cursor_check_bulk()
    [all …]
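These TTM matches cover the bulk LRU-move lifecycle; together with the ttm_resource.h entry further down, they imply roughly the following usage. A hedged sketch: the LRU locking requirements are elided, buffer objects are assumed to join the bulk through the driver's usual setup path, and the first parameter of fini() is inferred from the header snippet:

    #include <drm/ttm/ttm_resource.h>

    static void example_bulk_lru_move(struct ttm_device *bdev)
    {
        struct ttm_lru_bulk_move bulk;

        ttm_lru_bulk_move_init(&bulk);  /* zeroes state, inits cursor list */

        /* ... buffer objects join the bulk elsewhere; the whole range
         * is then bumped to the LRU tail in a single operation: */
        ttm_lru_bulk_move_tail(&bulk);

        ttm_lru_bulk_move_fini(bdev, &bulk);
    }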
|
/linux-6.15/drivers/staging/vc04_services/interface/
TESTING
     49  Testing bulk transfer for alignment.
     50  Testing bulk transfer at PAGE_SIZE.
     61  vchi bulk (size 0, 0 async, 0 oneway) -> 546.000000us
     62  vchi bulk (size 0, 0 oneway) -> 230.000000us
     65  vchi bulk (size 0, 0 async, 0 oneway) -> 296.000000us
     66  vchi bulk (size 0, 0 oneway) -> 266.000000us
     68  vchi bulk (size 0, 0 oneway) -> 456.000000us
     70  vchi bulk (size 0, 0 oneway) -> 640.000000us
     72  vchi bulk (size 0, 0 oneway) -> 2309.000000us
     78  vchi bulk (size 0, 0 oneway) -> nanus
    [all …]
|
/linux-6.15/drivers/staging/vc04_services/vchiq-mmal/
mmal-vchiq.c
    150  } bulk; /* bulk data */  [member]
    270  msg_context->u.bulk.port->buffer_cb(msg_context->u.bulk.instance,  in buffer_work_cb()
    271  msg_context->u.bulk.port,  in buffer_work_cb()
    367  msg_context->u.bulk.buffer_used =  in inline_receive()
    397  msg_context->u.bulk.port = port;  in buffer_from_host()
    398  msg_context->u.bulk.buffer = buf;  in buffer_from_host()
    477  msg_context->u.bulk.mmal_flags =  in buffer_to_host_cb()
    490  msg_context->u.bulk.status =  in buffer_to_host_cb()
    498  msg_context->u.bulk.status = 0;  in buffer_to_host_cb()
    503  msg_context->u.bulk.status =  in buffer_to_host_cb()
    [all …]
|
/linux-6.15/drivers/staging/vc04_services/interface/vchiq_arm/
vchiq_core.c
    468  if (bulk) {  in make_service_callback()
   1330  bulk->actual);  in service_notify_bulk()
   1334  bulk->actual);  in service_notify_bulk()
   1518  if (bulk->offset)  in create_pagelist()
   1569  if (bulk->offset) {  in create_pagelist()
   1778  if (bulk && bulk->remote_data && bulk->actual)  in vchiq_complete_bulk()
   1817  service->remoteport, bulk->size, bulk->remote_size);  in abort_outstanding_bulks()
   1821  bulk->size = 0;  in abort_outstanding_bulks()
   2125  localport, bulk->actual, &bulk->dma_addr);  in parse_message()
   3096  dir_char, bulk->size, &bulk->dma_addr, bulk->cb_data);  in vchiq_bulk_xfer_queue_msg_killable()
    [all …]
|
vchiq_arm.c
    569  struct vchiq_bulk *bulk = waiter->bulk_waiter.bulk;  in vchiq_blocking_bulk_transfer() [local]
    571  if (bulk) {  in vchiq_blocking_bulk_transfer()
    574  if ((bulk->dma_addr != (dma_addr_t)(uintptr_t)bulk_params->dma_addr) ||  in vchiq_blocking_bulk_transfer()
    575  (bulk->size != bulk_params->size)) {  in vchiq_blocking_bulk_transfer()
    581  bulk->waiter = NULL;  in vchiq_blocking_bulk_transfer()
    594  if ((ret != -EAGAIN) || fatal_signal_pending(current) || !waiter->bulk_waiter.bulk) {  in vchiq_blocking_bulk_transfer()
    595  struct vchiq_bulk *bulk = waiter->bulk_waiter.bulk;  in vchiq_blocking_bulk_transfer() [local]
    597  if (bulk) {  in vchiq_blocking_bulk_transfer()
    600  bulk->waiter = NULL;  in vchiq_blocking_bulk_transfer()
|
/linux-6.15/drivers/gpu/drm/msm/
msm_io_utils.c
     17  struct clk *msm_clk_bulk_get_clock(struct clk_bulk_data *bulk, int count,  in msm_clk_bulk_get_clock() [argument]
     25  for (i = 0; bulk && i < count; i++) {  in msm_clk_bulk_get_clock()
     26  if (!strcmp(bulk[i].id, name) || !strcmp(bulk[i].id, n))  in msm_clk_bulk_get_clock()
     27  return bulk[i].clk;  in msm_clk_bulk_get_clock()
|
msm_mdss.c
    415  struct clk_bulk_data *bulk;  in mdp5_mdss_parse_clock() [local]
    422  bulk = devm_kcalloc(&pdev->dev, MDP5_MDSS_NUM_CLOCKS, sizeof(struct clk_bulk_data), GFP_KERNEL);  in mdp5_mdss_parse_clock()
    423  if (!bulk)  in mdp5_mdss_parse_clock()
    426  bulk[num_clocks++].id = "iface";  in mdp5_mdss_parse_clock()
    427  bulk[num_clocks++].id = "bus";  in mdp5_mdss_parse_clock()
    428  bulk[num_clocks++].id = "vsync";  in mdp5_mdss_parse_clock()
    430  ret = devm_clk_bulk_get_optional(&pdev->dev, num_clocks, bulk);  in mdp5_mdss_parse_clock()
    434  *clocks = bulk;  in mdp5_mdss_parse_clock()
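The msm_mdss.c matches show the standard clk_bulk pattern almost in full: build a clk_bulk_data array of clock names, then fetch every clock in one call. A self-contained sketch along the same lines; the clock names and count are illustrative, not tied to any real device tree:

    #include <linux/clk.h>
    #include <linux/device.h>
    #include <linux/platform_device.h>

    static int example_parse_clocks(struct platform_device *pdev,
                                    struct clk_bulk_data **clocks)
    {
        struct clk_bulk_data *bulk;
        int num_clocks = 0;
        int ret;

        bulk = devm_kcalloc(&pdev->dev, 3, sizeof(*bulk), GFP_KERNEL);
        if (!bulk)
            return -ENOMEM;

        bulk[num_clocks++].id = "iface";
        bulk[num_clocks++].id = "bus";
        bulk[num_clocks++].id = "vsync";

        /* the _optional variant leaves .clk NULL for absent clocks
         * instead of failing the probe */
        ret = devm_clk_bulk_get_optional(&pdev->dev, num_clocks, bulk);
        if (ret)
            return ret;

        *clocks = bulk;
        return num_clocks;
    }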
|
/linux-6.15/Documentation/w1/masters/
ds2490.rst
     49  - The ds2490 specification doesn't cover short bulk in reads in
     51  available, the bulk read will return an error and the hardware will
     52  clear the entire bulk in buffer. It would be possible to read the
     63  most of the time one of the bulk out or in, and usually the bulk in
     64  would fail. qemu sets a 50ms timeout and the bulk in would timeout
     65  even when the status shows data available. A bulk out write would
|
/linux-6.15/drivers/media/usb/uvc/
uvc_video.c
   1397  nbytes = min(stream->bulk.max_payload_size - stream->bulk.payload_size,  in uvc_video_encode_data()
   1597  if (stream->bulk.header_size == 0 && !stream->bulk.skip_payload) {  in uvc_video_decode_bulk()
   1633  stream->bulk.payload_size >= stream->bulk.max_payload_size) {  in uvc_video_decode_bulk()
   1636  stream->bulk.payload_size);  in uvc_video_decode_bulk()
   1641  stream->bulk.header_size = 0;  in uvc_video_decode_bulk()
   1642  stream->bulk.skip_payload = 0;  in uvc_video_decode_bulk()
   1677  stream->bulk.payload_size == stream->bulk.max_payload_size) {  in uvc_video_encode_bulk()
   1686  stream->bulk.header_size = 0;  in uvc_video_encode_bulk()
   2033  stream->bulk.header_size = 0;  in uvc_video_start_transfer()
   2034  stream->bulk.skip_payload = 0;  in uvc_video_start_transfer()
    [all …]
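The uvc_video.c matches outline how bulk payloads that span several URBs are reassembled: bytes accumulate in stream->bulk.payload_size until max_payload_size is reached, then the per-payload state is rearmed. A simplified stand-in for that state machine; the struct here is an assumption, not the driver's own:

    struct bulk_state_sketch {
        unsigned int header_size;      /* header bytes parsed so far */
        unsigned int payload_size;     /* payload bytes accumulated */
        unsigned int max_payload_size; /* negotiated payload size */
        int skip_payload;              /* drop the rest after a bad header */
    };

    static void bulk_urb_complete(struct bulk_state_sketch *s,
                                  unsigned int len)
    {
        s->payload_size += len;
        if (s->payload_size >= s->max_payload_size) {
            /* payload complete: hand it up, then reset the per-payload
             * state, as in uvc_video.c:1641-1642 */
            s->header_size = 0;
            s->skip_payload = 0;
            s->payload_size = 0;
        }
    }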
|
/linux-6.15/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/
fs_hws_pools.c
    128  pr_bulk->prs_data[i].bulk = pr_bulk;  in mlx5_fs_hws_pr_bulk_create()
    225  struct mlx5_fs_bulk *fs_bulk = &pr_data->bulk->fs_bulk;  in mlx5_fs_hws_pr_pool_release_pr()
    237  return pr_data->bulk->hws_action;  in mlx5_fs_hws_pr_get_action()
    282  mh_bulk->mhs_data[i].bulk = mh_bulk;  in mlx5_fs_hws_mh_bulk_create()
    374  struct mlx5_fs_bulk *fs_bulk = &mh_data->bulk->fs_bulk;  in mlx5_fs_hws_mh_pool_release_mh()
    409  struct mlx5_fc_bulk *fc_bulk = counter->bulk;  in mlx5_fc_get_hws_action()
    420  mlx5_fs_put_hws_action(&counter->bulk->hws_data);  in mlx5_fc_put_hws_action()
|
fs_hws_pools.h
     21  struct mlx5_fs_hws_pr_bulk *bulk;  [member]
     40  struct mlx5_fs_hws_mh_bulk *bulk;  [member]
|
/linux-6.15/Documentation/ABI/testing/
sysfs-driver-w1_therm
     75  * If a bulk read has been triggered, it will directly
     76  return the temperature computed when the bulk read
     80  * If no bulk read has been triggered, it will trigger
    115  (RW) trigger a bulk read conversion. read the status
    124  no bulk operation. Reading temperature will
    128  'trigger': trigger a bulk read on all supporting
    131  Note that if a bulk read is sent but one sensor is not read
    134  of the bulk read command (not the current temperature).
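For a concrete feel of the ABI described above, a userspace sketch that triggers a bulk conversion; the bus-master path ("w1_bus_master1") is an assumption and varies per system:

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        const char *path =
            "/sys/bus/w1/devices/w1_bus_master1/therm_bulk_read";
        int fd = open(path, O_WRONLY);

        if (fd < 0) {
            perror("open");
            return 1;
        }
        /* writing 'trigger' starts a conversion on all supporting slaves */
        if (write(fd, "trigger", 7) != 7)
            perror("write");
        close(fd);
        return 0;
    }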
|
/linux-6.15/include/drm/ttm/
ttm_resource.h
    332  struct ttm_lru_bulk_move *bulk;  [member]
    428  void ttm_lru_bulk_move_init(struct ttm_lru_bulk_move *bulk);
    429  void ttm_lru_bulk_move_tail(struct ttm_lru_bulk_move *bulk);
    431  struct ttm_lru_bulk_move *bulk);
|
/linux-6.15/drivers/remoteproc/
qcom_wcnss.c
    447  struct regulator_bulk_data *bulk;  in wcnss_init_regulators() [local]
    465  bulk = devm_kcalloc(wcnss->dev,  in wcnss_init_regulators()
    468  if (!bulk)  in wcnss_init_regulators()
    472  bulk[i].supply = info[i].name;  in wcnss_init_regulators()
    474  ret = devm_regulator_bulk_get(wcnss->dev, num_vregs, bulk);  in wcnss_init_regulators()
    480  regulator_set_voltage(bulk[i].consumer,  in wcnss_init_regulators()
    485  regulator_set_load(bulk[i].consumer, info[i].load_uA);  in wcnss_init_regulators()
    488  wcnss->vregs = bulk;  in wcnss_init_regulators()
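The qcom_wcnss.c matches are a textbook regulator_bulk consumer: fill in supply names, get them all in one call, then apply per-supply voltage and load hints. A sketch of the same flow; the vreg_info table is a stand-in for the driver's own:

    #include <linux/device.h>
    #include <linux/regulator/consumer.h>

    struct vreg_info_sketch {
        const char *name;
        int min_uV, max_uV;
        int load_uA;
    };

    static int example_init_regulators(struct device *dev,
                                       const struct vreg_info_sketch *info,
                                       int num_vregs)
    {
        struct regulator_bulk_data *bulk;
        int ret, i;

        bulk = devm_kcalloc(dev, num_vregs, sizeof(*bulk), GFP_KERNEL);
        if (!bulk)
            return -ENOMEM;

        for (i = 0; i < num_vregs; i++)
            bulk[i].supply = info[i].name;

        /* one call fetches every supply, or fails as a unit */
        ret = devm_regulator_bulk_get(dev, num_vregs, bulk);
        if (ret)
            return ret;

        for (i = 0; i < num_vregs; i++) {
            if (info[i].max_uV)
                regulator_set_voltage(bulk[i].consumer,
                                      info[i].min_uV, info[i].max_uV);
            if (info[i].load_uA)
                regulator_set_load(bulk[i].consumer, info[i].load_uA);
        }
        return 0;
    }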
|
/linux-6.15/drivers/media/usb/dvb-usb-v2/
usb_urb.c
    155  stream->props.u.bulk.buffersize,  in usb_urb_alloc_bulk_urbs()
    260  buf_size = stream->props.u.bulk.buffersize;  in usb_urb_reconfig()
    281  props->u.bulk.buffersize ==  in usb_urb_reconfig()
    282  stream->props.u.bulk.buffersize)  in usb_urb_reconfig()
    326  stream->props.u.bulk.buffersize);  in usb_urb_initv2()
|
/linux-6.15/Documentation/driver-api/usb/
bulk-streams.rst
      1  USB bulk streams
      8  device driver to overload a bulk endpoint so that multiple transfers can be
     41  ID for the bulk IN and OUT endpoints used in a Bi-directional command sequence.
     46  declares how many stream IDs it can support, and each bulk endpoint on a
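The document this entry points at describes allocating stream IDs on bulk endpoints via usb_alloc_streams()/usb_free_streams(). A hedged sketch of the call sequence; the endpoint pair and the stream count of 16 are illustrative, and a real driver would size the request from the device's declared stream limit:

    #include <linux/usb.h>

    static int example_streams(struct usb_interface *intf,
                               struct usb_host_endpoint *in_ep,
                               struct usb_host_endpoint *out_ep)
    {
        struct usb_host_endpoint *eps[] = { in_ep, out_ep };
        int streams;

        /* returns the number of streams allocated, or a negative errno */
        streams = usb_alloc_streams(intf, eps, ARRAY_SIZE(eps),
                                    16, GFP_KERNEL);
        if (streams < 0)
            return streams;

        /* ... submit URBs with urb->stream_id set, then release: */
        return usb_free_streams(intf, eps, ARRAY_SIZE(eps), GFP_KERNEL);
    }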
|
/linux-6.15/drivers/media/usb/dvb-usb/
dtt200u.c
    213  .bulk = {
    265  .bulk = {
    317  .bulk = {
    369  .bulk = {
|
dibusb-mb.c
    224  .bulk = {
    314  .bulk = {
    383  .bulk = {
    445  .bulk = {
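Both dvb-usb device tables above repeat the same shape: each adapter's stream properties describe the transport stream as a bulk pipe. A sketch of one such initializer, assuming dvb-usb's usb_data_stream_properties layout; the endpoint, URB count, and buffer size here are illustrative:

    #include "dvb-usb.h"  /* driver-local header; provides USB_BULK */

    /* assumed-layout example of a bulk stream description */
    static const struct usb_data_stream_properties example_stream_props = {
        .type = USB_BULK,
        .count = 7,          /* URBs kept in flight */
        .endpoint = 0x02,    /* bulk IN endpoint */
        .u = {
            .bulk = {
                .buffersize = 4096,
            }
        }
    };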
|
/linux-6.15/net/core/
page_pool.c
    530  const int bulk = PP_ALLOC_CACHE_REFILL;  in __page_pool_alloc_pages_slow() [local]
    545  memset(&pool->alloc.cache, 0, sizeof(void *) * bulk);  in __page_pool_alloc_pages_slow()
    547  nr_pages = alloc_pages_bulk_node(gfp, pool->p.nid, bulk,  in __page_pool_alloc_pages_slow()
    851  netmem_ref *bulk,  in page_pool_recycle_ring_bulk() [argument]
    861  if (__ptr_ring_produce(&pool->ring, (__force void *)bulk[i])) {  in page_pool_recycle_ring_bulk()
    880  page_pool_return_page(pool, bulk[i]);  in page_pool_recycle_ring_bulk()
    910  netmem_ref bulk[XDP_BULK_QUEUE_SIZE];  in page_pool_put_netmem_bulk() [local]
    938  bulk[bulk_len++] = netmem;  in page_pool_put_netmem_bulk()
    942  page_pool_recycle_ring_bulk(pool, bulk, bulk_len);  in page_pool_put_netmem_bulk()
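The page_pool slow path above refills its allocation cache with one batched allocator call instead of a loop of single-page allocations. A hedged sketch of that refill step; the cache array and the gfp flags stand in for pool internals:

    #include <linux/gfp.h>

    static unsigned long example_refill(int nid, struct page **cache,
                                        unsigned long bulk)
    {
        /* returns how many pages were actually placed in 'cache';
         * this may be fewer than requested under memory pressure */
        return alloc_pages_bulk_node(GFP_ATOMIC | __GFP_NOWARN, nid,
                                     bulk, cache);
    }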
|
/linux-6.15/lib/
test_objpool.c
     72  int bulk[2]; /* for thread and irq */  [member]
    207  item->bulk[0] = test->bulk_normal;  in ot_init_cpu_item()
    208  item->bulk[1] = test->bulk_irq;  in ot_init_cpu_item()
    329  for (i = 0; i < item->bulk[irq]; i++)  in ot_bulk_sync()
    506  for (i = 0; i < item->bulk[irq]; i++)  in ot_bulk_async()
|
/linux-6.15/Documentation/usb/
ehci.rst
     58  At this writing the driver should comfortably handle all control, bulk,
    125  and bulk transfers. Shows each active qh and the qtds
    161  good to keep in mind that bulk transfers are always in 512 byte packets,
    165  So more than 50 MByte/sec is available for bulk transfers, when both
    195  you issue a control or bulk request you can often expect to learn that
    203  or using bulk queuing if a series of small requests needs to be issued.
    213  I/O be efficient, it's better to just queue up several (bulk) requests
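As a sanity check on the "50 MByte/sec" figure in that snippet: high-speed bulk moves 512-byte packets, at most 13 per 125 µs microframe, so the theoretical ceiling is 512 × 13 × 8000 = 53,248,000 bytes/s, slightly above 50 MByte/s, which matches the document's claim.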
|