/f-stack/dpdk/drivers/crypto/scheduler/
scheduler_failover.c
    29  for (i = 0; i < nb_ops && i < 4; i++)    in failover_worker_enqueue()
    33  worker->qp_id, ops, nb_ops);    in failover_worker_enqueue()
    46  if (unlikely(nb_ops == 0))    in schedule_enqueue()
    50  ops, nb_ops);    in schedule_enqueue()
    52  if (enqueued_ops < nb_ops)    in schedule_enqueue()
    56  nb_ops - enqueued_ops);    in schedule_enqueue()
    64  uint16_t nb_ops)    in schedule_enqueue_ordering() argument
    69  nb_ops);    in schedule_enqueue_ordering()
    90  worker->qp_id, ops, nb_ops);    in schedule_dequeue()
    96  if (nb_deq_ops == nb_ops)    in schedule_dequeue()
    [all …]
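These hits trace the failover scheduler's enqueue path: the burst goes to the primary worker first, and whatever did not fit (nb_ops - enqueued_ops) is redirected to the secondary. A minimal sketch of that shortfall-redirect pattern, with a hypothetical send_burst() standing in for the driver's real worker-enqueue call:

```c
#include <stdint.h>

struct worker;	/* opaque stand-in for the scheduler's worker handle */

/* Assumed primitive: enqueue up to n ops on one worker, return how many fit. */
uint16_t send_burst(struct worker *w, void **ops, uint16_t n);

/* Failover pattern: primary first, remainder to the secondary. */
uint16_t
failover_enqueue(struct worker *primary, struct worker *secondary,
		void **ops, uint16_t nb_ops)
{
	uint16_t enqueued;

	if (nb_ops == 0)
		return 0;

	enqueued = send_burst(primary, ops, nb_ops);
	if (enqueued < nb_ops)
		enqueued += send_burst(secondary, &ops[enqueued],
				nb_ops - enqueued);
	return enqueued;
}
```
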
scheduler_pkt_size_distr.c
    53  if (unlikely(nb_ops == 0))    in schedule_enqueue()
    61  for (i = 0; (i < (nb_ops - 8)) && (nb_ops > 8); i += 4) {    in schedule_enqueue()
    85  i = nb_ops;    in schedule_enqueue()
    99  i = nb_ops;    in schedule_enqueue()
    113  i = nb_ops;    in schedule_enqueue()
    127  i = nb_ops;    in schedule_enqueue()
    135  for (; i < nb_ops; i++) {    in schedule_enqueue()
    143  i = nb_ops;    in schedule_enqueue()
    175  uint16_t nb_ops)    in schedule_enqueue_ordering() argument
    180  nb_ops);    in schedule_enqueue_ordering()
    [all …]
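The distributor walks the burst four ops at a time (the unrolled `i += 4` loop over `nb_ops - 8`) plus a scalar tail loop, steering each op to a worker by its payload size. A sketch of the classification step only, over a simplified op type; the 128-byte threshold and the two-worker split are assumptions for illustration:

```c
#include <stdint.h>

#define PKT_SIZE_THRESHOLD 128	/* assumed cutoff, in bytes */

struct op { uint32_t data_len; };

/* worker_idx[i] = 0 routes ops[i] to the small-packet worker,
 * 1 to the large-packet worker (indices are illustrative). */
void
classify_by_size(struct op **ops, uint16_t nb_ops, uint8_t *worker_idx)
{
	uint16_t i;

	for (i = 0; i < nb_ops; i++)
		worker_idx[i] = ops[i]->data_len <= PKT_SIZE_THRESHOLD ? 0 : 1;
}
```
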
scheduler_pmd_private.h
    68  get_max_enqueue_order_count(struct rte_ring *order_ring, uint16_t nb_ops)    in get_max_enqueue_order_count() argument
    72  return count > nb_ops ? nb_ops : count;    in get_max_enqueue_order_count()
    77  struct rte_crypto_op **ops, uint16_t nb_ops)    in scheduler_order_insert() argument
    79  rte_ring_sp_enqueue_burst(order_ring, (void **)ops, nb_ops, NULL);    in scheduler_order_insert()
    84  struct rte_crypto_op **ops, uint16_t nb_ops)    in scheduler_order_drain() argument
    90  nb_ops, NULL);    in scheduler_order_drain()
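The ordering helpers preserve submission order across workers through a shared rte_ring: scheduler_order_insert() records each burst, scheduler_order_drain() releases completed ops in order, and get_max_enqueue_order_count() clamps a burst to the ring's free space so enqueues can never outrun the ordering buffer. A sketch of that clamp over a stand-in ring type:

```c
#include <stdint.h>

/* Stand-in for rte_ring: only the counter the clamp needs. */
struct order_ring { uint32_t free_count; };

/* Accept at most as many ops as the ordering ring can still absorb. */
static inline uint16_t
max_enqueue_order_count(const struct order_ring *r, uint16_t nb_ops)
{
	uint32_t count = r->free_count;

	return count > nb_ops ? nb_ops : count;
}
```
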
scheduler_roundrobin.c
    20  schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)    in schedule_enqueue() argument
    28  if (unlikely(nb_ops == 0))    in schedule_enqueue()
    31  for (i = 0; i < nb_ops && i < 4; i++)    in schedule_enqueue()
    35  worker->qp_id, ops, nb_ops);    in schedule_enqueue()
    47  uint16_t nb_ops)    in schedule_enqueue_ordering() argument
    52  nb_ops);    in schedule_enqueue_ordering()
    63  schedule_dequeue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)    in schedule_dequeue() argument
    88  worker->qp_id, ops, nb_ops);    in schedule_dequeue()
    102  uint16_t nb_ops)    in schedule_dequeue_ordering() argument
    107  schedule_dequeue(qp, ops, nb_ops);    in schedule_dequeue_ordering()
    [all …]
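Round-robin is the simplest policy here: each burst goes, whole, to the next worker in turn (the `i < 4` loop above appears to prefetch the leading ops before the hand-off). A sketch of the rotation under assumed types:

```c
#include <stdint.h>

struct worker;	/* opaque worker handle */
uint16_t send_burst(struct worker *w, void **ops, uint16_t n);	/* assumed */

struct rr_qp {
	struct worker **workers;	/* hypothetical per-worker handles */
	uint32_t nb_workers;
	uint32_t last_worker_idx;
};

/* Hand the whole burst to the current worker, then advance the index. */
uint16_t
rr_enqueue(struct rr_qp *qp, void **ops, uint16_t nb_ops)
{
	uint16_t sent;

	if (nb_ops == 0)
		return 0;

	sent = send_burst(qp->workers[qp->last_worker_idx], ops, nb_ops);
	qp->last_worker_idx = (qp->last_worker_idx + 1) % qp->nb_workers;
	return sent;
}
```
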
scheduler_multicore.c
    47  if (unlikely(nb_ops == 0))    in schedule_enqueue()
    50  for (i = 0; i < mc_ctx->num_workers && nb_ops != 0; i++) {    in schedule_enqueue()
    53  (void *)(&ops[processed_ops]), nb_ops, NULL);    in schedule_enqueue()
    55  nb_ops -= nb_queue_ops;    in schedule_enqueue()
    68  uint16_t nb_ops)    in schedule_enqueue_ordering() argument
    73  nb_ops);    in schedule_enqueue_ordering()
    92  for (i = 0; i < mc_ctx->num_workers && nb_ops != 0; i++) {    in schedule_dequeue()
    95  (void *)(&ops[processed_ops]), nb_ops, NULL);    in schedule_dequeue()
    97  nb_ops -= nb_deq_ops;    in schedule_dequeue()
    111  uint16_t nb_ops)    in schedule_dequeue_ordering() argument
    [all …]
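The multicore scheduler keeps one request ring per worker core and lets an oversized burst spill from ring to ring, which is what the `i < mc_ctx->num_workers && nb_ops != 0` loop does. A sketch of that spill loop with stand-in ring types:

```c
#include <stdint.h>

struct ring;	/* one request ring per worker core (stand-in type) */
uint16_t ring_enqueue_burst(struct ring *r, void **ops, uint16_t n);	/* assumed */

/* Offer the burst to each worker ring in turn; whatever one ring cannot
 * take spills over to the next. Returns the total accepted. */
uint16_t
mc_enqueue(struct ring **rings, uint32_t num_workers,
		void **ops, uint16_t nb_ops)
{
	uint16_t processed_ops = 0;
	uint32_t i;

	for (i = 0; i < num_workers && nb_ops != 0; i++) {
		uint16_t n = ring_enqueue_burst(rings[i],
				&ops[processed_ops], nb_ops);

		nb_ops -= n;
		processed_ops += n;
	}
	return processed_ops;
}
```
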
/f-stack/dpdk/lib/librte_compressdev/
rte_comp.c
    91  struct rte_comp_op **ops, uint16_t nb_ops)    in rte_comp_op_raw_bulk_alloc() argument
    93  if (rte_mempool_get_bulk(mempool, (void **)ops, nb_ops) == 0)    in rte_comp_op_raw_bulk_alloc()
    94  return nb_ops;    in rte_comp_op_raw_bulk_alloc()
    187  struct rte_comp_op **ops, uint16_t nb_ops)    in rte_comp_op_bulk_alloc() argument
    192  retval = rte_comp_op_raw_bulk_alloc(mempool, ops, nb_ops);    in rte_comp_op_bulk_alloc()
    193  if (unlikely(retval != nb_ops))    in rte_comp_op_bulk_alloc()
    196  for (i = 0; i < nb_ops; i++)    in rte_comp_op_bulk_alloc()
    199  return nb_ops;    in rte_comp_op_bulk_alloc()
    218  rte_comp_op_bulk_free(struct rte_comp_op **ops, uint16_t nb_ops)    in rte_comp_op_bulk_free() argument
    222  for (i = 0; i < nb_ops; i++) {    in rte_comp_op_bulk_free()
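rte_comp_op_bulk_alloc() is an all-or-nothing allocator: a raw rte_mempool_get_bulk() (which returns 0 on success) either hands back every requested op or none, and each op is then reset before use. A generic sketch of the same pattern over stand-in pool and op types:

```c
#include <stdint.h>

struct pool;	/* stand-in for rte_mempool */
struct op;	/* stand-in for rte_comp_op */

/* Assumed all-or-nothing getter in the style of rte_mempool_get_bulk():
 * returns 0 on success, nonzero when the pool cannot satisfy n. */
int pool_get_bulk(struct pool *p, void **objs, unsigned int n);
void op_reset(struct op *o);	/* re-initialize a recycled op */

/* Grab all nb_ops objects atomically, then reset each; on shortage, none. */
uint16_t
op_bulk_alloc(struct pool *p, struct op **ops, uint16_t nb_ops)
{
	uint16_t i;

	if (pool_get_bulk(p, (void **)ops, nb_ops) != 0)
		return 0;
	for (i = 0; i < nb_ops; i++)
		op_reset(ops[i]);
	return nb_ops;
}
```
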
rte_compressdev_internal.h
    42  struct rte_comp_op **ops, uint16_t nb_ops);
    64  struct rte_comp_op **ops, uint16_t nb_ops);
/f-stack/dpdk/lib/librte_cryptodev/
rte_cryptodev_trace_fp.h
    17  uint16_t nb_ops),
    21  rte_trace_point_emit_u16(nb_ops);
    27  uint16_t nb_ops),
    31  rte_trace_point_emit_u16(nb_ops);
rte_crypto.h
    220  struct rte_crypto_op **ops, uint16_t nb_ops)    in __rte_crypto_op_raw_bulk_alloc() argument
    229  if (rte_mempool_get_bulk(mempool, (void **)ops, nb_ops) == 0)    in __rte_crypto_op_raw_bulk_alloc()
    230  return nb_ops;    in __rte_crypto_op_raw_bulk_alloc()
    278  struct rte_crypto_op **ops, uint16_t nb_ops)    in rte_crypto_op_bulk_alloc() argument
    282  if (unlikely(__rte_crypto_op_raw_bulk_alloc(mempool, type, ops, nb_ops)    in rte_crypto_op_bulk_alloc()
    283  != nb_ops))    in rte_crypto_op_bulk_alloc()
    286  for (i = 0; i < nb_ops; i++)    in rte_crypto_op_bulk_alloc()
    289  return nb_ops;    in rte_crypto_op_bulk_alloc()
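The crypto-op allocator has the same contract: rte_crypto_op_bulk_alloc() returns either nb_ops or 0, never a partial count. A caller-side sketch; the pool is assumed to have been created with rte_crypto_op_pool_create(), and the burst size is illustrative:

```c
#include <rte_crypto.h>
#include <rte_mempool.h>

#define BURST 32	/* illustrative burst size */

/* Returns 0 with all BURST ops filled in, or -1 with none allocated. */
static int
alloc_burst(struct rte_mempool *op_pool, struct rte_crypto_op *ops[BURST])
{
	if (rte_crypto_op_bulk_alloc(op_pool, RTE_CRYPTO_OP_TYPE_SYMMETRIC,
			ops, BURST) != BURST)
		return -1;	/* pool exhausted: retry or back off */
	return 0;
}
```
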
rte_cryptodev.h
    827  struct rte_crypto_op **ops, uint16_t nb_ops);
    831  struct rte_crypto_op **ops, uint16_t nb_ops);
    944  struct rte_crypto_op **ops, uint16_t nb_ops)    in rte_cryptodev_dequeue_burst() argument
    948  nb_ops = (*dev->dequeue_burst)    in rte_cryptodev_dequeue_burst()
    949  (dev->data->queue_pairs[qp_id], ops, nb_ops);    in rte_cryptodev_dequeue_burst()
    951  rte_cryptodev_trace_dequeue_burst(dev_id, qp_id, (void **)ops, nb_ops);    in rte_cryptodev_dequeue_burst()
    952  return nb_ops;    in rte_cryptodev_dequeue_burst()
    988  struct rte_crypto_op **ops, uint16_t nb_ops)    in rte_cryptodev_enqueue_burst() argument
    992  rte_cryptodev_trace_enqueue_burst(dev_id, qp_id, (void **)ops, nb_ops);    in rte_cryptodev_enqueue_burst()
    994  dev->data->queue_pairs[qp_id], ops, nb_ops);    in rte_cryptodev_enqueue_burst()
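As the hits at lines 948 and 994 show, rte_cryptodev_enqueue_burst() and rte_cryptodev_dequeue_burst() are thin static-inline wrappers around the PMD's function pointers, and either may handle fewer than nb_ops. A caller-side sketch of the resulting poll loop; dev_id, qp_id, and the burst size are illustrative:

```c
#include <rte_cryptodev.h>

#define BURST 32	/* illustrative */

/* Submit a burst, then drain completions; neither call blocks. */
static void
process_burst(uint8_t dev_id, uint16_t qp_id,
		struct rte_crypto_op **ops, uint16_t nb_ops)
{
	struct rte_crypto_op *deq[BURST];
	uint16_t sent, got;

	sent = rte_cryptodev_enqueue_burst(dev_id, qp_id, ops, nb_ops);
	/* ops[sent..nb_ops-1] were not accepted: resubmit or free them. */

	got = rte_cryptodev_dequeue_burst(dev_id, qp_id, deq, BURST);
	/* deq[0..got-1] completed: check each op->status before reuse. */
	(void)sent;
	(void)got;
}
```
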
/f-stack/dpdk/drivers/baseband/null/
bbdev_null.c
    143  struct rte_bbdev_dec_op **ops, uint16_t nb_ops)    in enqueue_dec_ops() argument
    147  (void **)ops, nb_ops, NULL);    in enqueue_dec_ops()
    149  q_data->queue_stats.enqueue_err_count += nb_ops - nb_enqueued;    in enqueue_dec_ops()
    158  struct rte_bbdev_enc_op **ops, uint16_t nb_ops)    in enqueue_enc_ops() argument
    162  (void **)ops, nb_ops, NULL);    in enqueue_enc_ops()
    164  q_data->queue_stats.enqueue_err_count += nb_ops - nb_enqueued;    in enqueue_enc_ops()
    173  struct rte_bbdev_dec_op **ops, uint16_t nb_ops)    in dequeue_dec_ops() argument
    177  (void **)ops, nb_ops, NULL);    in dequeue_dec_ops()
    186  struct rte_bbdev_enc_op **ops, uint16_t nb_ops)    in dequeue_enc_ops() argument
    190  (void **)ops, nb_ops, NULL);    in dequeue_enc_ops()
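The null baseband PMD simply forwards ops onto a queue ring and books the shortfall as enqueue errors. A sketch of that accounting with simplified stand-in types:

```c
#include <stdint.h>

struct ring;	/* stand-in for the queue's rte_ring */
uint16_t ring_enqueue_burst(struct ring *r, void **ops, uint16_t n);	/* assumed */

struct q_stats {
	uint64_t enqueued_count;
	uint64_t enqueue_err_count;
};

/* Forward the burst; ops that do not fit are counted as errors. */
uint16_t
null_enqueue(struct ring *r, struct q_stats *st, void **ops, uint16_t nb_ops)
{
	uint16_t nb_enqueued = ring_enqueue_burst(r, ops, nb_ops);

	st->enqueued_count += nb_enqueued;
	st->enqueue_err_count += nb_ops - nb_enqueued;
	return nb_enqueued;
}
```
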
/f-stack/dpdk/drivers/crypto/ccp/
rte_ccp_pmd.c
    197  uint16_t nb_ops)    in ccp_pmd_enqueue_burst() argument
    204  uint16_t tmp_ops = nb_ops, b_idx, cur_ops = 0;    in ccp_pmd_enqueue_burst()
    206  if (nb_ops == 0)    in ccp_pmd_enqueue_burst()
    212  cur_ops = nb_ops / cryptodev_cnt + (nb_ops)%cryptodev_cnt;    in ccp_pmd_enqueue_burst()
    216  b_idx = nb_ops - tmp_ops;    in ccp_pmd_enqueue_burst()
    240  nb_ops, slots_req, b_idx);    in ccp_pmd_enqueue_burst()
    250  uint16_t nb_ops)    in ccp_pmd_dequeue_burst() argument
    255  nb_dequeued = process_ops_to_dequeue(qp, ops, nb_ops, &total_nb_ops);    in ccp_pmd_dequeue_burst()
    260  ops, nb_ops, &total_nb_ops);    in ccp_pmd_dequeue_burst()
/f-stack/dpdk/drivers/crypto/bcmfs/
bcmfs_sym_pmd.c
    248  uint16_t nb_ops)    in bcmfs_sym_pmd_enqueue_op_burst() argument
    257  if (nb_ops == 0)    in bcmfs_sym_pmd_enqueue_op_burst()
    260  if (nb_ops > BCMFS_MAX_REQS_BUFF)    in bcmfs_sym_pmd_enqueue_op_burst()
    261  nb_ops = BCMFS_MAX_REQS_BUFF;    in bcmfs_sym_pmd_enqueue_op_burst()
    264  if (nb_ops > (qp->nb_descriptors - qp->nb_pending_requests))    in bcmfs_sym_pmd_enqueue_op_burst()
    265  nb_ops = qp->nb_descriptors - qp->nb_pending_requests;    in bcmfs_sym_pmd_enqueue_op_burst()
    267  for (i = 0; i < nb_ops; i++) {    in bcmfs_sym_pmd_enqueue_op_burst()
    316  uint16_t nb_ops)    in bcmfs_sym_pmd_dequeue_op_burst() argument
    324  if (nb_ops > BCMFS_MAX_REQS_BUFF)    in bcmfs_sym_pmd_dequeue_op_burst()
    325  nb_ops = BCMFS_MAX_REQS_BUFF;    in bcmfs_sym_pmd_dequeue_op_burst()
    [all …]
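Before touching the hardware, the bcmfs enqueue path trims nb_ops twice: first to its static request buffer (BCMFS_MAX_REQS_BUFF), then to the free descriptors remaining in the queue. A sketch of that double clamp; the buffer size is an assumption:

```c
#include <stdint.h>

#define MAX_REQS_BUFF 64	/* assumed stand-in for BCMFS_MAX_REQS_BUFF */

/* Clamp a burst to both the request buffer and descriptor headroom.
 * Assumes nb_pending_requests <= nb_descriptors. */
static inline uint16_t
clamp_burst(uint16_t nb_ops, uint16_t nb_descriptors,
		uint16_t nb_pending_requests)
{
	uint16_t headroom = nb_descriptors - nb_pending_requests;

	if (nb_ops > MAX_REQS_BUFF)
		nb_ops = MAX_REQS_BUFF;
	if (nb_ops > headroom)
		nb_ops = headroom;
	return nb_ops;
}
```
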
bcmfs_qp.h
    119  uint16_t nb_ops);
    127  bcmfs_enqueue_op_burst(void *qp, void **ops, uint16_t nb_ops);
    129  bcmfs_dequeue_op_burst(void *qp, void **ops, uint16_t nb_ops);
bcmfs_qp.c
    306  bcmfs_enqueue_op_burst(void *qp, void **ops, uint16_t nb_ops)    in bcmfs_enqueue_op_burst() argument
    310  uint16_t nb_ops_possible = nb_ops;    in bcmfs_enqueue_op_burst()
    313  if (unlikely(nb_ops == 0))    in bcmfs_enqueue_op_burst()
    338  bcmfs_dequeue_op_burst(void *qp, void **ops, uint16_t nb_ops)    in bcmfs_dequeue_op_burst() argument
    341  uint32_t deq = tmp_qp->ops->dequeue(tmp_qp, ops, nb_ops);    in bcmfs_dequeue_op_burst()
/f-stack/dpdk/drivers/crypto/mvsam/
rte_mrvl_pmd.c
    664  uint16_t nb_ops)    in mrvl_crypto_pmd_enqueue_burst() argument
    678  if (nb_ops == 0)    in mrvl_crypto_pmd_enqueue_burst()
    686  for (; iter_ops < nb_ops; ++iter_ops) {    in mrvl_crypto_pmd_enqueue_burst()
    743  uint16_t nb_ops)    in mrvl_crypto_pmd_dequeue_burst() argument
    748  struct sam_cio_op_result results[nb_ops];    in mrvl_crypto_pmd_dequeue_burst()
    751  ret = sam_cio_deq(cio, results, &nb_ops);    in mrvl_crypto_pmd_dequeue_burst()
    754  qp->stats.dequeue_err_count += nb_ops;    in mrvl_crypto_pmd_dequeue_burst()
    757  qp->stats.dequeued_count += nb_ops;    in mrvl_crypto_pmd_dequeue_burst()
    763  for (i = 0; i < nb_ops; ++i) {    in mrvl_crypto_pmd_dequeue_burst()
    782  qp->stats.dequeued_count += nb_ops;    in mrvl_crypto_pmd_dequeue_burst()
    [all …]
/f-stack/dpdk/drivers/common/qat/
qat_qp.h
    84  qat_enqueue_op_burst(void *qp, void **ops, uint16_t nb_ops);
    87  qat_enqueue_comp_op_burst(void *qp, void **ops, uint16_t nb_ops);
    90  qat_dequeue_op_burst(void *qp, void **ops, uint16_t nb_ops);
qat_qp.c
    579  qat_enqueue_op_burst(void *qp, void **ops, uint16_t nb_ops)    in qat_enqueue_op_burst() argument
    585  uint16_t nb_ops_possible = nb_ops;    in qat_enqueue_op_burst()
    589  if (unlikely(nb_ops == 0))    in qat_enqueue_op_burst()
    612  if ((inflights + nb_ops) > tmp_qp->max_inflights) {    in qat_enqueue_op_burst()
    682  uint16_t nb_ops_possible = nb_ops;    in qat_enqueue_comp_op_burst()
    690  if (unlikely(nb_ops == 0))    in qat_enqueue_comp_op_burst()
    714  overflow = (inflights + nb_ops) - tmp_qp->max_inflights;    in qat_enqueue_comp_op_burst()
    716  nb_ops_possible = nb_ops - overflow;    in qat_enqueue_comp_op_burst()
    743  nb_ops, nb_remaining_descriptors);    in qat_enqueue_comp_op_burst()
    846  qat_dequeue_op_burst(void *qp, void **ops, uint16_t nb_ops)    in qat_dequeue_op_burst() argument
    [all …]
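qat_enqueue_comp_op_burst() computes how far the burst would push the queue past max_inflights and, when there is an overflow, shrinks the burst by exactly that amount (lines 714-716 above). A sketch of the arithmetic:

```c
#include <stdint.h>

/* Trim nb_ops so inflights + nb_ops never exceeds max_inflights. */
static inline uint16_t
trim_to_inflight_budget(uint16_t nb_ops, uint32_t inflights,
		uint32_t max_inflights)
{
	int32_t overflow = (int32_t)(inflights + nb_ops)
			- (int32_t)max_inflights;

	return overflow > 0 ? (uint16_t)(nb_ops - overflow) : nb_ops;
}
```
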
/f-stack/dpdk/drivers/crypto/octeontx/
otx_cryptodev_ops.c
    634  otx_cpt_pkt_enqueue(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops,    in otx_cpt_pkt_enqueue() argument
    644  if (nb_ops > count)    in otx_cpt_pkt_enqueue()
    645  nb_ops = count;    in otx_cpt_pkt_enqueue()
    648  while (likely(count < nb_ops)) {    in otx_cpt_pkt_enqueue()
    664  return otx_cpt_pkt_enqueue(qptr, ops, nb_ops, OP_TYPE_ASYM);    in otx_cpt_enqueue_asym()
    668  otx_cpt_enqueue_sym(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)    in otx_cpt_enqueue_sym() argument
    670  return otx_cpt_pkt_enqueue(qptr, ops, nb_ops, OP_TYPE_SYM);    in otx_cpt_enqueue_sym()
    826  uint8_t cc[nb_ops];    in otx_cpt_pkt_dequeue()
    836  count = (nb_ops > pcount) ? pcount : nb_ops;    in otx_cpt_pkt_dequeue()
    915  return otx_cpt_pkt_dequeue(qptr, ops, nb_ops, OP_TYPE_ASYM);    in otx_cpt_dequeue_asym()
    [all …]
/f-stack/dpdk/app/test-crypto-perf/
cperf_ops.c
    23  for (i = 0; i < nb_ops; i++) {    in cperf_set_ops_security()
    91  for (i = 0; i < nb_ops; i++) {    in cperf_set_ops_null_cipher()
    130  for (i = 0; i < nb_ops; i++) {    in cperf_set_ops_null_auth()
    169  for (i = 0; i < nb_ops; i++) {    in cperf_set_ops_cipher()
    202  for (i = 0; i < nb_ops; i++) {    in cperf_set_ops_cipher()
    225  for (i = 0; i < nb_ops; i++) {    in cperf_set_ops_auth()
    303  for (i = 0; i < nb_ops; i++) {    in cperf_set_ops_auth()
    325  for (i = 0; i < nb_ops; i++) {    in cperf_set_ops_cipher_auth()
    408  for (i = 0; i < nb_ops; i++) {    in cperf_set_ops_cipher_auth()
    443  for (i = 0; i < nb_ops; i++) {    in cperf_set_ops_aead()
    [all …]
/f-stack/dpdk/drivers/crypto/qat/
qat_asym_pmd.h
    36  uint16_t nb_ops);
    40  uint16_t nb_ops);
/f-stack/dpdk/lib/librte_vhost/
rte_vhost_crypto.h
    108  struct rte_crypto_op **ops, uint16_t nb_ops);
    130  uint16_t nb_ops, int *callfds, uint16_t *nb_callfds);
/f-stack/dpdk/drivers/baseband/turbo_sw/
bbdev_turbo_software.c
    1163  for (i = 0; i < nb_ops; ++i)    in enqueue_enc_all_ops()
    1180  for (i = 0; i < nb_ops; ++i)    in enqueue_ldpc_enc_all_ops()
    1733  for (i = 0; i < nb_ops; ++i)    in enqueue_dec_all_ops()
    1750  for (i = 0; i < nb_ops; ++i)    in enqueue_ldpc_dec_all_ops()
    1760  struct rte_bbdev_enc_op **ops, uint16_t nb_ops)    in enqueue_enc_ops() argument
    1777  struct rte_bbdev_enc_op **ops, uint16_t nb_ops)    in enqueue_ldpc_enc_ops() argument
    1784  q, ops, nb_ops, &q_data->queue_stats);    in enqueue_ldpc_enc_ops()
    1830  struct rte_bbdev_dec_op **ops, uint16_t nb_ops)    in dequeue_dec_ops() argument
    1834  (void **)ops, nb_ops, NULL);    in dequeue_dec_ops()
    1843  struct rte_bbdev_enc_op **ops, uint16_t nb_ops)    in dequeue_enc_ops() argument
    [all …]
/f-stack/dpdk/drivers/crypto/null/
null_crypto_pmd.c
    113  uint16_t nb_ops)    in null_crypto_pmd_enqueue_burst() argument
    120  for (i = 0; i < nb_ops; i++) {    in null_crypto_pmd_enqueue_burst()
    144  uint16_t nb_ops)    in null_crypto_pmd_dequeue_burst() argument
    151  (void **)ops, nb_ops, NULL);    in null_crypto_pmd_dequeue_burst()
/f-stack/dpdk/lib/librte_regexdev/
rte_regexdev.h
    1465  struct rte_regex_ops **ops, uint16_t nb_ops)    in rte_regexdev_enqueue_burst() argument
    1476  return (*dev->enqueue)(dev, qp_id, ops, nb_ops);    in rte_regexdev_enqueue_burst()
    1524  struct rte_regex_ops **ops, uint16_t nb_ops)    in rte_regexdev_dequeue_burst() argument
    1535  return (*dev->dequeue)(dev, qp_id, ops, nb_ops);    in rte_regexdev_dequeue_burst()
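The regexdev burst API mirrors cryptodev: static-inline wrappers dispatch to the device's enqueue/dequeue pointers and may process fewer than nb_ops. A caller-side sketch; identifiers and the burst size are illustrative:

```c
#include <rte_regexdev.h>

#define BURST 16	/* illustrative */

/* Submit pattern-match ops and poll once for completed results. */
static void
regex_burst(uint8_t dev_id, uint16_t qp_id, struct rte_regex_ops **ops)
{
	struct rte_regex_ops *done[BURST];
	uint16_t sent, got;

	sent = rte_regexdev_enqueue_burst(dev_id, qp_id, ops, BURST);
	got = rte_regexdev_dequeue_burst(dev_id, qp_id, done, BURST);
	/* done[0..got-1] carry match results; resubmit ops[sent..]. */
	(void)sent;
	(void)got;
}
```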