/dpdk/app/test/

  test_pie.c
    247  *tcfg->tvar->enqueued = 0;  in test_rte_pie_init()
    291  uint32_t *enqueued,  in enqueue_dequeue_func() argument
    305  (*enqueued)++;  in enqueue_dequeue_func()
    344  .enqueued = ft_enqueued,
    391  *tcfg->tvar->enqueued = 0;  in func_test1()
    567  .enqueued = ft_enqueued,
    726  uint32_t *enqueued,  in enqueue_dequeue_perf() argument
    757  (*enqueued)++;  in enqueue_dequeue_perf()
    783  .enqueued = pt_enqueued
    859  .enqueued = pt_enqueued,
    [all …]

  test_red.c
    294  *tcfg->tvar->enqueued = 0;  in test_rte_red_init()
    402  .enqueued = ft_enqueued,
    482  *tcfg->tvar->enqueued = 0;  in func_test1()
    507  tcfg->tvar->enqueued,  in func_test1()
    614  *tcfg->tvar->enqueued = 0;  in func_test2()
    941  .enqueued = ft5_enqueued,
    1213  uint32_t *enqueued,  in enqueue_dequeue_perf() argument
    1230  (*enqueued)++;  in enqueue_dequeue_perf()
    1250  .enqueued = pt_enqueued,
    1358  .enqueued = pt_enqueued,
    [all …]
/dpdk/drivers/event/opdl/

  opdl_evdev_init.c
    137  uint16_t enqueued = 0;  in opdl_rx_enqueue() local
    139  enqueued = opdl_ring_input(opdl_stage_get_opdl_ring(p->enq_stage_inst),  in opdl_rx_enqueue()
    143  if (!enqueue_check(p, ev, num, enqueued))  in opdl_rx_enqueue()
    147  if (enqueued < num)  in opdl_rx_enqueue()
    150  return enqueued;  in opdl_rx_enqueue()
    268  uint16_t enqueued = 0;  in opdl_disclaim() local
    276  enqueued = opdl_stage_disclaim(p->enq_stage_inst,  in opdl_disclaim()
    280  return enqueue_check(p, ev, num, enqueued);  in opdl_disclaim()
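The opdl hits above trace a recurring driver pattern: try to enqueue num events, note how many the ring actually accepted, and account for any shortfall before returning the count to the caller. A minimal self-contained sketch of that pattern (the ring and stats types are hypothetical stand-ins, not the opdl API):

    #include <stdint.h>
    #include <string.h>

    /* Hypothetical fixed-size ring, only to illustrate the pattern. */
    struct demo_ring {
        void *slots[64];
        uint16_t count;
    };

    struct demo_stats {
        uint64_t enqueue_ok;
        uint64_t enqueue_fail; /* events the ring had no room for */
    };

    /* Accept as many of 'num' events as fit, then mirror
     * opdl_rx_enqueue()'s "if (enqueued < num)" shortfall accounting. */
    static uint16_t
    demo_rx_enqueue(struct demo_ring *r, struct demo_stats *st,
                    void *const ev[], uint16_t num)
    {
        uint16_t room = 64 - r->count;
        uint16_t enqueued = num < room ? num : room;

        memcpy(&r->slots[r->count], ev, enqueued * sizeof(*ev));
        r->count += enqueued;

        st->enqueue_ok += enqueued;
        if (enqueued < num)
            st->enqueue_fail += num - enqueued;

        return enqueued; /* caller retries or drops the remainder */
    }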
/dpdk/doc/guides/cryptodevs/

  scheduler.rst
    105  Round-robin mode, which distributes the enqueued burst of crypto ops
    115  worker and the secondary worker, and distributes the enqueued crypto
    146  operations fail to be enqueued, then they will be enqueued to the secondary
    154  worker cores. The enqueued bursts are distributed among the worker cores in a
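Line 146 of scheduler.rst describes fail-over mode: any ops the primary worker rejects are re-enqueued to the secondary. The same idea expressed directly against two crypto devices with the public burst API, as a hedged sketch (device ids and the shared queue-pair id are assumed already configured):

    #include <rte_cryptodev.h>

    /* Try the primary device first; hand whatever it rejects to the
     * secondary, mirroring the scheduler PMD's fail-over mode. */
    static uint16_t
    failover_enqueue(uint8_t primary, uint8_t secondary, uint16_t qp_id,
                     struct rte_crypto_op **ops, uint16_t nb_ops)
    {
        uint16_t done = rte_cryptodev_enqueue_burst(primary, qp_id,
                                                    ops, nb_ops);
        if (done < nb_ops)
            done += rte_cryptodev_enqueue_burst(secondary, qp_id,
                                                ops + done,
                                                nb_ops - done);
        return done; /* may still be < nb_ops if both queues are full */
    }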
  null.rst
    14  each mbuf in the burst will be enqueued in an internal buffer for collection on
/dpdk/drivers/raw/ioat/

  rte_ioat_rawdev_fns.h
    39  uint64_t enqueued;  member
    160  ioat->xstats.enqueued++;  in __ioat_write_desc()
    214  ioat->xstats.started = ioat->xstats.enqueued;  in __ioat_perform_ops()
/dpdk/drivers/common/qat/

  qat_qp.c
    108  qp->enqueued = qp->dequeued = 0;  in qat_qp_setup()
    315  if ((qp->enqueued - qp->dequeued) == 0) {  in qat_qp_release()
    582  tmp_qp->enqueued - tmp_qp->dequeued;  in qat_enqueue_op_burst()
    627  tmp_qp->enqueued += nb_ops_sent;  in qat_enqueue_op_burst()
    672  tmp_qp->enqueued - tmp_qp->dequeued;  in qat_enqueue_comp_op_burst()
    800  tmp_qp->enqueued += total_descriptors_built;  in qat_enqueue_comp_op_burst()

  qat_qp.h
    97  uint32_t enqueued;  member
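qat_qp.c and qat_qp.h together show the in-flight accounting idiom: enqueued and dequeued are free-running counters, and their unsigned difference is the number of descriptors still inside the hardware queue. Because the subtraction is modular, it stays correct even after a counter wraps. A self-contained sketch (the struct and names are illustrative, not the QAT layout):

    #include <assert.h>
    #include <stdint.h>

    /* Free-running counters; only their difference matters. */
    struct demo_qp {
        uint32_t enqueued;
        uint32_t dequeued;
        uint32_t max_inflight;
    };

    static uint32_t
    demo_inflight(const struct demo_qp *qp)
    {
        /* Wrap-safe: even if 'enqueued' has wrapped past UINT32_MAX
         * while 'dequeued' has not, the modular difference is still
         * the true in-flight count. */
        return qp->enqueued - qp->dequeued;
    }

    static int
    demo_can_enqueue(const struct demo_qp *qp, uint32_t n)
    {
        return demo_inflight(qp) + n <= qp->max_inflight;
    }

    int main(void)
    {
        struct demo_qp qp = { .enqueued = 5,
                              .dequeued = UINT32_MAX - 2,
                              .max_inflight = 128 };
        assert(demo_inflight(&qp) == 8); /* wrapped, still correct */
        return demo_can_enqueue(&qp, 64) ? 0 : 1;
    }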
/dpdk/doc/guides/eventdevs/

  dsw.rst
    62  send enqueued events immediately to the destination port, but instead
    65  In case no more events are enqueued on a port with buffered events,

  opdl.rst
    53  Packets dequeued from this queue do not need to be re-enqueued (as is the
    114  is enqueued on RX.

  cnxk.rst
    42  - HW managed packets enqueued from ethdev to eventdev exposed through event eth
    48  - HW managed event vectorization on CN10K for packets enqueued from ethdev to
/dpdk/app/test-bbdev/

  test_bbdev_perf.c
    2753  unsigned int enqueued;  in throughput_intr_lcore_ldpc_dec() local
    2813  for (enqueued = 0; enqueued < num_to_process;) {  in throughput_intr_lcore_ldpc_dec()
    2826  enqueued += enq;  in throughput_intr_lcore_ldpc_dec()
    2853  unsigned int enqueued;  in throughput_intr_lcore_dec() local
    2901  for (enqueued = 0; enqueued < num_to_process;) {  in throughput_intr_lcore_dec()
    2913  enqueued += enq;  in throughput_intr_lcore_dec()
    2940  unsigned int enqueued;  in throughput_intr_lcore_enc() local
    2987  for (enqueued = 0; enqueued < num_to_process;) {  in throughput_intr_lcore_enc()
    2999  enqueued += enq;  in throughput_intr_lcore_enc()
    3075  for (enqueued = 0; enqueued < num_to_process;) {  in throughput_intr_lcore_ldpc_enc()
    [all …]
/dpdk/doc/guides/sample_app_ug/

  bbdev_app.rst
    18  A packet is received on an ethernet port -> enqueued for downlink baseband
    19  operation -> dequeued from the downlink baseband device -> enqueued for uplink

  server_node_efd.rst
    60  then enqueued to the specified target node id.
    154  The burst of packets received is enqueued in temporary buffers (per node),
    155  and enqueued in the shared ring between the server and the node.
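Lines 154-155 of server_node_efd.rst describe staging received packets in per-node buffers, then moving each buffer into the ring shared between server and node. A sketch of the flush step using the real rte_ring burst API (the staging struct and the drop-on-full policy are assumptions for illustration):

    #include <rte_mbuf.h>
    #include <rte_ring.h>

    #define NODE_BUF_SIZE 32

    /* Per-node staging buffer, filled as received packets are
     * classified by destination node. */
    struct node_buf {
        struct rte_mbuf *pkts[NODE_BUF_SIZE];
        uint16_t count;
    };

    /* Flush one node's staged packets into its shared ring.
     * rte_ring_enqueue_burst() returns how many objects the ring
     * accepted; whatever did not fit is dropped here. */
    static void
    flush_node_buf(struct rte_ring *ring, struct node_buf *buf)
    {
        unsigned int sent = rte_ring_enqueue_burst(ring,
                (void **)buf->pkts, buf->count, NULL);

        for (unsigned int i = sent; i < buf->count; i++)
            rte_pktmbuf_free(buf->pkts[i]); /* no room: drop */

        buf->count = 0;
    }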
/dpdk/doc/guides/bbdevs/

  null.rst
    13  each mbuf in the burst will be enqueued in an internal buffer ring to be
/dpdk/drivers/event/dsw/

  dsw_event.c
    573  uint16_t enqueued = 0;  in dsw_port_transmit_buffered() local
    582  enqueued +=  in dsw_port_transmit_buffered()
    584  buffer+enqueued,  in dsw_port_transmit_buffered()
    585  *buffer_len-enqueued,  in dsw_port_transmit_buffered()
    587  } while (unlikely(enqueued != *buffer_len));  in dsw_port_transmit_buffered()
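These hits outline the flush loop in dsw_port_transmit_buffered(): keep calling the enqueue primitive, advancing past whatever each call accepted, until the whole buffer has been sent. The shape of that loop, with a hypothetical callback standing in for the dsw-internal enqueue:

    #include <stdint.h>

    /* Stand-in for the underlying enqueue primitive: accepts up to
     * 'n' events and returns how many it took (possibly 0 while the
     * destination is full). */
    typedef uint16_t (*enq_fn)(void *ctx, void *const events[], uint16_t n);

    /* Spin until the buffer is fully flushed, as the do/while on
     * line 587 above does. */
    static void
    flush_buffered(enq_fn enq, void *ctx, void **buffer, uint16_t buffer_len)
    {
        uint16_t enqueued = 0;

        do {
            enqueued += enq(ctx, buffer + enqueued,
                            buffer_len - enqueued);
        } while (enqueued != buffer_len);
    }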
/dpdk/examples/bbdev_app/

  main.c
    124  unsigned int enqueued;  member
    562  lcore_id, stats_border, lstats->enqueued);  in print_lcore_stats()
    791  lcore_stats->enqueued += nb_enq;  in run_encoding()
    887  lcore_stats->enqueued += nb_enq;  in run_decoding()
/dpdk/doc/guides/prog_guide/

  bbdev.rst
    82  When an operation is enqueued to a specific queue ID, the result is dequeued
    186  core which it was enqueued on. This means that a baseband burst enqueue/dequeue
    299  The enqueue function returns the number of operations it actually enqueued for
    301  enqueued.
    550  are being enqueued.
    552  **NOTE:** It is assumed that all enqueued ops in one ``rte_bbdev_enqueue_enc_ops()``
    592  The case when one CB belongs to TB and is being enqueued individually to BBDEV,
    656  to a bigger TB are being enqueued.
    697  The case when one CB belongs to TB and is being enqueued individually to BBDEV,
    832  **NOTE:** All enqueued ops in one ``rte_bbdev_enqueue_enc_ops()``
    [all …]
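Lines 299-301 of bbdev.rst state the enqueue contract: the call returns how many of the requested ops it actually enqueued, which may be fewer than asked. Callers resubmit the tail, exactly as the test_bbdev_perf.c loops above do. A hedged sketch against rte_bbdev_enqueue_enc_ops() (device, queue, and op array assumed already prepared; a real caller would interleave dequeues so the queue can drain instead of spinning):

    #include <rte_bbdev.h>

    /* Enqueue all 'num_ops' encode ops, resubmitting whatever the
     * queue did not accept on each pass. */
    static void
    enqueue_all_enc_ops(uint16_t dev_id, uint16_t queue_id,
                        struct rte_bbdev_enc_op **ops, uint16_t num_ops)
    {
        uint16_t enqueued = 0;

        while (enqueued < num_ops)
            enqueued += rte_bbdev_enqueue_enc_ops(dev_id, queue_id,
                                                  ops + enqueued,
                                                  num_ops - enqueued);
    }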
  regexdev.rst
    165  The enqueue function returns the number of operations it actually enqueued for
    167  enqueued.
  event_timer_adapter.rst
    45  The event contained by an event timer is enqueued in the event device when the
    50  which the timer expiry event should be enqueued
    272  Once an event timer has successfully enqueued a timer expiry event in the event
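Lines 45-50 say the event carried by a timer is enqueued into the event device on expiry, into a destination queue chosen when the timer is armed. A sketch of arming one timer with rte_event_timer_arm_burst() (the adapter is assumed created and started elsewhere; the event-field choices are illustrative):

    #include <rte_event_timer_adapter.h>

    /* Arm a single timer whose expiry event will be enqueued to
     * 'dest_queue_id' after 'ticks' adapter ticks. */
    static int
    arm_expiry_timer(struct rte_event_timer_adapter *adapter,
                     struct rte_event_timer *tim,
                     uint8_t dest_queue_id, uint64_t ticks)
    {
        *tim = (struct rte_event_timer) {
            .ev.op = RTE_EVENT_OP_NEW,
            .ev.queue_id = dest_queue_id, /* where the expiry lands */
            .ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
            .ev.event_type = RTE_EVENT_TYPE_TIMER,
            .state = RTE_EVENT_TIMER_NOT_ARMED,
            .timeout_ticks = ticks,
        };

        struct rte_event_timer *tims[] = { tim };

        return rte_event_timer_arm_burst(adapter, tims, 1) == 1 ? 0 : -1;
    }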
  cryptodev_lib.rst
    160  core which it was enqueued on. This means that a crypto burst enqueue/dequeue
    393  The enqueue function returns the number of operations it actually enqueued for
    395  enqueued.
    734  return the number of operations enqueued or stored (explained as follows) and
    738  - ``1``: the operation(s) is/are enqueued successfully.
    740  but is not actually enqueued. The user shall call
    749  invalidate any operation stored in the device queue but not enqueued. This
    750  feature is useful when the user wants to abandon partially enqueued operations
    758  functions to retrieve dequeue count from the enqueued user data and write the
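Lines 393-395 give the burst contract: rte_cryptodev_enqueue_burst() returns the number of ops actually enqueued for processing. A small sketch of bounded resubmission of the rejected tail (the retry limit is an arbitrary illustration):

    #include <rte_cryptodev.h>

    /* Enqueue a burst, retrying the rejected tail a few times before
     * giving up; returns how many ops were ultimately accepted. */
    static uint16_t
    enqueue_with_retry(uint8_t dev_id, uint16_t qp_id,
                       struct rte_crypto_op **ops, uint16_t nb_ops)
    {
        uint16_t sent = 0;
        int retries = 3; /* arbitrary bound for illustration */

        while (sent < nb_ops && retries-- > 0)
            sent += rte_cryptodev_enqueue_burst(dev_id, qp_id,
                                                ops + sent,
                                                nb_ops - sent);
        return sent;
    }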
  compressdev.rst
    102  core on which it was enqueued. This means that a compression burst enqueue/dequeue
    233  ``rte_compressdev_enqueue_burst()``. If number ops enqueued < number ops requested then
    589  that for stateful ops only one op at-a-time should be enqueued from a particular stream i.e. no-two…
    618  The enqueue function returns the number of operations it actually enqueued for
    620  enqueued.
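Line 589 is the stateful constraint: from any one stream, only one op may be in flight at a time, which serializes the per-stream loop. A sketch with the compressdev burst calls (mempool, stream, and op setup are omitted; the busy-poll is for illustration only):

    #include <rte_comp.h>
    #include <rte_compressdev.h>

    /* For a stateful stream each op is enqueued alone and fully
     * dequeued before the next op from that stream is submitted. */
    static int
    process_stream_op(uint8_t dev_id, uint16_t qp_id,
                      struct rte_comp_op *op)
    {
        if (rte_compressdev_enqueue_burst(dev_id, qp_id, &op, 1) != 1)
            return -1; /* queue full: caller may retry later */

        struct rte_comp_op *done = NULL;

        /* Busy-poll for completion; a real application would bound
         * this wait or do other work in between. */
        while (rte_compressdev_dequeue_burst(dev_id, qp_id, &done, 1) == 0)
            ;

        return done->status == RTE_COMP_OP_STATUS_SUCCESS ? 0 : -1;
    }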
  event_crypto_adapter.rst
    127  operations should be enqueued to the crypto adapter using
    316  enqueued event counts are a sum of the counts from the eventdev PMD callbacks
/dpdk/doc/guides/rawdevs/

  ioat.rst
    232  informs the device hardware of the elements enqueued on the ring, and the
    234  reasons, a burst of operations will be enqueued to the device via multiple
    273  were enqueued.
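Lines 232-234 describe the ioat doorbell model: enqueue calls only stage descriptors on the ring, and a separate perform-ops call informs the hardware of everything staged since the last call, so one doorbell can cover a whole burst built by multiple enqueues. A hedged sketch of that batching (per the rawdev-based ioat inline API of this era; treat the exact signatures as indicative):

    #include <rte_ioat_rawdev.h>

    /* Stage up to 'n' copies on the ring, then ring the doorbell
     * once for the whole batch. */
    static unsigned int
    copy_burst(int dev_id, rte_iova_t srcs[], rte_iova_t dsts[],
               unsigned int len, unsigned int n)
    {
        unsigned int i, enqueued = 0;

        for (i = 0; i < n; i++)
            enqueued += rte_ioat_enqueue_copy(dev_id, srcs[i], dsts[i],
                                              len, 0, 0);

        /* Single doorbell: this is the call that tells the hardware
         * about the descriptors enqueued above. */
        rte_ioat_perform_ops(dev_id);

        return enqueued; /* may be < n if the ring filled up */
    }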
/dpdk/doc/guides/rel_notes/

  release_17_05.rst
    262  * Added a packet-size based distribution mode, which distributes the enqueued
    265  primary slave first. Then, any operation that cannot be enqueued is
    266  enqueued to a secondary slave.
    332  operate on multiple packets now return the number of elements enqueued