
Searched refs:enqueued (Results 1 – 25 of 48) sorted by relevance


/f-stack/dpdk/app/test/
test_red.c
270 *tcfg->tvar->enqueued = 0; in test_rte_red_init()
378 .enqueued = ft_enqueued,
458 *tcfg->tvar->enqueued = 0; in func_test1()
483 tcfg->tvar->enqueued, in func_test1()
590 *tcfg->tvar->enqueued = 0; in func_test2()
917 .enqueued = ft5_enqueued,
1189 uint32_t *enqueued, in enqueue_dequeue_perf() argument
1206 (*enqueued)++; in enqueue_dequeue_perf()
1226 .enqueued = pt_enqueued,
1334 .enqueued = pt_enqueued,
[all …]
/f-stack/dpdk/drivers/event/opdl/
opdl_evdev_init.c
137 uint16_t enqueued = 0; in opdl_rx_enqueue() local
139 enqueued = opdl_ring_input(opdl_stage_get_opdl_ring(p->enq_stage_inst), in opdl_rx_enqueue()
143 if (!enqueue_check(p, ev, num, enqueued)) in opdl_rx_enqueue()
147 if (enqueued < num) in opdl_rx_enqueue()
150 return enqueued; in opdl_rx_enqueue()
268 uint16_t enqueued = 0; in opdl_disclaim() local
276 enqueued = opdl_stage_disclaim(p->enq_stage_inst, in opdl_disclaim()
280 return enqueue_check(p, ev, num, enqueued); in opdl_disclaim()
/f-stack/dpdk/drivers/raw/ioat/
rte_ioat_rawdev_fns.h
58 uint64_t enqueued; member
246 ioat->xstats.enqueued++; in __ioat_write_desc()
300 ioat->xstats.started = ioat->xstats.enqueued; in __ioat_perform_ops()
400 idxd->xstats.enqueued++; in __idxd_write_desc()
479 idxd->xstats.started = idxd->xstats.enqueued; in __idxd_perform_ops()
/f-stack/dpdk/doc/guides/cryptodevs/
scheduler.rst
105 Round-robin mode, which distributes the enqueued burst of crypto ops
115 worker and the secondary worker, and distributes the enqueued crypto
146 operations fail to be enqueued, then they will be enqueued to the secondary
154 worker cores. The enqueued bursts are distributed among the worker cores in a
null.rst
14 each mbuf in the burst will be enqueued in an internal buffer for collection on
/f-stack/dpdk/doc/guides/eventdevs/
dsw.rst
62 send enqueued events immediately to the destination port, but instead
65 In case no more events are enqueued on a port with buffered events,
opdl.rst
53 Packets dequeued from this queue do not need to be re-enqueued (as is the
114 is enqueued on RX.
/f-stack/dpdk/drivers/common/qat/
qat_qp.c
236 qp->enqueued = qp->dequeued = 0; in qat_qp_setup()
326 if ((qp->enqueued - qp->dequeued) == 0) { in qat_qp_release()
610 tmp_qp->enqueued - tmp_qp->dequeued; in qat_enqueue_op_burst()
666 tmp_qp->enqueued += nb_ops_sent; in qat_enqueue_op_burst()
711 tmp_qp->enqueued - tmp_qp->dequeued; in qat_enqueue_comp_op_burst()
839 tmp_qp->enqueued += total_descriptors_built; in qat_enqueue_comp_op_burst()
qat_qp.h
74 uint32_t enqueued; member
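
The qat_qp.c/qat_qp.h hits above show the driver's in-flight accounting: two free-running per-queue-pair counters, with the in-flight count taken as their unsigned (wrap-safe) difference. A minimal sketch of that pattern, using illustrative names rather than the driver's exact structure layout:

    #include <stdint.h>

    struct demo_qp {
        uint32_t max_inflights;
        uint32_t enqueued;   /* total ops ever enqueued */
        uint32_t dequeued;   /* total ops ever dequeued */
    };

    /* Ops currently on the ring; correct across counter wraparound. */
    static inline uint32_t
    demo_qp_inflight(const struct demo_qp *qp)
    {
        return qp->enqueued - qp->dequeued;
    }

    /* Clamp a requested burst to the free space left on the ring. */
    static inline uint32_t
    demo_qp_room(const struct demo_qp *qp, uint32_t nb_ops)
    {
        uint32_t space = qp->max_inflights - demo_qp_inflight(qp);

        return nb_ops < space ? nb_ops : space;
    }
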
/f-stack/dpdk/app/test-bbdev/
test_bbdev_perf.c
2707 unsigned int enqueued; in throughput_intr_lcore_ldpc_dec() local
2768 for (enqueued = 0; enqueued < num_to_process;) { in throughput_intr_lcore_ldpc_dec()
2781 enqueued += enq; in throughput_intr_lcore_ldpc_dec()
2810 unsigned int enqueued; in throughput_intr_lcore_dec() local
2859 for (enqueued = 0; enqueued < num_to_process;) { in throughput_intr_lcore_dec()
2871 enqueued += enq; in throughput_intr_lcore_dec()
2900 unsigned int enqueued; in throughput_intr_lcore_enc() local
2948 for (enqueued = 0; enqueued < num_to_process;) { in throughput_intr_lcore_enc()
2960 enqueued += enq; in throughput_intr_lcore_enc()
3039 for (enqueued = 0; enqueued < num_to_process;) { in throughput_intr_lcore_ldpc_enc()
[all …]
/f-stack/dpdk/doc/guides/sample_app_ug/
bbdev_app.rst
18 A packet is received on an ethernet port -> enqueued for downlink baseband
19 operation -> dequeued from the downlink baseband device -> enqueued for uplink
server_node_efd.rst
60 then enqueued to the specified target node id.
227 The burst of packets received is enqueued in temporary buffers (per node),
228 and enqueued in the shared ring between the server and the node.
l2_forward_crypto.rst
404 Once the operation has been created, it has to be enqueued in one of the crypto devices.
406 When the buffer has enough operations (MAX_PKT_BURST), they are enqueued in the device,
450 crypto_statistics[cparams->dev_id].enqueued += ret;
ioat.rst
421 /* Free any not enqueued packets. */
433 function. When using hardware copy mode the packets are enqueued in
483 /* Free any not enqueued packets. */
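
The ioat.rst sample-application hits above quote the "Free any not enqueued packets" step: mbufs that the staging ring (or device) did not accept must be released, or they leak. A hedged sketch of that cleanup, assuming a plain rte_ring staging buffer and placeholder variable names rather than the sample application's own:

    #include <rte_mbuf.h>
    #include <rte_ring.h>

    static inline void
    demo_enqueue_or_free(struct rte_ring *ring, struct rte_mbuf **pkts,
            uint16_t nb_rx)
    {
        unsigned int nb_enq = rte_ring_enqueue_burst(ring,
                (void **)pkts, nb_rx, NULL);

        /* Free any not enqueued packets. */
        for (unsigned int i = nb_enq; i < nb_rx; i++)
            rte_pktmbuf_free(pkts[i]);
    }
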
/f-stack/dpdk/doc/guides/bbdevs/
null.rst
13 each mbuf in the burst will be enqueued in an internal buffer ring to be
/f-stack/freebsd/contrib/ncsw/Peripherals/FM/HC/
hc.c
137 volatile bool enqueued[HC_CMD_POOL_SIZE]; /* HC is active - frame is enqueued member
216 ASSERT_COND(!p_FmHc->enqueued[seqNum]); in EnQFrm()
217 p_FmHc->enqueued[seqNum] = TRUE; in EnQFrm()
227 while (p_FmHc->enqueued[seqNum] && --timeout) in EnQFrm()
368 if (!(p_FmHc->enqueued[p_HcFrame->commandSequence])) in FmHcTxConf()
371 p_FmHc->enqueued[p_HcFrame->commandSequence] = FALSE; in FmHcTxConf()
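
The hc.c hits above outline a simple handshake: the sender marks a per-sequence-number slot as enqueued, busy-waits with a timeout, and the transmit-confirmation path clears the flag. A rough sketch of that pattern, with hypothetical names and without the driver's delay and error handling:

    #include <stdbool.h>
    #include <stdint.h>

    #define DEMO_POOL_SIZE 8

    static volatile bool demo_enqueued[DEMO_POOL_SIZE];

    /* Sender side: mark the slot busy, hand the frame off, then poll. */
    static int
    demo_send_and_wait(unsigned int seq_num, uint32_t timeout)
    {
        demo_enqueued[seq_num] = true;
        /* ... enqueue the frame to hardware here ... */

        while (demo_enqueued[seq_num] && --timeout)
            ;   /* the confirmation callback clears the flag */

        return timeout ? 0 : -1;   /* 0 = confirmed, -1 = timed out */
    }

    /* Transmit-confirmation side: release the slot. */
    static void
    demo_tx_confirm(unsigned int seq_num)
    {
        demo_enqueued[seq_num] = false;
    }
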
/f-stack/dpdk/drivers/event/dsw/
dsw_event.c
568 uint16_t enqueued = 0; in dsw_port_transmit_buffered() local
577 enqueued += in dsw_port_transmit_buffered()
579 buffer+enqueued, in dsw_port_transmit_buffered()
580 *buffer_len-enqueued, in dsw_port_transmit_buffered()
582 } while (unlikely(enqueued != *buffer_len)); in dsw_port_transmit_buffered()
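
dsw_port_transmit_buffered() above keeps retrying until every buffered event has been handed over, advancing into the buffer by the count already accepted. An application-side analogue of that loop, using the public rte_event_enqueue_burst() API with placeholder dev_id/port_id rather than the driver's internal rings:

    #include <rte_eventdev.h>

    static inline void
    demo_flush_buffered(uint8_t dev_id, uint8_t port_id,
            struct rte_event *buffer, uint16_t buffer_len)
    {
        uint16_t enqueued = 0;

        /* Retry with an offset until the whole buffer is accepted. */
        do {
            enqueued += rte_event_enqueue_burst(dev_id, port_id,
                    buffer + enqueued,
                    buffer_len - enqueued);
        } while (enqueued != buffer_len);
    }
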
/f-stack/dpdk/examples/bbdev_app/
main.c
126 unsigned int enqueued; member
569 lcore_id, stats_border, lstats->enqueued); in print_lcore_stats()
798 lcore_stats->enqueued += nb_enq; in run_encoding()
894 lcore_stats->enqueued += nb_enq; in run_decoding()
/f-stack/dpdk/doc/guides/prog_guide/
bbdev.rst
82 When an operation is enqueued to a specific queue ID, the result is dequeued
186 core which it was enqueued on. This means that a baseband burst enqueue/dequeue
299 The enqueue function returns the number of operations it actually enqueued for
301 enqueued.
550 are being enqueued.
552 **NOTE:** It is assumed that all enqueued ops in one ``rte_bbdev_enqueue_enc_ops()``
592 The case when one CB belongs to TB and is being enqueued individually to BBDEV,
656 to a bigger TB are being enqueued.
697 The case when one CB belongs to TB and is being enqueued individually to BBDEV,
832 **NOTE:** All enqueued ops in one ``rte_bbdev_enqueue_enc_ops()``
[all …]
regexdev.rst
165 The enqueue function returns the number of operations it actually enqueued for
167 enqueued.
event_timer_adapter.rst
45 The event contained by an event timer is enqueued in the event device when the
50 which the timer expiry event should be enqueued
258 Once an event timer has successfully enqueued a timer expiry event in the event
cryptodev_lib.rst
160 core which it was enqueued on. This means that a crypto burst enqueue/dequeue
349 The enqueue function returns the number of operations it actually enqueued for
351 enqueued.
690 return the number of operations enqueued or stored (explained as follows) and
694 - ``1``: the operation(s) is/are enqueued successfully.
696 but is not actually enqueued. The user shall call
705 invalidate any operation stored in the device queue but not enqueued. This
706 feature is useful when the user wants to abandon partially enqueued operations
714 functions to retrieve dequeue count from the enqueued user data and write the
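
The cryptodev_lib.rst hits above note that the enqueue function returns the number of operations it actually accepted. A minimal sketch of checking that return value with rte_cryptodev_enqueue_burst(); the device/queue identifiers and the retry policy are placeholders:

    #include <rte_cryptodev.h>

    static inline uint16_t
    demo_enqueue_ops(uint8_t dev_id, uint16_t qp_id,
            struct rte_crypto_op **ops, uint16_t nb_ops)
    {
        uint16_t nb_enq = rte_cryptodev_enqueue_burst(dev_id, qp_id,
                ops, nb_ops);

        /* ops[nb_enq .. nb_ops-1] were not enqueued; keep them for a
         * later call rather than dropping them. */
        return nb_enq;
    }
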
compressdev.rst
102 core on which it was enqueued. This means that a compression burst enqueue/dequeue
233 ``rte_compressdev_enqueue_burst()``. If number ops enqueued < number ops requested then
589 that for stateful ops only one op at-a-time should be enqueued from a particular stream i.e. no-two…
618 The enqueue function returns the number of operations it actually enqueued for
620 enqueued.
/f-stack/dpdk/doc/guides/rawdevs/
ioat.rst
209 informs the device hardware of the elements enqueued on the ring, and the
211 reasons, a burst of operations will be enqueued to the device via multiple
250 were enqueued.
/f-stack/dpdk/drivers/crypto/qat/
qat_sym_hw_dp.c
102 RTE_MIN((q->max_inflights - q->enqueued + q->dequeued - c), n)
728 inflight = qp->enqueued - qp->dequeued; in qat_sym_dp_dequeue_burst()
830 qp->enqueued += n; in qat_sym_dp_kick_tail()
