Searched refs:tx_q (Results 1 – 23 of 23) sorted by relevance

/f-stack/dpdk/app/test/
virtual_pmd.c
141 struct virtual_ethdev_queue *tx_q; in virtual_ethdev_tx_queue_setup_success() local
143 tx_q = (struct virtual_ethdev_queue *)rte_zmalloc_socket(NULL, in virtual_ethdev_tx_queue_setup_success()
146 if (tx_q == NULL) in virtual_ethdev_tx_queue_setup_success()
149 tx_q->port_id = dev->data->port_id; in virtual_ethdev_tx_queue_setup_success()
150 tx_q->queue_id = tx_queue_id; in virtual_ethdev_tx_queue_setup_success()
152 dev->data->tx_queues[tx_queue_id] = tx_q; in virtual_ethdev_tx_queue_setup_success()
363 struct virtual_ethdev_queue *tx_q = queue; in virtual_ethdev_tx_burst_success() local
370 vrtl_eth_dev = &rte_eth_devices[tx_q->port_id]; in virtual_ethdev_tx_burst_success()
394 struct virtual_ethdev_queue *tx_q = NULL; in virtual_ethdev_tx_burst_fail() local
399 tx_q = queue; in virtual_ethdev_tx_burst_fail()
[all …]
/f-stack/dpdk/examples/vhost/
main.c
956 tx_q->m_table, tx_q->len); in do_drain_mbuf_table()
958 free_pkts(&tx_q->m_table[count], tx_q->len - count); in do_drain_mbuf_table()
960 tx_q->len = 0; in do_drain_mbuf_table()
970 struct mbuf_table *tx_q; in virtio_tx_route() local
1045 tx_q->m_table[tx_q->len++] = m; in virtio_tx_route()
1052 do_drain_mbuf_table(tx_q); in virtio_tx_route()
1062 if (tx_q->len == 0) in drain_mbuf_table()
1071 tx_q->len); in drain_mbuf_table()
1191 struct mbuf_table *tx_q; in switch_worker() local
1198 tx_q->txq_id = i; in switch_worker()
[all …]
/f-stack/dpdk/drivers/common/qat/
qat_qp.c
238 if (qat_queue_create(qat_dev, &(qp->tx_q), qat_qp_conf, in qat_qp_setup()
246 ADF_BYTES_TO_MSG_SIZE(qp->tx_q.msg_size)); in qat_qp_setup()
250 qat_queue_delete(&(qp->tx_q)); in qat_qp_setup()
258 qat_queue_delete(&(qp->tx_q)); in qat_qp_setup()
263 adf_queue_arb_enable(&qp->tx_q, qp->mmap_bar_addr, in qat_qp_setup()
327 qat_queue_delete(&(qp->tx_q)); in qat_qp_release()
333 adf_queue_arb_disable(&(qp->tx_q), qp->mmap_bar_addr, in qat_qp_release()
522 struct qat_queue *queue = &qp->tx_q; in adf_configure_queues()
593 queue = &(tmp_qp->tx_q); in qat_enqueue_op_burst()
694 queue = &(tmp_qp->tx_q); in qat_enqueue_comp_op_burst()
[all …]
qat_qp.h
64 struct qat_queue tx_q; member
/f-stack/dpdk/drivers/crypto/qat/
qat_sym_hw_dp.c
46 tx_queue = &qp->tx_q; in qat_sym_dp_parse_data_vec()
230 struct qat_queue *tx_queue = &qp->tx_q; in qat_sym_dp_enqueue_single_aead()
262 struct qat_queue *tx_queue = &qp->tx_q; in qat_sym_dp_enqueue_aead_jobs()
330 struct qat_queue *tx_queue = &qp->tx_q; in qat_sym_dp_enqueue_single_cipher()
361 struct qat_queue *tx_queue = &qp->tx_q; in qat_sym_dp_enqueue_cipher_jobs()
450 struct qat_queue *tx_queue = &qp->tx_q; in qat_sym_dp_enqueue_single_auth()
482 struct qat_queue *tx_queue = &qp->tx_q; in qat_sym_dp_enqueue_auth_jobs()
632 struct qat_queue *tx_queue = &qp->tx_q; in qat_sym_dp_enqueue_single_chain()
665 struct qat_queue *tx_queue = &qp->tx_q; in qat_sym_dp_enqueue_chain_jobs()
824 struct qat_queue *tx_queue = &qp->tx_q; in qat_sym_dp_kick_tail()
[all …]
/f-stack/dpdk/drivers/crypto/bcmfs/
bcmfs_qp.c
178 bcmfs_queue_delete(&qp->tx_q, qp->qpair_id); in bcmfs_qp_release()
235 rc = bcmfs_queue_create(&qp->tx_q, qp_conf, qp->qpair_id, in bcmfs_qp_setup()
298 bcmfs_queue_delete(&qp->tx_q, queue_pair_id); in bcmfs_qp_setup()
bcmfs_qp.h
85 struct bcmfs_queue tx_q; member
/f-stack/dpdk/drivers/net/avp/
avp_ethdev.c
894 avp->tx_q[i] = avp_dev_translate_address(eth_dev, in avp_dev_create()
1699 struct rte_avp_fifo *tx_q; in avp_xmit_scattered_pkts() local
1716 tx_q = avp->tx_q[txq->queue_id]; in avp_xmit_scattered_pkts()
1730 count = avp_fifo_free_count(tx_q); in avp_xmit_scattered_pkts()
1764 nb_pkts, tx_q); in avp_xmit_scattered_pkts()
1804 n = avp_fifo_put(tx_q, (void **)&tx_bufs[0], nb_pkts); in avp_xmit_scattered_pkts()
1820 struct rte_avp_fifo *tx_q; in avp_xmit_pkts() local
1835 tx_q = avp->tx_q[txq->queue_id]; in avp_xmit_pkts()
1846 count = avp_fifo_free_count(tx_q); in avp_xmit_pkts()
1859 count, tx_q); in avp_xmit_pkts()
[all …]
/f-stack/dpdk/drivers/net/pfe/
pfe_hif_lib.c
201 hif_lib_client_cleanup_tx_queue(&client->tx_q[qno]); in hif_lib_client_release_tx_buffers()
219 queue = &client->tx_q[qno]; in hif_lib_client_init_tx_buffers()
496 struct hif_client_tx_queue *queue = &client->tx_q[qno]; in hif_lib_xmit_pkt()
522 struct hif_client_tx_queue *queue = &client->tx_q[qno]; in hif_lib_tx_get_next_complete()
pfe_hif.h
47 struct hif_tx_queue tx_q[HIF_CLIENT_QUEUES_MAX]; member
pfe_hif_lib.h
67 struct hif_client_tx_queue tx_q[HIF_CLIENT_QUEUES_MAX]; member
pfe_hif.c
251 tx_queue = &client->tx_q[i]; in pfe_hif_client_register()
486 struct hif_tx_queue *queue = &hif->client[client_id].tx_q[q_no]; in client_ack_txpacket()
pfe_ethdev.c
523 dev->data->tx_queues[queue_idx] = &priv->client.tx_q[queue_idx]; in pfe_tx_queue_setup()
524 priv->client.tx_q[queue_idx].queue_id = queue_idx; in pfe_tx_queue_setup()
/f-stack/dpdk/lib/librte_kni/
rte_kni.c
70 struct rte_kni_fifo *tx_q; /**< TX queue */ member
277 kni->tx_q = kni->m_tx_q->addr; in rte_kni_alloc()
278 kni_fifo_init(kni->tx_q, KNI_FIFO_COUNT_MAX); in rte_kni_alloc()
446 kni_free_fifo(kni->tx_q); in rte_kni_release()
626 unsigned int ret = kni_fifo_get(kni->tx_q, (void **)mbufs, num); in rte_kni_rx_burst()
/f-stack/dpdk/kernel/linux/kni/
kni_dev.h
58 struct rte_kni_fifo *tx_q; member
kni_net.c
291 if (kni_fifo_free_count(kni->tx_q) == 0 || in kni_net_tx()
319 ret = kni_fifo_put(kni->tx_q, &pkt_va, 1); in kni_net_tx()
447 num_tq = kni_fifo_free_count(kni->tx_q); in kni_net_rx_lo_fifo()
501 ret = kni_fifo_put(kni->tx_q, kni->alloc_va, num); in kni_net_rx_lo_fifo()
kni_misc.c
353 kni->tx_q = iova_to_kva(current, dev_info.tx_phys); in kni_ioctl_create()
370 kni->tx_q = phys_to_virt(dev_info.tx_phys); in kni_ioctl_create()
385 (unsigned long long) dev_info.tx_phys, kni->tx_q); in kni_ioctl_create()
/f-stack/dpdk/drivers/crypto/bcmfs/hw/
bcmfs5_rm.c
389 struct bcmfs_queue *txq = &qp->tx_q; in bcmfs5_enqueue_single_request_qp()
442 struct bcmfs_queue *txq = &qp->tx_q; in bcmfs5_write_doorbell()
538 struct bcmfs_queue *tx_queue = &qp->tx_q; in bcmfs5_start_qp()
bcmfs4_rm.c
458 struct bcmfs_queue *txq = &qp->tx_q; in bcmfs4_enqueue_single_request_qp()
606 struct bcmfs_queue *tx_queue = &qp->tx_q; in bcmfs4_start_qp()
/f-stack/dpdk/drivers/compress/qat/
qat_comp.c
61 struct qat_queue *txq = &(cookie->qp->tx_q); in qat_comp_allocate_split_op_memzones()
358 struct qat_queue *txq = &(qp->tx_q); in qat_comp_build_multiple_requests()
/f-stack/dpdk/drivers/net/dpaa2/
dpaa2_rxtx.c
1520 struct dpaa2_queue *tx_q = priv->tx_vq[0]; in dpaa2_dev_loopback_rx() local
1583 qbman_eq_desc_set_fq(&eqdesc, tx_q->fqid); in dpaa2_dev_loopback_rx()
/f-stack/dpdk/doc/guides/prog_guide/
kernel_nic_interface.rst
298 The sk_buff is then freed and the mbuf sent in the tx_q FIFO.
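
For reference, a minimal user-space sketch of how the tx_q FIFO mentioned in that guide excerpt is drained, assuming an application that already holds a struct rte_kni handle (here called kni); rte_kni_rx_burst() is the librte_kni call that pulls mbufs off kni->tx_q, matching the rte_kni.c hit at line 626 above. The helper name and burst size are placeholders, not code from the tree.

#include <rte_kni.h>
#include <rte_mbuf.h>

#define PKT_BURST_SZ 32

/* Drain packets that the kernel side queued on the KNI tx_q FIFO.
 * rte_kni_rx_burst() internally does kni_fifo_get(kni->tx_q, ...). */
static void
drain_kni_tx_q(struct rte_kni *kni)
{
        struct rte_mbuf *pkts[PKT_BURST_SZ];
        unsigned int i, nb_rx;

        nb_rx = rte_kni_rx_burst(kni, pkts, PKT_BURST_SZ);
        for (i = 0; i < nb_rx; i++)
                rte_pktmbuf_free(pkts[i]); /* or hand off to the application */
}
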
/f-stack/dpdk/doc/guides/nics/
virtio.rst
150 When the virtio port bursts Tx, it is sending packet to the tx_q.
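
For reference, a minimal sketch of the Tx burst described in that guide excerpt, assuming a virtio port that the application has already configured and started; packets handed to rte_eth_tx_burst() are what end up on the port's tx_q. The function name and the port_id, queue_id, and pkts parameters are placeholders supplied by the caller, not code from the tree.

#include <rte_ethdev.h>
#include <rte_mbuf.h>

/* Transmit a burst on a virtio port and free anything the driver
 * did not accept, so no mbufs leak on a full queue. */
static uint16_t
send_burst(uint16_t port_id, uint16_t queue_id,
           struct rte_mbuf **pkts, uint16_t nb_pkts)
{
        uint16_t sent = rte_eth_tx_burst(port_id, queue_id, pkts, nb_pkts);

        while (sent < nb_pkts)
                rte_pktmbuf_free(pkts[sent++]);
        return sent;
}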