
Searched refs:qidx (Results 1 – 25 of 34) sorted by relevance


/f-stack/dpdk/drivers/net/thunderx/base/
nicvf_hw.c
272 uint16_t qidx; in nicvf_handle_qset_err_intr() local
278 for (qidx = 0; qidx < MAX_CMP_QUEUES_PER_QS; qidx++) { in nicvf_handle_qset_err_intr()
293 for (qidx = 0; qidx < MAX_SND_QUEUES_PER_QS; qidx++) { in nicvf_handle_qset_err_intr()
308 for (qidx = 0; qidx < MAX_RCV_BUF_DESC_RINGS_PER_QS; qidx++) { in nicvf_handle_qset_err_intr()
310 NIC_QSET_RBDR_0_1_STATUS0, qidx); in nicvf_handle_qset_err_intr()
406 if (nicvf_qset_poll_reg(nic, qidx, in nicvf_qset_rbdr_reclaim()
411 if (nicvf_qset_poll_reg(nic, qidx, in nicvf_qset_rbdr_reclaim()
554 if (nicvf_mbox_sq_config(nic, qidx)) in nicvf_qset_sq_config()
606 pf_rq_cfg.cq_idx = qidx; in nicvf_qset_rq_config()
902 uint16_t qidx) in nicvf_hw_get_rx_qstats() argument
[all …]
nicvf_hw.h
124 nicvf_qset_base(struct nicvf *nic, uint32_t qidx) in nicvf_qset_base() argument
126 return nic->reg_base + (qidx << NIC_Q_NUM_SHIFT); in nicvf_qset_base()
133 nicvf_addr_write(nicvf_qset_base(nic, qidx) + offset, val); in nicvf_queue_reg_write()
177 int nicvf_qset_rq_config(struct nicvf *nic, uint16_t qidx,
179 int nicvf_qset_rq_reclaim(struct nicvf *nic, uint16_t qidx);
181 int nicvf_qset_cq_config(struct nicvf *nic, uint16_t qidx,
183 int nicvf_qset_cq_reclaim(struct nicvf *nic, uint16_t qidx);
185 int nicvf_qset_sq_config(struct nicvf *nic, uint16_t qidx,
187 int nicvf_qset_sq_reclaim(struct nicvf *nic, uint16_t qidx);
214 struct nicvf_hw_rx_qstats *qstats, uint16_t qidx);
[all …]
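
Note: the nicvf_hw.c/nicvf_hw.h hits above center on per-queue register addressing: the queue's register window is the BAR base plus qidx shifted into the queue-select bits (nicvf_qset_base), and individual registers are accessed relative to that window. Below is a minimal, self-contained sketch of that addressing pattern; the shift value, struct, and BAR base are illustrative stand-ins, not the ThunderX hardware's actual constants.

    /* Per-queue register addressing, as suggested by the nicvf_hw.h hits:
     * base + (qidx << SHIFT) selects the queue's register window. */
    #include <stdint.h>
    #include <stdio.h>

    #define Q_NUM_SHIFT 18u          /* hypothetical size of one queue's window (256 KiB) */

    struct vf_dev {
        uint64_t reg_base;           /* would be a mapped BAR address in a real driver */
    };

    static uint64_t qset_base(const struct vf_dev *dev, uint32_t qidx)
    {
        return dev->reg_base + ((uint64_t)qidx << Q_NUM_SHIFT);
    }

    int main(void)
    {
        struct vf_dev dev = { .reg_base = 0x840000000000ULL };  /* made-up BAR base */
        for (uint32_t qidx = 0; qidx < 4; qidx++)
            printf("queue %u register window starts at 0x%llx\n",
                   (unsigned)qidx, (unsigned long long)qset_base(&dev, qidx));
        return 0;
    }
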
nicvf_mbox.c
273 nicvf_mbox_rq_config(struct nicvf *nic, uint16_t qidx, in nicvf_mbox_rq_config() argument
280 mbx.rq.rq_num = qidx; in nicvf_mbox_rq_config()
286 nicvf_mbox_sq_config(struct nicvf *nic, uint16_t qidx) in nicvf_mbox_sq_config() argument
292 mbx.sq.sq_num = qidx; in nicvf_mbox_sq_config()
294 mbx.sq.cfg = (nic->vf_id << 3) | qidx; in nicvf_mbox_sq_config()
336 nicvf_mbox_rq_drop_config(struct nicvf *nic, uint16_t qidx, bool enable) in nicvf_mbox_rq_drop_config() argument
344 mbx.rq.rq_num = qidx; in nicvf_mbox_rq_drop_config()
377 nicvf_mbox_rq_bp_config(struct nicvf *nic, uint16_t qidx, bool enable) in nicvf_mbox_rq_bp_config() argument
383 mbx.rq.rq_num = qidx; in nicvf_mbox_rq_bp_config()
nicvf_mbox.h
207 int nicvf_mbox_rq_config(struct nicvf *nic, uint16_t qidx,
209 int nicvf_mbox_sq_config(struct nicvf *nic, uint16_t qidx);
210 int nicvf_mbox_rq_drop_config(struct nicvf *nic, uint16_t qidx, bool enable);
211 int nicvf_mbox_rq_bp_config(struct nicvf *nic, uint16_t qidx, bool enable);
/f-stack/dpdk/drivers/net/thunderx/
nicvf_ethdev.c
235 for (qidx = rx_start; qidx <= rx_end; qidx++) { in nicvf_dev_stats_get()
248 for (qidx = tx_start; qidx <= tx_end; qidx++) { in nicvf_dev_stats_get()
267 for (qidx = rx_start; qidx <= rx_end; qidx++) { in nicvf_dev_stats_get()
280 for (qidx = tx_start; qidx <= tx_end; qidx++) { in nicvf_dev_stats_get()
674 for (qidx = rx_start; qidx <= rx_end; qidx++) { in nicvf_rbdr_release_mbuf()
1455 for (qidx = rx_start; qidx <= rx_end; qidx++) { in rbdr_rte_mempool_get()
1504 for (qidx = rx_start; qidx <= rx_end; qidx++) { in nicvf_vf_start()
1658 for (qidx = rx_start; qidx <= rx_end; qidx++) in nicvf_vf_start()
1661 for (qidx = tx_start; qidx <= tx_end; qidx++) in nicvf_vf_start()
1825 for (qidx = tx_start; qidx <= tx_end; qidx++) in nicvf_vf_stop()
[all …]
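
Note: every nicvf_ethdev.c hit is the same inclusive per-queue loop (rx_start..rx_end, tx_start..tx_end), for example to aggregate per-queue counters in the stats callback. A rough sketch of that aggregation shape, using invented counter types:

    #include <stdint.h>
    #include <stdio.h>

    /* Invented per-queue and aggregate counter types for illustration. */
    struct q_stats   { uint64_t packets, bytes; };
    struct dev_stats { uint64_t ipackets, ibytes; };

    /* Sum RX counters over the inclusive queue range [rx_start, rx_end],
     * mirroring the loop shape seen in nicvf_dev_stats_get(). */
    static void stats_get(const struct q_stats *rxq, uint16_t rx_start,
                          uint16_t rx_end, struct dev_stats *out)
    {
        for (uint16_t qidx = rx_start; qidx <= rx_end; qidx++) {
            out->ipackets += rxq[qidx].packets;
            out->ibytes   += rxq[qidx].bytes;
        }
    }

    int main(void)
    {
        struct q_stats rxq[4] = { {10, 1000}, {20, 2000}, {30, 3000}, {40, 4000} };
        struct dev_stats total = { 0, 0 };
        stats_get(rxq, 0, 3, &total);
        printf("packets=%llu bytes=%llu\n",
               (unsigned long long)total.ipackets, (unsigned long long)total.ibytes);
        return 0;
    }
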
/f-stack/dpdk/drivers/net/octeontx/
octeontx_ethdev.c
904 uint16_t qidx) in octeontx_vf_start_tx_queue() argument
914 txq = dev->data->tx_queues[qidx]; in octeontx_vf_start_tx_queue()
943 qidx = qidx % PKO_VF_NUM_DQ; in octeontx_dev_tx_queue_start()
949 uint16_t qidx) in octeontx_vf_stop_tx_queue() argument
969 qidx = qidx % PKO_VF_NUM_DQ; in octeontx_dev_tx_queue_stop()
1016 qidx); in octeontx_dev_tx_queue_setup()
1018 dev->data->tx_queues[qidx] = NULL; in octeontx_dev_tx_queue_setup()
1032 dev->data->tx_queues[qidx] = txq; in octeontx_dev_tx_queue_setup()
1202 port, qidx); in octeontx_dev_rx_queue_setup()
1211 rxq->queue_id = qidx; in octeontx_dev_rx_queue_setup()
[all …]
octeontx_ethdev.h
169 int octeontx_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t qidx);
170 int octeontx_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t qidx);
/f-stack/dpdk/drivers/net/octeontx2/
otx2_stats.c
54 uint32_t qidx, i; in otx2_nix_dev_stats_get() local
83 qidx = dev->txmap[i] & 0xFFFF; in otx2_nix_dev_stats_get()
84 reg = (((uint64_t)qidx) << 32); in otx2_nix_dev_stats_get()
108 qidx = dev->rxmap[i] & 0xFFFF; in otx2_nix_dev_stats_get()
109 reg = (((uint64_t)qidx) << 32); in otx2_nix_dev_stats_get()
313 aq->qidx = i; in nix_queue_stats_reset()
322 aq->qidx = i; in nix_queue_stats_reset()
347 aq->qidx = i; in nix_queue_stats_reset()
356 aq->qidx = i; in nix_queue_stats_reset()
otx2_ethdev.c
308 aq->qidx = qid; in nix_cq_rq_init()
354 aq->qidx = qid; in nix_cq_rq_init()
394 aq->qidx = qid; in nix_cq_rq_init()
416 aq->qidx = qid; in nix_cq_rq_init()
439 aq->qidx = rxq->rq; in nix_rq_enb_dis()
460 aq->qidx = rxq->rq; in nix_cq_rq_uninit()
475 aq->qidx = rxq->rq; in nix_cq_rq_uninit()
497 aq->qidx = rxq->rq; in nix_cq_rq_uninit()
931 sq->qidx = txq->sq; in nix_sq_init()
2061 qidx, rc); in otx2_nix_tx_queue_start()
[all …]
otx2_ethdev.h
415 uint32_t otx2_nix_rx_queue_count(struct rte_eth_dev *eth_dev, uint16_t qidx);
426 int otx2_nix_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t qidx);
427 int otx2_nix_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t qidx);
otx2_rss.c
33 req->qidx = (group * rss->rss_size) + idx; in otx2_nix_rss_tbl_init()
56 req->qidx = (group * rss->rss_size) + idx; in otx2_nix_rss_tbl_init()
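
Note: in the otx2_rss.c hits, qidx is the flat index of an RSS table entry: the group's base offset (group * rss_size) plus the slot within the group. A small sketch of that indexing, spreading RX queues over a flat table; all names and sizes here are illustrative.

    #include <stdint.h>
    #include <stdio.h>

    /* Flat RSS table index = group base + slot, as in otx2_nix_rss_tbl_init(). */
    static uint32_t rss_tbl_index(uint32_t group, uint32_t rss_size, uint32_t idx)
    {
        return group * rss_size + idx;
    }

    int main(void)
    {
        enum { RSS_SIZE = 8, NB_RXQ = 3, NB_GROUPS = 2 };
        uint16_t tbl[NB_GROUPS * RSS_SIZE];

        for (uint32_t group = 0; group < NB_GROUPS; group++)
            for (uint32_t idx = 0; idx < RSS_SIZE; idx++)
                tbl[rss_tbl_index(group, RSS_SIZE, idx)] = idx % NB_RXQ; /* spread RX queues */

        for (uint32_t i = 0; i < NB_GROUPS * RSS_SIZE; i++)
            printf("entry %2u -> rxq %u\n", (unsigned)i, (unsigned)tbl[i]);
        return 0;
    }
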
/f-stack/dpdk/drivers/crypto/ccp/
ccp_crypto.c
1551 cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE; in ccp_perform_passthru()
1619 cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE; in ccp_perform_hmac()
1702 cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE; in ccp_perform_hmac()
1792 cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE; in ccp_perform_sha()
1872 cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE; in ccp_perform_sha3_hmac()
1948 cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE; in ccp_perform_sha3_hmac()
2013 cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE; in ccp_perform_sha3()
2260 cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE; in ccp_perform_aes()
2353 cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE; in ccp_perform_3des()
2442 cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE; in ccp_perform_aes_gcm()
[all …]
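
Note: every ccp_crypto.c hit is the same idiom: after a command descriptor is written, the queue's qidx advances modulo COMMANDS_PER_QUEUE, so the descriptor ring wraps around. A stripped-down sketch of that ring advance; the ring size and struct are made up.

    #include <stdint.h>
    #include <stdio.h>

    #define COMMANDS_PER_QUEUE 16     /* illustrative ring size */

    struct cmd_queue {
        uint64_t qidx;                /* next free descriptor slot */
    };

    /* Claim the current slot and advance the ring index with wrap-around,
     * mirroring the "(qidx + 1) % COMMANDS_PER_QUEUE" idiom in ccp_crypto.c. */
    static uint64_t cmd_queue_next_slot(struct cmd_queue *q)
    {
        uint64_t slot = q->qidx;
        q->qidx = (q->qidx + 1) % COMMANDS_PER_QUEUE;
        return slot;
    }

    int main(void)
    {
        struct cmd_queue q = { .qidx = 14 };
        for (int i = 0; i < 4; i++)
            printf("descriptor goes into slot %llu\n",
                   (unsigned long long)cmd_queue_next_slot(&q));  /* 14, 15, 0, 1 */
        return 0;
    }
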
ccp_dev.c
49 if (dev->qidx >= dev->cmd_q_count) in ccp_allot_queue()
50 dev->qidx = 0; in ccp_allot_queue()
51 ret = rte_atomic64_read(&dev->cmd_q[dev->qidx].free_slots); in ccp_allot_queue()
53 return &dev->cmd_q[dev->qidx]; in ccp_allot_queue()
55 dev->qidx++; in ccp_allot_queue()
56 if (dev->qidx >= dev->cmd_q_count) in ccp_allot_queue()
57 dev->qidx = 0; in ccp_allot_queue()
58 ret = rte_atomic64_read(&dev->cmd_q[dev->qidx].free_slots); in ccp_allot_queue()
60 return &dev->cmd_q[dev->qidx]; in ccp_allot_queue()
496 dev->qidx = 0; in ccp_add_device()
[all …]
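
Note: the ccp_dev.c hits (ccp_allot_queue) use qidx at the device level as a round-robin cursor over the command queues, moving on when the current queue has no free slots. A simplified sketch of that selection loop with invented types; the real code reads free_slots with rte_atomic64_read().

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative stand-ins for the CCP device and its command queues. */
    struct cmd_q { int64_t free_slots; };
    struct ccp_device {
        int          qidx;            /* round-robin cursor, as in ccp_dev.c */
        unsigned int cmd_q_count;
        struct cmd_q cmd_q[4];
    };

    /* Pick a command queue with free slots, advancing the cursor round-robin.
     * Returns NULL if every queue is full; loosely follows ccp_allot_queue(). */
    static struct cmd_q *allot_queue(struct ccp_device *dev)
    {
        for (unsigned int tries = 0; tries < dev->cmd_q_count; tries++) {
            if ((unsigned int)dev->qidx >= dev->cmd_q_count)
                dev->qidx = 0;
            if (dev->cmd_q[dev->qidx].free_slots > 0)
                return &dev->cmd_q[dev->qidx];
            dev->qidx++;
        }
        return NULL;
    }

    int main(void)
    {
        struct ccp_device dev = { .qidx = 0, .cmd_q_count = 4,
                                  .cmd_q = { {0}, {0}, {5}, {2} } };
        struct cmd_q *q = allot_queue(&dev);
        printf("picked queue %td\n", q ? q - dev.cmd_q : (ptrdiff_t)-1);  /* queue 2 */
        return 0;
    }
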
ccp_dev.h
196 uint64_t qidx; /**< queue index */ member
242 int qidx; member
/f-stack/dpdk/drivers/net/enetc/
enetc_ethdev.c
747 enetc_rx_queue_start(struct rte_eth_dev *dev, uint16_t qidx) in enetc_rx_queue_start() argument
754 rx_ring = dev->data->rx_queues[qidx]; in enetc_rx_queue_start()
761 dev->data->rx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STARTED; in enetc_rx_queue_start()
768 enetc_rx_queue_stop(struct rte_eth_dev *dev, uint16_t qidx) in enetc_rx_queue_stop() argument
775 rx_ring = dev->data->rx_queues[qidx]; in enetc_rx_queue_stop()
782 dev->data->rx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED; in enetc_rx_queue_stop()
789 enetc_tx_queue_start(struct rte_eth_dev *dev, uint16_t qidx) in enetc_tx_queue_start() argument
796 tx_ring = dev->data->tx_queues[qidx]; in enetc_tx_queue_start()
803 dev->data->tx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STARTED; in enetc_tx_queue_start()
810 enetc_tx_queue_stop(struct rte_eth_dev *dev, uint16_t qidx) in enetc_tx_queue_stop() argument
[all …]
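
Note: the enetc_ethdev.c hits follow the common ethdev per-queue start/stop shape: look the ring up by qidx in the device's queue array, enable or disable it, then record the new state in the matching slot of the queue-state array. A generic sketch of that bookkeeping; the types and the "enable" step are placeholders.

    #include <stdint.h>
    #include <stdio.h>

    enum queue_state { QUEUE_STATE_STOPPED = 0, QUEUE_STATE_STARTED = 1 };

    /* Placeholder ring and device structures standing in for the driver's own. */
    struct ring { int hw_enabled; };
    struct eth_dev {
        struct ring     *rx_queues[8];
        enum queue_state rx_queue_state[8];
    };

    /* Start RX queue 'qidx': enable the ring, then record the state,
     * mirroring the shape of enetc_rx_queue_start(). */
    static int rx_queue_start(struct eth_dev *dev, uint16_t qidx)
    {
        struct ring *rx_ring = dev->rx_queues[qidx];
        if (rx_ring == NULL)
            return -1;
        rx_ring->hw_enabled = 1;                        /* a real driver pokes hardware here */
        dev->rx_queue_state[qidx] = QUEUE_STATE_STARTED;
        return 0;
    }

    int main(void)
    {
        struct ring r = { 0 };
        struct eth_dev dev = { .rx_queues = { [2] = &r } };
        int rc = rx_queue_start(&dev, 2);
        printf("start rc=%d, queue 2 state=%d\n", rc, (int)dev.rx_queue_state[2]);
        return 0;
    }
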
/f-stack/dpdk/drivers/net/ark/
ark_ethdev_rx.c
131 int qidx = queue_idx; in eth_ark_dev_rx_queue_setup() local
171 queue->phys_qid = qidx; in eth_ark_dev_rx_queue_setup()
198 queue->udm = RTE_PTR_ADD(ark->udm.v, qidx * ARK_UDM_QOFFSET); in eth_ark_dev_rx_queue_setup()
199 queue->mpu = RTE_PTR_ADD(ark->mpurx.v, qidx * ARK_MPU_QOFFSET); in eth_ark_dev_rx_queue_setup()
206 nb_desc, qidx); in eth_ark_dev_rx_queue_setup()
211 status = eth_ark_rx_hw_setup(dev, queue, qidx, queue_idx); in eth_ark_dev_rx_queue_setup()
217 qidx, in eth_ark_dev_rx_queue_setup()
ark_ethdev_tx.c
220 int qidx = queue_idx; in eth_ark_tx_queue_setup() local
245 queue->phys_qid = qidx; in eth_ark_tx_queue_setup()
269 queue->ddm = RTE_PTR_ADD(ark->ddm.v, qidx * ARK_DDM_QOFFSET); in eth_ark_tx_queue_setup()
270 queue->mpu = RTE_PTR_ADD(ark->mputx.v, qidx * ARK_MPU_QOFFSET); in eth_ark_tx_queue_setup()
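
Note: in both ark files, qidx locates the queue's slice of a hardware block: a base pointer plus qidx times a fixed per-queue offset (RTE_PTR_ADD(base, qidx * ARK_..._QOFFSET)). A bare-bones sketch of that offset arithmetic with a made-up block size:

    #include <stdint.h>
    #include <stdio.h>

    #define QOFFSET 0x1000u   /* hypothetical size of one queue's register block */

    /* Byte-offset pointer arithmetic, like RTE_PTR_ADD(base, qidx * QOFFSET). */
    static void *queue_block(void *base, uint16_t qidx)
    {
        return (void *)((uintptr_t)base + (uintptr_t)qidx * QOFFSET);
    }

    int main(void)
    {
        static uint8_t fake_bar[8 * QOFFSET];   /* stands in for a mapped device region */
        for (uint16_t qidx = 0; qidx < 3; qidx++)
            printf("queue %u block at offset 0x%lx\n", (unsigned)qidx,
                   (unsigned long)((uint8_t *)queue_block(fake_bar, qidx) - fake_bar));
        return 0;
    }
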
/f-stack/freebsd/contrib/octeon-sdk/
cvmx-pow.c
615 int qidx = idx % 3; in __cvmx_pow_display_v2() local
617 if (head[qidx] == tail[qidx]) in __cvmx_pow_display_v2()
618 valid[qidx] = 0; in __cvmx_pow_display_v2()
620 if (__cvmx_pow_entry_mark_list(head[qidx], CVMX_POW_LIST_FREE, entry_list)) in __cvmx_pow_display_v2()
622 head[qidx] = dump->smemload[head[qidx]][4].s_smemload3_cn68xx.fwd_index; in __cvmx_pow_display_v2()
/f-stack/dpdk/drivers/net/nfp/
nfp_net_pmd.h
249 int qidx; member
380 int qidx; member
nfp_net.c
1568 rxq->qidx = queue_idx; in nfp_net_rx_queue_setup()
1651 (unsigned)rxq->qidx); in nfp_net_rx_fill_freelist()
1755 txq->qidx = queue_idx; in nfp_net_tx_queue_setup()
2065 rxq->port_id, (unsigned int)rxq->qidx); in nfp_net_recv_pkts()
2151 rxq->port_id, (unsigned int)rxq->qidx, nb_hold); in nfp_net_recv_pkts()
2162 rxq->port_id, (unsigned int)rxq->qidx, in nfp_net_recv_pkts()
2185 " status", txq->qidx); in nfp_net_tx_free_bufs()
2192 "packets (%u, %u)", txq->qidx, in nfp_net_tx_free_bufs()
2257 txq->qidx, txq->wr_p, nb_pkts); in nfp_net_xmit_pkts()
2271 txq->qidx, nb_pkts); in nfp_net_xmit_pkts()
/f-stack/dpdk/drivers/event/octeontx2/
otx2_evdev_adptr.c
33 aq->qidx = qid; in sso_rxq_enable()
51 aq->qidx = qid; in sso_rxq_enable()
134 aq->qidx = qid; in sso_rxq_disable()
152 aq->qidx = qid; in sso_rxq_disable()
/f-stack/dpdk/examples/rxtx_callbacks/
main.c
61 add_timestamps(uint16_t port __rte_unused, uint16_t qidx __rte_unused, in add_timestamps()
74 calc_latency(uint16_t port, uint16_t qidx __rte_unused, in calc_latency()
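
Note: in the rxtx_callbacks sample (and in librte_pdump below), qidx is the queue index parameter of an ethdev RX/TX callback. The sketch below registers a small RX callback on one (port, queue) pair; it assumes a DPDK build environment and an already-configured port, and the counter passed through user_param is this example's choice, not part of the API.

    #include <rte_ethdev.h>
    #include <rte_mbuf.h>

    /* RX callback: called for every burst received on (port, qidx). */
    static uint16_t
    count_rx(uint16_t port __rte_unused, uint16_t qidx __rte_unused,
             struct rte_mbuf **pkts __rte_unused, uint16_t nb_pkts,
             uint16_t max_pkts __rte_unused, void *user_param)
    {
        *(uint64_t *)user_param += nb_pkts;   /* per-registration counter (our choice) */
        return nb_pkts;                       /* packets handed on to the application */
    }

    static uint64_t rx_count;

    /* Install the callback on one RX queue; qidx selects the queue. */
    static int install_cb(uint16_t port_id, uint16_t qidx)
    {
        if (rte_eth_add_rx_callback(port_id, qidx, count_rx, &rx_count) == NULL)
            return -1;
        return 0;
    }
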
/f-stack/dpdk/drivers/net/bnxt/
bnxt_ring.h
69 int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx,
/f-stack/dpdk/doc/guides/sample_app_ug/
rxtx_callbacks.rst
148 add_timestamps(uint16_t port __rte_unused, uint16_t qidx __rte_unused,
174 calc_latency(uint16_t port __rte_unused, uint16_t qidx __rte_unused,
/f-stack/dpdk/lib/librte_pdump/
rte_pdump.c
105 pdump_rx(uint16_t port __rte_unused, uint16_t qidx __rte_unused, in pdump_rx()
115 pdump_tx(uint16_t port __rte_unused, uint16_t qidx __rte_unused, in pdump_tx()
