| /dpdk/drivers/net/netvsc/ |
| hn_rxtx.c |
|   156  memset(txd, 0, sizeof(*txd));  in hn_txd_init()
|   344  txd->m = NULL;  in hn_txd_get()
|   345  txd->packets = 0;  in hn_txd_get()
|   346  txd->data_size = 0;  in hn_txd_get()
|   347  txd->chim_size = 0;  in hn_txd_get()
|   349  return txd;  in hn_txd_get()
|   405  if (!txd)  in hn_nvs_send_completed()
|   412  txd->packets, txd->data_size);  in hn_nvs_send_completed()
|  1157  ++txd->packets;  in hn_append_to_chim()
|  1181  if (!txd)  in hn_flush_txagg()
|  [all …]
|
| /dpdk/drivers/net/nfp/ |
| nfp_rxtx.c |
|    39  struct nfp_net_tx_desc *txd,
|    42  struct nfp_net_tx_desc *txd,
|   835  txd->l3_offset = mb->l2_len;  in nfp_net_tx_tso()
|   843  txd->flags = 0;  in nfp_net_tx_tso()
|   844  txd->l3_offset = 0;  in nfp_net_tx_tso()
|   845  txd->l4_offset = 0;  in nfp_net_tx_tso()
|   846  txd->lso_hdrlen = 0;  in nfp_net_tx_tso()
|   847  txd->mss = 0;  in nfp_net_tx_tso()
|   936  txd.data_len = pkt->pkt_len;  in nfp_net_xmit_pkts()
|   943  txd.vlan = pkt->vlan_tci;  in nfp_net_xmit_pkts()
|  [all …]
|
| /dpdk/drivers/net/atlantic/ |
| atl_rxtx.c |
|   753  if (txd->dd)  in atl_dev_tx_descriptor_status()
|  1136  if (txd->dd)  in atl_xmit_cleanup()
|  1157  if (txd->dd)  in atl_xmit_cleanup()
|  1160  txd->buf_addr = 0;  in atl_xmit_cleanup()
|  1161  txd->flags = 0;  in atl_xmit_cleanup()
|  1213  txd->cmd |= tx_cmd;  in atl_setup_csum_offload()
|  1248  txd->flags = 0U;  in atl_xmit_pkt()
|  1253  txd->ct_en = !!tx_cmd;  in atl_xmit_pkt()
|  1260  txd->ct_idx = 0;  in atl_xmit_pkt()
|  1269  txd->flags = 0U;  in atl_xmit_pkt()
|  [all …]
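
The first atlantic hit is the descriptor-status query: a slot counts as done once hardware has written back its dd bit. A minimal sketch of that check, assuming a stand-in descriptor layout and bit position (the real atl_tx_desc bitfields differ):

    #include <stdint.h>
    #include <rte_ethdev.h>

    /* Stand-in for the hardware TX descriptor write-back format. */
    struct txd_sketch {
        uint32_t dd:1;     /* assumed "descriptor done" write-back flag */
        uint32_t rsvd:31;
    };

    static int
    tx_descriptor_status(const volatile struct txd_sketch *txd)
    {
        /* Report done vs. still owned by hardware, as ethdev expects. */
        return txd->dd ? RTE_ETH_TX_DESC_DONE : RTE_ETH_TX_DESC_FULL;
    }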
|
| /dpdk/drivers/net/failsafe/ |
| failsafe_private.h |
|   454  struct sub_device *txd;  in fs_switch_dev() local
|   458  txd = TX_SUBDEV(dev);  in fs_switch_dev()
|   461  if (txd != PREFERRED_SUBDEV(dev) &&  in fs_switch_dev()
|   462  (txd == NULL ||  in fs_switch_dev()
|   464  (txd && txd->state < DEV_STARTED))) {  in fs_switch_dev()
|   468  } else if ((txd && txd->state < req_state) ||  in fs_switch_dev()
|   469  txd == NULL ||  in fs_switch_dev()
|   470  txd == banned) {  in fs_switch_dev()
|
| /dpdk/drivers/net/sfc/ |
| sfc_ef10_tx.c |
|   154  struct sfc_ef10_tx_sw_desc *txd;  in sfc_ef10_tx_reap() local
|   157  txd = &txq->sw_ring[completed & ptr_mask];  in sfc_ef10_tx_reap()
|   158  if (txd->mbuf == NULL)  in sfc_ef10_tx_reap()
|   161  m = rte_pktmbuf_prefree_seg(txd->mbuf);  in sfc_ef10_tx_reap()
|   162  txd->mbuf = NULL;  in sfc_ef10_tx_reap()
|   760  struct sfc_ef10_tx_sw_desc *txd;  in sfc_ef10_simple_tx_reap() local
|   770  bulk[nb++] = txd->mbuf;  in sfc_ef10_simple_tx_reap()
|  1069  struct sfc_ef10_tx_sw_desc *txd;  in sfc_ef10_tx_qreap() local
|  1072  if (txd->mbuf != NULL) {  in sfc_ef10_tx_qreap()
|  1073  rte_pktmbuf_free_seg(txd->mbuf);  in sfc_ef10_tx_qreap()
|  [all …]
|
| sfc_ef100_tx.c |
|   281  struct sfc_ef100_tx_sw_desc *txd;  in sfc_ef100_tx_reap_num_descs() local
|   284  txd = &txq->sw_ring[completed & txq->ptr_mask];  in sfc_ef100_tx_reap_num_descs()
|   285  if (txd->mbuf == NULL)  in sfc_ef100_tx_reap_num_descs()
|   288  m = rte_pktmbuf_prefree_seg(txd->mbuf);  in sfc_ef100_tx_reap_num_descs()
|   292  txd->mbuf = NULL;  in sfc_ef100_tx_reap_num_descs()
|   958  struct sfc_ef100_tx_sw_desc *txd;  in sfc_ef100_tx_qreap() local
|   960  txd = &txq->sw_ring[completed & txq->ptr_mask];  in sfc_ef100_tx_qreap()
|   961  if (txd->mbuf != NULL) {  in sfc_ef100_tx_qreap()
|   962  rte_pktmbuf_free_seg(txd->mbuf);  in sfc_ef100_tx_qreap()
|   963  txd->mbuf = NULL;  in sfc_ef100_tx_qreap()
|
| sfc_tx.c |
|   769  struct sfc_efx_tx_sw_desc *txd;  in sfc_efx_tx_reap() local
|   771  txd = &txq->sw_ring[completed & txq->ptr_mask];  in sfc_efx_tx_reap()
|   773  if (txd->mbuf != NULL) {  in sfc_efx_tx_reap()
|   774  rte_pktmbuf_free(txd->mbuf);  in sfc_efx_tx_reap()
|   775  txd->mbuf = NULL;  in sfc_efx_tx_reap()
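
The three sfc hits above share one completion ("reap") pattern: walk the software ring from the last completed index, hand each mbuf to rte_pktmbuf_prefree_seg(), and clear the slot. A minimal sketch of that loop, assuming a hypothetical sw_txd slot layout and ring/index bookkeeping (the real sfc structures differ):

    #include <rte_mbuf.h>
    #include <rte_mempool.h>

    /* Hypothetical software TX descriptor: one mbuf pointer per ring slot. */
    struct sw_txd {
        struct rte_mbuf *mbuf;
    };

    static void
    tx_reap(struct sw_txd *sw_ring, unsigned int ptr_mask,
            unsigned int *completed, unsigned int pending)
    {
        while (*completed != pending) {
            struct sw_txd *txd = &sw_ring[*completed & ptr_mask];

            if (txd->mbuf != NULL) {
                /* Returns the segment if it can go straight back to its pool. */
                struct rte_mbuf *m = rte_pktmbuf_prefree_seg(txd->mbuf);

                txd->mbuf = NULL;
                if (m != NULL)
                    rte_mempool_put(m->pool, m);
            }
            (*completed)++;
        }
    }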
|
| /dpdk/drivers/net/vmxnet3/ |
| vmxnet3_rxtx.c |
|   501  gdesc->txd.addr =  in vmxnet3_xmit_pkts()
|   526  gdesc->txd.ti = 1;  in vmxnet3_xmit_pkts()
|   527  gdesc->txd.tci = txm->vlan_tci;  in vmxnet3_xmit_pkts()
|   536  gdesc->txd.om = VMXNET3_OM_TSO;  in vmxnet3_xmit_pkts()
|   537  gdesc->txd.msscof = mss;  in vmxnet3_xmit_pkts()
|   541  gdesc->txd.om = VMXNET3_OM_CSUM;  in vmxnet3_xmit_pkts()
|   546  gdesc->txd.msscof = gdesc->txd.hlen +  in vmxnet3_xmit_pkts()
|   550  gdesc->txd.msscof = gdesc->txd.hlen +  in vmxnet3_xmit_pkts()
|   561  gdesc->txd.hlen = 0;  in vmxnet3_xmit_pkts()
|   562  gdesc->txd.om = VMXNET3_OM_NONE;  in vmxnet3_xmit_pkts()
|  [all …]
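
The vmxnet3 hits show the transmit path choosing an offload mode per packet: TSO fills om/hlen/msscof with the segment size, checksum offload points msscof at the L4 checksum field, and everything stays zero otherwise. A rough sketch of that decision, assuming a simplified stand-in descriptor and made-up mode values (not the real Vmxnet3_TxDesc encoding):

    #include <stddef.h>
    #include <stdint.h>
    #include <rte_mbuf.h>
    #include <rte_tcp.h>
    #include <rte_udp.h>

    /* Assumed offload-mode codes; the real VMXNET3_OM_* values live in the driver. */
    enum { OM_NONE = 0, OM_CSUM = 2, OM_TSO = 3 };

    /* Simplified stand-in for the descriptor fields the hits touch. */
    struct txd_sketch {
        uint32_t om;       /* offload mode */
        uint32_t hlen;     /* header length covered by the offload */
        uint32_t msscof;   /* MSS (TSO) or checksum-field offset (CSUM) */
    };

    static void
    set_tx_offload(struct txd_sketch *txd, const struct rte_mbuf *m)
    {
        if (m->ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
            txd->om = OM_TSO;
            txd->hlen = m->l2_len + m->l3_len + m->l4_len;
            txd->msscof = m->tso_segsz;
            return;
        }

        switch (m->ol_flags & RTE_MBUF_F_TX_L4_MASK) {
        case RTE_MBUF_F_TX_TCP_CKSUM:
            txd->om = OM_CSUM;
            txd->hlen = m->l2_len + m->l3_len;
            txd->msscof = txd->hlen + offsetof(struct rte_tcp_hdr, cksum);
            break;
        case RTE_MBUF_F_TX_UDP_CKSUM:
            txd->om = OM_CSUM;
            txd->hlen = m->l2_len + m->l3_len;
            txd->msscof = txd->hlen + offsetof(struct rte_udp_hdr, dgram_cksum);
            break;
        default:
            txd->om = OM_NONE;
            txd->hlen = 0;
            txd->msscof = 0;
        }
    }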
|
| /dpdk/drivers/net/i40e/ |
| i40e_rxtx.c |
|   403  txd[desc_to_clean_to].cmd_type_offset_bsz = 0;  in i40e_xmit_cleanup()
|  1063  struct rte_mbuf *txd = tx_pkt;  in i40e_calc_pkt_desc() local
|  1066  while (txd != NULL) {  in i40e_calc_pkt_desc()
|  1068  txd = txd->next;  in i40e_calc_pkt_desc()
|  1080  volatile struct i40e_tx_desc *txd;  in i40e_xmit_pkts() local
|  1241  txd = &txr[tx_id];  in i40e_xmit_pkts()
|  1254  txd->buffer_addr =  in i40e_xmit_pkts()
|  1256  txd->cmd_type_offset_bsz =  in i40e_xmit_pkts()
|  1267  txd = &txr[tx_id];  in i40e_xmit_pkts()
|  1305  txd->cmd_type_offset_bsz |=  in i40e_xmit_pkts()
|  [all …]
|
| /dpdk/doc/guides/howto/ |
| virtio_user_as_exceptional_path.rst |
|    61  --txd=1024 --rxd=1024
|    85  --txq=2 --rxq=2 --txd=1024 --rxd=1024
|
| /dpdk/drivers/net/ice/ |
| ice_rxtx.c |
|   943  txd->cmd_type_offset_bsz =  in ice_reset_tx_queue()
|  2640  txd[desc_to_clean_to].cmd_type_offset_bsz = 0;  in ice_xmit_cleanup()
|  2708  struct rte_mbuf *txd = tx_pkt;  in ice_calc_pkt_desc() local
|  2711  while (txd != NULL) {  in ice_calc_pkt_desc()
|  2713  txd = txd->next;  in ice_calc_pkt_desc()
|  2724  volatile struct ice_tx_desc *txd;  in ice_xmit_pkts() local
|  2865  txd = &tx_ring[tx_id];  in ice_xmit_pkts()
|  2879  txd->cmd_type_offset_bsz =  in ice_xmit_pkts()
|  2893  txd = &tx_ring[tx_id];  in ice_xmit_pkts()
|  2898  txd->cmd_type_offset_bsz =  in ice_xmit_pkts()
|  [all …]
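
The i40e_calc_pkt_desc and ice_calc_pkt_desc hits above both walk the mbuf segment chain to count how many data descriptors a packet will consume. A sketch of that walk, assuming a hypothetical MAX_DATA_PER_TXD per-descriptor byte limit (the real drivers derive it from the descriptor's buffer-size field width):

    #include <stdint.h>
    #include <rte_mbuf.h>

    /* Hypothetical per-descriptor payload limit used for illustration. */
    #define MAX_DATA_PER_TXD 4096u

    static inline uint16_t
    calc_pkt_desc(const struct rte_mbuf *tx_pkt)
    {
        const struct rte_mbuf *txd = tx_pkt;
        uint16_t count = 0;

        while (txd != NULL) {
            /* A segment longer than the limit needs several data descriptors. */
            count += (txd->data_len + MAX_DATA_PER_TXD - 1) / MAX_DATA_PER_TXD;
            txd = txd->next;
        }
        return count;
    }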
|
| /dpdk/drivers/net/ixgbe/ |
| ixgbe_rxtx_vec_common.h |
|   222  volatile union ixgbe_adv_tx_desc *txd = &txq->tx_ring[i];  in _ixgbe_reset_tx_queue_vec() local
|   224  txd->wb.status = IXGBE_TXD_STAT_DD;  in _ixgbe_reset_tx_queue_vec()
|
| ixgbe_rxtx.c |
|   631  volatile union ixgbe_adv_tx_desc *txd, *txp;  in ixgbe_xmit_pkts() local
|   887  txd = &txr[tx_id];  in ixgbe_xmit_pkts()
|   900  txd->read.buffer_addr =  in ixgbe_xmit_pkts()
|   902  txd->read.cmd_type_len =  in ixgbe_xmit_pkts()
|   904  txd->read.olinfo_status =  in ixgbe_xmit_pkts()
|   932  txp = txd;  in ixgbe_xmit_pkts()
|   934  txd->read.cmd_type_len |= rte_cpu_to_le_32(cmd_type_len);  in ixgbe_xmit_pkts()
|  2510  volatile union ixgbe_adv_tx_desc *txd = &txq->tx_ring[i];  in ixgbe_reset_tx_queue() local
|  2512  txd->wb.status = rte_cpu_to_le_32(IXGBE_TXD_STAT_DD);  in ixgbe_reset_tx_queue()
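
The ixgbe reset hits (and the matching igc, igb, ngbe and txgbe hits below) initialize every ring entry with the "descriptor done" status so the transmit cleanup path treats the whole ring as reclaimable after a queue reset. A minimal sketch, assuming a stand-in write-back layout and DD flag value rather than the real adapter definitions:

    #include <stdint.h>
    #include <rte_byteorder.h>

    #define TXD_STAT_DD 0x00000001u  /* assumed "descriptor done" write-back bit */

    /* Stand-in for a TX descriptor's write-back view. */
    struct txd_wb_sketch {
        uint64_t rsvd;
        uint32_t rsvd2;
        uint32_t status;
    };

    static void
    reset_tx_ring(volatile struct txd_wb_sketch *ring, uint16_t nb_desc)
    {
        uint16_t i;

        /* Descriptors are little-endian on the wire, hence the conversion. */
        for (i = 0; i < nb_desc; i++)
            ring[i].status = rte_cpu_to_le_32(TXD_STAT_DD);
    }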
|
| /dpdk/drivers/net/igc/ |
| igc_txrx.c |
|  1614  volatile union igc_adv_tx_desc *txd;  in igc_xmit_pkts() local
|  1799  txd = &txr[tx_id];  in igc_xmit_pkts()
|  1808  txd->read.buffer_addr =  in igc_xmit_pkts()
|  1810  txd->read.cmd_type_len =  in igc_xmit_pkts()
|  1812  txd->read.olinfo_status =  in igc_xmit_pkts()
|  1824  txd->read.cmd_type_len |=  in igc_xmit_pkts()
|  1909  volatile union igc_adv_tx_desc *txd = &txq->tx_ring[i];  in igc_reset_tx_queue() local
|  1911  txd->wb.status = IGC_TXD_STAT_DD;  in igc_reset_tx_queue()
|
| /dpdk/drivers/net/e1000/ |
| em_rxtx.c |
|   373  volatile struct e1000_data_desc *txd;  in eth_em_xmit_pkts() local
|   551  txd = &txr[tx_id];  in eth_em_xmit_pkts()
|   564  txd->buffer_addr = rte_cpu_to_le_64(buf_dma_addr);  in eth_em_xmit_pkts()
|   565  txd->lower.data = rte_cpu_to_le_32(cmd_type_len | slen);  in eth_em_xmit_pkts()
|   566  txd->upper.data = rte_cpu_to_le_32(popts_spec);  in eth_em_xmit_pkts()
|   593  txd->lower.data |= rte_cpu_to_le_32(cmd_type_len);  in eth_em_xmit_pkts()
|
| igb_rxtx.c |
|   386  volatile union e1000_adv_tx_desc *txd;  in eth_igb_xmit_pkts() local
|   571  txd = &txr[tx_id];  in eth_igb_xmit_pkts()
|   582  txd->read.buffer_addr =  in eth_igb_xmit_pkts()
|   584  txd->read.cmd_type_len =  in eth_igb_xmit_pkts()
|   586  txd->read.olinfo_status =  in eth_igb_xmit_pkts()
|   598  txd->read.cmd_type_len |=  in eth_igb_xmit_pkts()
|  1438  volatile union e1000_adv_tx_desc *txd = &(txq->tx_ring[i]);  in igb_reset_tx_queue() local
|  1440  txd->wb.status = E1000_TXD_STAT_DD;  in igb_reset_tx_queue()
|
| /dpdk/drivers/net/mlx5/ |
| mlx5_tx.c |
|   538  struct mlx5_txq_data *txd = (*priv->txqs)[0];  in mlx5_select_tx_function() local
|   540  if (txd->inlen_send) {  in mlx5_select_tx_function()
|
| /dpdk/drivers/net/ngbe/ |
| ngbe_rxtx.c |
|   578  volatile struct ngbe_tx_desc *txd;  in ngbe_xmit_pkts() local
|   801  txd = &txr[tx_id];  in ngbe_xmit_pkts()
|   814  txd->qw0 = rte_cpu_to_le_64(buf_dma_addr);  in ngbe_xmit_pkts()
|   815  txd->dw2 = rte_cpu_to_le_32(cmd_type_len | slen);  in ngbe_xmit_pkts()
|   816  txd->dw3 = rte_cpu_to_le_32(olinfo_status);  in ngbe_xmit_pkts()
|   829  txd->dw2 |= rte_cpu_to_le_32(cmd_type_len);  in ngbe_xmit_pkts()
|  1846  volatile struct ngbe_tx_desc *txd = &txq->tx_ring[i];  in ngbe_reset_tx_queue() local
|  1848  txd->dw3 = rte_cpu_to_le_32(NGBE_TXD_DD);  in ngbe_reset_tx_queue()
|
| /dpdk/doc/guides/testpmd_app_ug/ |
| run_app.rst |
|   286  * ``--txd=N``
|   363  Set the transmit free threshold of TX rings to N, where 0 <= N <= value of ``--txd``.
|   368  Set the transmit RS bit threshold of TX rings to N, where 0 <= N <= value of ``--txd``.
|   634  port config all rxq|txq|rxd|txd <value>
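
The run_app.rst hits describe the testpmd TX ring-size knobs. As a usage note (core list, channel count, and queue/ring sizes here are illustrative), a typical invocation sizing the rings at start-up, followed by the runtime command from the last hit, which requires the ports to be stopped first:

    dpdk-testpmd -l 0-3 -n 4 -- -i --rxq=2 --txq=2 --rxd=1024 --txd=1024

    testpmd> port stop all
    testpmd> port config all txd 512
    testpmd> port start all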
|
| /dpdk/doc/guides/nics/ |
| mvneta.rst |
|   148  -i --p 3 -a --txd 256 --rxd 128 --rxq=1 --txq=1 --nb-cores=1
|
| mvpp2.rst |
|   166  --burst=128 --txd=2048 --rxd=1024 --rxq=2 --txq=2 --nb-cores=2 \
|   599  ./dpdk-testpmd --vdev=eth_mvpp2,iface=eth0,iface=eth2 -c 6 -- -i -p 3 -a --txd 1024 --rxd 1024
|   679  -i -p 3 --disable-hw-vlan-strip --rxq 3 --txq 3 --txd 1024 --rxd 1024
|
| /dpdk/drivers/net/txgbe/ |
| txgbe_rxtx.c |
|   726  volatile struct txgbe_tx_desc *txd;  in txgbe_xmit_pkts() local
|   982  txd = &txr[tx_id];  in txgbe_xmit_pkts()
|   995  txd->qw0 = rte_cpu_to_le_64(buf_dma_addr);  in txgbe_xmit_pkts()
|   996  txd->dw2 = rte_cpu_to_le_32(cmd_type_len | slen);  in txgbe_xmit_pkts()
|   997  txd->dw3 = rte_cpu_to_le_32(olinfo_status);  in txgbe_xmit_pkts()
|  1010  txd->dw2 |= rte_cpu_to_le_32(cmd_type_len);  in txgbe_xmit_pkts()
|  2152  volatile struct txgbe_tx_desc *txd = &txq->tx_ring[i];  in txgbe_reset_tx_queue() local
|  2154  txd->dw3 = rte_cpu_to_le_32(TXGBE_TXD_DD);  in txgbe_reset_tx_queue()
|
| /dpdk/doc/guides/tools/ |
| flow-perf.rst |
|   125  * ``--txd=N``
|   126  Set the count of txd, default is 256.
|
| /dpdk/drivers/net/vmxnet3/base/ |
| vmxnet3_defs.h |
|   381  Vmxnet3_TxDesc txd;  member
|
| /dpdk/app/test-pmd/ |
| testpmd.c |
|  1468  check_nb_txd(queueid_t txd)  in check_nb_txd() argument
|  1475  if (txd > allowed_max_txd) {  in check_nb_txd()
|  1478  txd, allowed_max_txd, pid);  in check_nb_txd()
|  1483  if (txd < allowed_min_txd) {  in check_nb_txd()
|  1486  txd, allowed_min_txd, pid);  in check_nb_txd()
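
The testpmd.c hits are the descriptor-count validation applied to --txd and "port config ... txd". A stripped-down sketch of that bounds check, assuming the limits are passed in and that 0/-1 is the return convention (the real helper looks the limits up per port and logs which port imposes them):

    #include <stdint.h>

    static int
    check_nb_txd(uint16_t txd, uint16_t allowed_min_txd, uint16_t allowed_max_txd)
    {
        if (txd > allowed_max_txd)
            return -1;  /* larger than any port's maximum ring size */
        if (txd < allowed_min_txd)
            return -1;  /* smaller than the minimum the hardware accepts */
        return 0;
    }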
|