/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2021 Broadcom
 * All rights reserved.
 */

#include <inttypes.h>

#include <rte_malloc.h>

#include "bnxt.h"
#include "bnxt_hwrm.h"
#include "bnxt_ring.h"
#include "bnxt_txq.h"
#include "bnxt_txr.h"

/*
 * TX Queues
 */

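/*
 * Report the Tx offload capabilities advertised for this port.
 * The baseline set (L3/L4 checksums, TSO, QinQ insert, multi-segment)
 * is always reported; VLAN insert and the tunnel TSO/outer-checksum
 * flags are added only when the firmware advertises the corresponding
 * capability bits.
 */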
uint64_t bnxt_get_tx_port_offloads(struct bnxt *bp)
{
	uint64_t tx_offload_capa;

	tx_offload_capa = RTE_ETH_TX_OFFLOAD_IPV4_CKSUM  |
			  RTE_ETH_TX_OFFLOAD_UDP_CKSUM   |
			  RTE_ETH_TX_OFFLOAD_TCP_CKSUM   |
			  RTE_ETH_TX_OFFLOAD_TCP_TSO     |
			  RTE_ETH_TX_OFFLOAD_QINQ_INSERT |
			  RTE_ETH_TX_OFFLOAD_MULTI_SEGS;

	if (bp->fw_cap & BNXT_FW_CAP_VLAN_TX_INSERT)
		tx_offload_capa |= RTE_ETH_TX_OFFLOAD_VLAN_INSERT;

	if (BNXT_TUNNELED_OFFLOADS_CAP_ALL_EN(bp))
		tx_offload_capa |= RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM;

	if (BNXT_TUNNELED_OFFLOADS_CAP_VXLAN_EN(bp))
		tx_offload_capa |= RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO;
	if (BNXT_TUNNELED_OFFLOADS_CAP_GRE_EN(bp))
		tx_offload_capa |= RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO;
	if (BNXT_TUNNELED_OFFLOADS_CAP_NGE_EN(bp))
		tx_offload_capa |= RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO;
	if (BNXT_TUNNELED_OFFLOADS_CAP_IPINIP_EN(bp))
		tx_offload_capa |= RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO;

	return tx_offload_capa;
}

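/*
 * Detach the queue from its completion ring's HW stats area by clearing
 * the pointer; the backing memory is not freed here.
 */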
void bnxt_free_txq_stats(struct bnxt_tx_queue *txq)
{
	if (txq && txq->cp_ring && txq->cp_ring->hw_stats)
		txq->cp_ring->hw_stats = NULL;
}

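/*
 * Free any mbufs still referenced by the Tx software ring and clear the
 * corresponding slots. Safe to call on a queue whose rings were never
 * allocated.
 */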
static void bnxt_tx_queue_release_mbufs(struct bnxt_tx_queue *txq)
{
	struct rte_mbuf **sw_ring;
	uint16_t i;

	if (!txq || !txq->tx_ring)
		return;

	sw_ring = txq->tx_ring->tx_buf_ring;
	if (sw_ring) {
		for (i = 0; i < txq->tx_ring->tx_ring_struct->ring_size; i++) {
			if (sw_ring[i]) {
				rte_pktmbuf_free_seg(sw_ring[i]);
				sw_ring[i] = NULL;
			}
		}
	}
}

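/* Release the pending mbufs of every configured Tx queue on the port. */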
void bnxt_free_tx_mbufs(struct bnxt *bp)
{
	struct bnxt_tx_queue *txq;
	int i;

	for (i = 0; i < (int)bp->tx_nr_rings; i++) {
		txq = bp->tx_queues[i];
		bnxt_tx_queue_release_mbufs(txq);
	}
}

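/*
 * Tear down one Tx queue (tx_queue_release path).
 * Frees the HWRM Tx ring, any pending mbufs, the Tx and completion ring
 * structures, the queue's memzone and free-buffer array, and finally the
 * queue itself. Returns early without freeing anything when the adapter
 * is already in error state.
 */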
void bnxt_tx_queue_release_op(struct rte_eth_dev *dev, uint16_t queue_idx)
{
	struct bnxt_tx_queue *txq = dev->data->tx_queues[queue_idx];

	if (txq) {
		if (is_bnxt_in_error(txq->bp))
			return;

		/* Free TX ring hardware descriptors */
		bnxt_free_hwrm_tx_ring(txq->bp, txq->queue_id);
		bnxt_tx_queue_release_mbufs(txq);
		if (txq->tx_ring) {
			bnxt_free_ring(txq->tx_ring->tx_ring_struct);
			rte_free(txq->tx_ring->tx_ring_struct);
			rte_free(txq->tx_ring);
		}

		/* Free TX completion ring hardware descriptors */
		if (txq->cp_ring) {
			bnxt_free_ring(txq->cp_ring->cp_ring_struct);
			rte_free(txq->cp_ring->cp_ring_struct);
			rte_free(txq->cp_ring);
		}

		bnxt_free_txq_stats(txq);
		rte_memzone_free(txq->mz);
		txq->mz = NULL;

		rte_free(txq->free);
		rte_free(txq);
		dev->data->tx_queues[queue_idx] = NULL;
	}
}

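/*
 * Allocate and initialize one Tx queue (tx_queue_setup path).
 * Validates the ring index and descriptor count, releases any queue
 * already present at this index, then allocates the queue structure,
 * the free-mbuf array, the ring structures and the HW descriptor rings.
 * On failure the partially built queue is released via
 * bnxt_tx_queue_release_op().
 */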
int bnxt_tx_queue_setup_op(struct rte_eth_dev *eth_dev,
			       uint16_t queue_idx,
			       uint16_t nb_desc,
			       unsigned int socket_id,
			       const struct rte_eth_txconf *tx_conf)
{
	struct bnxt *bp = eth_dev->data->dev_private;
	struct bnxt_tx_queue *txq;
	int rc = 0;

	rc = is_bnxt_in_error(bp);
	if (rc)
		return rc;

	if (queue_idx >= bnxt_max_rings(bp)) {
		PMD_DRV_LOG(ERR,
			"Cannot create Tx ring %d. Only %d rings available\n",
			queue_idx, bp->max_tx_rings);
		return -EINVAL;
	}

	if (nb_desc < BNXT_MIN_RING_DESC || nb_desc > MAX_TX_DESC_CNT) {
		PMD_DRV_LOG(ERR, "nb_desc %d is invalid", nb_desc);
		return -EINVAL;
	}

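	/* If a queue already exists at this index, release it before
	 * setting up the new one.
	 */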
	if (eth_dev->data->tx_queues) {
		txq = eth_dev->data->tx_queues[queue_idx];
		if (txq)
			bnxt_tx_queue_release_op(eth_dev, queue_idx);
	}
	txq = rte_zmalloc_socket("bnxt_tx_queue", sizeof(struct bnxt_tx_queue),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (!txq) {
		PMD_DRV_LOG(ERR, "bnxt_tx_queue allocation failed!");
		return -ENOMEM;
	}

	txq->bp = bp;
	eth_dev->data->tx_queues[queue_idx] = txq;

	txq->free = rte_zmalloc_socket(NULL,
				       sizeof(struct rte_mbuf *) * nb_desc,
				       RTE_CACHE_LINE_SIZE, socket_id);
	if (!txq->free) {
		PMD_DRV_LOG(ERR, "allocation of tx mbuf free array failed!");
		rc = -ENOMEM;
		goto err;
	}
	txq->nb_tx_desc = nb_desc;
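	/* Default free threshold: a quarter of the ring size (rounded up to
	 * a power of two), capped at the maximum Tx burst size.
	 */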
	txq->tx_free_thresh =
		RTE_MIN(rte_align32pow2(nb_desc) / 4, RTE_BNXT_MAX_TX_BURST);
	txq->offloads = eth_dev->data->dev_conf.txmode.offloads |
			tx_conf->offloads;

	txq->tx_deferred_start = tx_conf->tx_deferred_start;

	rc = bnxt_init_tx_ring_struct(txq, socket_id);
	if (rc)
		goto err;

	txq->queue_id = queue_idx;
	txq->port_id = eth_dev->data->port_id;

	/* Allocate TX ring hardware descriptors */
	if (bnxt_alloc_rings(bp, socket_id, queue_idx, txq, NULL, txq->cp_ring,
			     NULL, "txr")) {
		PMD_DRV_LOG(ERR, "ring_dma_zone_reserve for tx_ring failed!");
		rc = -ENOMEM;
		goto err;
	}

	if (bnxt_init_one_tx_ring(txq)) {
		PMD_DRV_LOG(ERR, "bnxt_init_one_tx_ring failed!");
		rc = -ENOMEM;
		goto err;
	}

	return 0;
err:
	bnxt_tx_queue_release_op(eth_dev, queue_idx);
	return rc;
}