/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2021 Broadcom
 * All rights reserved.
 */

#include <inttypes.h>
#include <stdbool.h>

#include <rte_byteorder.h>
#include <rte_malloc.h>

#include "bnxt.h"
#include "bnxt_hwrm.h"
#include "bnxt_ring.h"
#include "bnxt_txq.h"
#include "bnxt_txr.h"
#include "hsi_struct_def_dpdk.h"

/*
 * TX Ring handling
 */
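
/*
 * Ring positions are tracked with free-running "raw" producer/consumer
 * indices; RING_IDX() and RING_CMPL() mask them down to an actual slot
 * in the descriptor or completion ring.
 */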

void bnxt_free_tx_rings(struct bnxt *bp)
{
	int i;

	for (i = 0; i < (int)bp->tx_nr_rings; i++) {
		struct bnxt_tx_queue *txq = bp->tx_queues[i];

		if (!txq)
			continue;

		bnxt_free_ring(txq->tx_ring->tx_ring_struct);
		rte_free(txq->tx_ring->tx_ring_struct);
		rte_free(txq->tx_ring);

		bnxt_free_ring(txq->cp_ring->cp_ring_struct);
		rte_free(txq->cp_ring->cp_ring_struct);
		rte_free(txq->cp_ring);

		rte_memzone_free(txq->mz);
		txq->mz = NULL;

		rte_free(txq);
		bp->tx_queues[i] = NULL;
	}
}

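/* Set the TX wake threshold and mark the firmware ring ID as unassigned. */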
int bnxt_init_one_tx_ring(struct bnxt_tx_queue *txq)
{
	struct bnxt_tx_ring_info *txr = txq->tx_ring;
	struct bnxt_ring *ring = txr->tx_ring_struct;

	txq->tx_wake_thresh = ring->ring_size / 2;
	ring->fw_ring_id = INVALID_HW_RING_ID;

	return 0;
}

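/*
 * Allocate and size the software ring structures for a TX queue and its
 * companion completion ring. The firmware ring IDs remain invalid until
 * the rings are created through HWRM (see bnxt_tx_queue_start()).
 */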
int bnxt_init_tx_ring_struct(struct bnxt_tx_queue *txq, unsigned int socket_id)
{
	struct bnxt_cp_ring_info *cpr;
	struct bnxt_tx_ring_info *txr;
	struct bnxt_ring *ring;

	txr = rte_zmalloc_socket("bnxt_tx_ring",
				 sizeof(struct bnxt_tx_ring_info),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (txr == NULL)
		return -ENOMEM;
	txq->tx_ring = txr;

	ring = rte_zmalloc_socket("bnxt_tx_ring_struct",
				  sizeof(struct bnxt_ring),
				  RTE_CACHE_LINE_SIZE, socket_id);
	if (ring == NULL)
		return -ENOMEM;
	txr->tx_ring_struct = ring;
	ring->ring_size = rte_align32pow2(txq->nb_tx_desc);
	ring->ring_mask = ring->ring_size - 1;
	ring->bd = (void *)txr->tx_desc_ring;
	ring->bd_dma = txr->tx_desc_mapping;
	ring->vmem_size = ring->ring_size * sizeof(struct rte_mbuf *);
	ring->vmem = (void **)&txr->tx_buf_ring;
	ring->fw_ring_id = INVALID_HW_RING_ID;

	cpr = rte_zmalloc_socket("bnxt_tx_ring",
				 sizeof(struct bnxt_cp_ring_info),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (cpr == NULL)
		return -ENOMEM;
	txq->cp_ring = cpr;

	ring = rte_zmalloc_socket("bnxt_tx_ring_struct",
				  sizeof(struct bnxt_ring),
				  RTE_CACHE_LINE_SIZE, socket_id);
	if (ring == NULL)
		return -ENOMEM;
	cpr->cp_ring_struct = ring;
	ring->ring_size = txr->tx_ring_struct->ring_size;
	ring->ring_mask = ring->ring_size - 1;
	ring->bd = (void *)cpr->cp_desc_ring;
	ring->bd_dma = cpr->cp_desc_mapping;
	ring->vmem_size = 0;
	ring->vmem = NULL;
	ring->fw_ring_id = INVALID_HW_RING_ID;

	return 0;
}

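/*
 * A long (two-unit) TX BD is required when the packet requests any offload
 * (checksum, TSO, VLAN/QinQ insertion, tunneling, PTP timestamping) or when
 * a CFA action must be attached in TruFlow mode; otherwise a short BD is used.
 */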
static bool
bnxt_xmit_need_long_bd(struct rte_mbuf *tx_pkt, struct bnxt_tx_queue *txq)
{
	if (tx_pkt->ol_flags & (RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_TCP_CKSUM |
				RTE_MBUF_F_TX_UDP_CKSUM | RTE_MBUF_F_TX_IP_CKSUM |
				RTE_MBUF_F_TX_VLAN | RTE_MBUF_F_TX_OUTER_IP_CKSUM |
				RTE_MBUF_F_TX_TUNNEL_GRE | RTE_MBUF_F_TX_TUNNEL_VXLAN |
				RTE_MBUF_F_TX_TUNNEL_GENEVE | RTE_MBUF_F_TX_IEEE1588_TMST |
				RTE_MBUF_F_TX_QINQ) ||
	     (BNXT_TRUFLOW_EN(txq->bp) &&
	      (txq->bp->tx_cfa_action || txq->vfr_tx_cfa_action)))
		return true;
	return false;
}

static uint16_t bnxt_start_xmit(struct rte_mbuf *tx_pkt,
				struct bnxt_tx_queue *txq,
				uint16_t *coal_pkts,
				struct tx_bd_long **last_txbd)
{
	struct bnxt_tx_ring_info *txr = txq->tx_ring;
	struct bnxt_ring *ring = txr->tx_ring_struct;
	uint32_t outer_tpid_bd = 0;
	struct tx_bd_long *txbd;
	struct tx_bd_long_hi *txbd1 = NULL;
	uint32_t vlan_tag_flags;
	bool long_bd = false;
	unsigned short nr_bds;
	uint16_t prod;
	struct rte_mbuf *m_seg;
	struct rte_mbuf **tx_buf;
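	/* Length hint buckets for packets shorter than 2 KB, indexed by pkt_len / 512 */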
	static const uint32_t lhint_arr[4] = {
		TX_BD_LONG_FLAGS_LHINT_LT512,
		TX_BD_LONG_FLAGS_LHINT_LT1K,
		TX_BD_LONG_FLAGS_LHINT_LT2K,
		TX_BD_LONG_FLAGS_LHINT_LT2K
	};

	if (unlikely(is_bnxt_in_error(txq->bp)))
		return -EIO;

	long_bd = bnxt_xmit_need_long_bd(tx_pkt, txq);
	nr_bds = long_bd + tx_pkt->nb_segs;

	if (unlikely(bnxt_tx_avail(txq) < nr_bds))
		return -ENOMEM;

	/* Check if number of Tx descriptors is above HW limit */
	if (unlikely(nr_bds > BNXT_MAX_TSO_SEGS)) {
		PMD_DRV_LOG(ERR,
			    "Num descriptors %d exceeds HW limit\n", nr_bds);
		return -ENOSPC;
	}

	/* If packet length is less than minimum packet size, pad it */
	if (unlikely(rte_pktmbuf_pkt_len(tx_pkt) < BNXT_MIN_PKT_SIZE)) {
		uint8_t pad = BNXT_MIN_PKT_SIZE - rte_pktmbuf_pkt_len(tx_pkt);
		char *seg = rte_pktmbuf_append(tx_pkt, pad);

		if (!seg) {
			PMD_DRV_LOG(ERR,
				    "Failed to pad mbuf by %d bytes\n",
				    pad);
			return -ENOMEM;
		}

		/* Note: data_len, pkt len are updated in rte_pktmbuf_append */
		memset(seg, 0, pad);
	}

	/* Check non zero data_len */
	RTE_VERIFY(tx_pkt->data_len);

	prod = RING_IDX(ring, txr->tx_raw_prod);
	tx_buf = &txr->tx_buf_ring[prod];
	*tx_buf = tx_pkt;

	txbd = &txr->tx_desc_ring[prod];
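	/*
	 * Completions are coalesced: each first BD carries the running packet
	 * count of the burst in 'opaque' and is flagged NO_CMPL; the caller
	 * clears NO_CMPL on the last BD so a single completion covers the
	 * whole burst.
	 */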
	txbd->opaque = *coal_pkts;
	txbd->flags_type = nr_bds << TX_BD_LONG_FLAGS_BD_CNT_SFT;
	txbd->flags_type |= TX_BD_SHORT_FLAGS_COAL_NOW;
	txbd->flags_type |= TX_BD_LONG_FLAGS_NO_CMPL;
	txbd->len = tx_pkt->data_len;
	if (tx_pkt->pkt_len >= 2048)
		txbd->flags_type |= TX_BD_LONG_FLAGS_LHINT_GTE2K;
	else
		txbd->flags_type |= lhint_arr[tx_pkt->pkt_len >> 9];
	txbd->address = rte_cpu_to_le_64(rte_mbuf_data_iova(tx_pkt));
	*last_txbd = txbd;

	if (long_bd) {
		txbd->flags_type |= TX_BD_LONG_TYPE_TX_BD_LONG;
		vlan_tag_flags = 0;

		/* HW can accelerate only outer vlan in QinQ mode */
		if (tx_pkt->ol_flags & RTE_MBUF_F_TX_QINQ) {
			vlan_tag_flags = TX_BD_LONG_CFA_META_KEY_VLAN_TAG |
				tx_pkt->vlan_tci_outer;
			outer_tpid_bd = txq->bp->outer_tpid_bd &
				BNXT_OUTER_TPID_BD_MASK;
			vlan_tag_flags |= outer_tpid_bd;
		} else if (tx_pkt->ol_flags & RTE_MBUF_F_TX_VLAN) {
			/* shurd: Should this mask at
			 * TX_BD_LONG_CFA_META_VLAN_VID_MASK?
			 */
			vlan_tag_flags = TX_BD_LONG_CFA_META_KEY_VLAN_TAG |
				tx_pkt->vlan_tci;
			/* Currently supports 802.1Q and 802.1ad VLAN offloads;
			 * the QINQ1, QINQ2, QINQ3 VLAN headers are deprecated.
			 */
			/* DPDK only supports 802.1Q VLAN packets */
			vlan_tag_flags |=
					TX_BD_LONG_CFA_META_VLAN_TPID_TPID8100;
		}

		txr->tx_raw_prod = RING_NEXT(txr->tx_raw_prod);

		prod = RING_IDX(ring, txr->tx_raw_prod);
		txbd1 = (struct tx_bd_long_hi *)&txr->tx_desc_ring[prod];
		txbd1->lflags = 0;
		txbd1->cfa_meta = vlan_tag_flags;
		/* Legacy tx_bd_long_hi->mss =
		 * tx_bd_long_hi->kid_or_ts_high_mss
		 */
		txbd1->kid_or_ts_high_mss = 0;

		if (txq->vfr_tx_cfa_action)
			txbd1->cfa_action = txq->vfr_tx_cfa_action;
		else
			txbd1->cfa_action = txq->bp->tx_cfa_action;

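		/*
		 * Compound offload requests are matched from the most to the
		 * least specific flag combination so that exactly one lflags
		 * setting is applied per packet.
		 */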
		if (tx_pkt->ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
			uint16_t hdr_size;

			/* TSO */
			txbd1->lflags |= TX_BD_LONG_LFLAGS_LSO |
					 TX_BD_LONG_LFLAGS_T_IPID;
			hdr_size = tx_pkt->l2_len + tx_pkt->l3_len +
					tx_pkt->l4_len;
			hdr_size += (tx_pkt->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) ?
				    tx_pkt->outer_l2_len +
				    tx_pkt->outer_l3_len : 0;
			/* The hdr_size is in 16-bit units, not bytes,
			 * hence divide by 2.
			 * Also, legacy hdr_size = kid_or_ts_low_hdr_size.
			 */
			txbd1->kid_or_ts_low_hdr_size = hdr_size >> 1;
			txbd1->kid_or_ts_high_mss = tx_pkt->tso_segsz;
			RTE_VERIFY(txbd1->kid_or_ts_high_mss);

		} else if ((tx_pkt->ol_flags & PKT_TX_OIP_IIP_TCP_UDP_CKSUM) ==
			   PKT_TX_OIP_IIP_TCP_UDP_CKSUM) {
			/* Outer IP, Inner IP, Inner TCP/UDP CSO */
			txbd1->lflags |= TX_BD_FLG_TIP_IP_TCP_UDP_CHKSUM;
		} else if ((tx_pkt->ol_flags & PKT_TX_OIP_IIP_TCP_CKSUM) ==
			   PKT_TX_OIP_IIP_TCP_CKSUM) {
			/* Outer IP, Inner IP, Inner TCP/UDP CSO */
			txbd1->lflags |= TX_BD_FLG_TIP_IP_TCP_UDP_CHKSUM;
		} else if ((tx_pkt->ol_flags & PKT_TX_OIP_IIP_UDP_CKSUM) ==
			   PKT_TX_OIP_IIP_UDP_CKSUM) {
			/* Outer IP, Inner IP, Inner TCP/UDP CSO */
			txbd1->lflags |= TX_BD_FLG_TIP_IP_TCP_UDP_CHKSUM;
		} else if ((tx_pkt->ol_flags & PKT_TX_IIP_TCP_UDP_CKSUM) ==
			   PKT_TX_IIP_TCP_UDP_CKSUM) {
			/* (Inner) IP, (Inner) TCP/UDP CSO */
			txbd1->lflags |= TX_BD_FLG_IP_TCP_UDP_CHKSUM;
		} else if ((tx_pkt->ol_flags & PKT_TX_IIP_UDP_CKSUM) ==
			   PKT_TX_IIP_UDP_CKSUM) {
			/* (Inner) IP, (Inner) TCP/UDP CSO */
			txbd1->lflags |= TX_BD_FLG_IP_TCP_UDP_CHKSUM;
		} else if ((tx_pkt->ol_flags & PKT_TX_IIP_TCP_CKSUM) ==
			   PKT_TX_IIP_TCP_CKSUM) {
			/* (Inner) IP, (Inner) TCP/UDP CSO */
			txbd1->lflags |= TX_BD_FLG_IP_TCP_UDP_CHKSUM;
		} else if ((tx_pkt->ol_flags & PKT_TX_OIP_TCP_UDP_CKSUM) ==
			   PKT_TX_OIP_TCP_UDP_CKSUM) {
			/* Outer IP, (Inner) TCP/UDP CSO */
			txbd1->lflags |= TX_BD_FLG_TIP_TCP_UDP_CHKSUM;
		} else if ((tx_pkt->ol_flags & PKT_TX_OIP_UDP_CKSUM) ==
			   PKT_TX_OIP_UDP_CKSUM) {
			/* Outer IP, (Inner) TCP/UDP CSO */
			txbd1->lflags |= TX_BD_FLG_TIP_TCP_UDP_CHKSUM;
		} else if ((tx_pkt->ol_flags & PKT_TX_OIP_TCP_CKSUM) ==
			   PKT_TX_OIP_TCP_CKSUM) {
			/* Outer IP, (Inner) TCP/UDP CSO */
			txbd1->lflags |= TX_BD_FLG_TIP_TCP_UDP_CHKSUM;
		} else if ((tx_pkt->ol_flags & PKT_TX_OIP_IIP_CKSUM) ==
			   PKT_TX_OIP_IIP_CKSUM) {
			/* Outer IP, Inner IP CSO */
			txbd1->lflags |= TX_BD_FLG_TIP_IP_CHKSUM;
		} else if ((tx_pkt->ol_flags & PKT_TX_TCP_UDP_CKSUM) ==
			   PKT_TX_TCP_UDP_CKSUM) {
			/* TCP/UDP CSO */
			txbd1->lflags |= TX_BD_LONG_LFLAGS_TCP_UDP_CHKSUM;
		} else if ((tx_pkt->ol_flags & RTE_MBUF_F_TX_TCP_CKSUM) ==
			   RTE_MBUF_F_TX_TCP_CKSUM) {
			/* TCP/UDP CSO */
			txbd1->lflags |= TX_BD_LONG_LFLAGS_TCP_UDP_CHKSUM;
		} else if ((tx_pkt->ol_flags & RTE_MBUF_F_TX_UDP_CKSUM) ==
			   RTE_MBUF_F_TX_UDP_CKSUM) {
			/* TCP/UDP CSO */
			txbd1->lflags |= TX_BD_LONG_LFLAGS_TCP_UDP_CHKSUM;
		} else if ((tx_pkt->ol_flags & RTE_MBUF_F_TX_IP_CKSUM) ==
			   RTE_MBUF_F_TX_IP_CKSUM) {
			/* IP CSO */
			txbd1->lflags |= TX_BD_LONG_LFLAGS_IP_CHKSUM;
		} else if ((tx_pkt->ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM) ==
			   RTE_MBUF_F_TX_OUTER_IP_CKSUM) {
			/* Outer IP CSO */
			txbd1->lflags |= TX_BD_LONG_LFLAGS_T_IP_CHKSUM;
		} else if ((tx_pkt->ol_flags & RTE_MBUF_F_TX_IEEE1588_TMST) ==
			   RTE_MBUF_F_TX_IEEE1588_TMST) {
			/* PTP */
			txbd1->lflags |= TX_BD_LONG_LFLAGS_STAMP;
		}
	} else {
		txbd->flags_type |= TX_BD_SHORT_TYPE_TX_BD_SHORT;
	}

	m_seg = tx_pkt->next;
	while (m_seg) {
		/* Check non zero data_len */
		RTE_VERIFY(m_seg->data_len);
		txr->tx_raw_prod = RING_NEXT(txr->tx_raw_prod);

		prod = RING_IDX(ring, txr->tx_raw_prod);
		tx_buf = &txr->tx_buf_ring[prod];
		*tx_buf = m_seg;

		txbd = &txr->tx_desc_ring[prod];
		txbd->address = rte_cpu_to_le_64(rte_mbuf_data_iova(m_seg));
		txbd->flags_type = TX_BD_SHORT_TYPE_TX_BD_SHORT;
		txbd->len = m_seg->data_len;

		m_seg = m_seg->next;
	}

	txbd->flags_type |= TX_BD_LONG_FLAGS_PACKET_END;

	txr->tx_raw_prod = RING_NEXT(txr->tx_raw_prod);

	return 0;
}

/*
 * Transmit completion function for use when RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE
 * is enabled.
 */
static void bnxt_tx_cmp_fast(struct bnxt_tx_queue *txq, int nr_pkts)
{
	struct bnxt_tx_ring_info *txr = txq->tx_ring;
	struct bnxt_ring *ring = txr->tx_ring_struct;
	struct rte_mbuf **free = txq->free;
	uint16_t raw_cons = txr->tx_raw_cons;
	unsigned int blk = 0;
	int i, j;

	for (i = 0; i < nr_pkts; i++) {
		struct rte_mbuf **tx_buf;
		unsigned short nr_bds;

		tx_buf = &txr->tx_buf_ring[RING_IDX(ring, raw_cons)];
		nr_bds = (*tx_buf)->nb_segs +
			 bnxt_xmit_need_long_bd(*tx_buf, txq);
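		/* Collect mbufs from every BD slot of the packet; the long
		 * BD's second slot holds no mbuf and is skipped.
		 */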
		for (j = 0; j < nr_bds; j++) {
			if (*tx_buf) {
				/* Add mbuf to the bulk free array */
				free[blk++] = *tx_buf;
				*tx_buf = NULL;
			}
			raw_cons = RING_NEXT(raw_cons);
			tx_buf = &txr->tx_buf_ring[RING_IDX(ring, raw_cons)];
		}
	}
	if (blk)
		rte_mempool_put_bulk(free[0]->pool, (void *)free, blk);

	txr->tx_raw_cons = raw_cons;
}

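/*
 * Transmit completion function for the general case: mbufs may come from
 * different mempools and may still be referenced elsewhere, so each segment
 * goes through rte_pktmbuf_prefree_seg() and frees are batched per pool.
 */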
static void bnxt_tx_cmp(struct bnxt_tx_queue *txq, int nr_pkts)
{
	struct bnxt_tx_ring_info *txr = txq->tx_ring;
	struct bnxt_ring *ring = txr->tx_ring_struct;
	struct rte_mempool *pool = NULL;
	struct rte_mbuf **free = txq->free;
	uint16_t raw_cons = txr->tx_raw_cons;
	unsigned int blk = 0;
	int i, j;

	for (i = 0; i < nr_pkts; i++) {
		struct rte_mbuf *mbuf;
		struct rte_mbuf **tx_buf;
		unsigned short nr_bds;

		tx_buf = &txr->tx_buf_ring[RING_IDX(ring, raw_cons)];
		nr_bds = (*tx_buf)->nb_segs +
			 bnxt_xmit_need_long_bd(*tx_buf, txq);
		for (j = 0; j < nr_bds; j++) {
			mbuf = *tx_buf;
			*tx_buf = NULL;
			raw_cons = RING_NEXT(raw_cons);
			tx_buf = &txr->tx_buf_ring[RING_IDX(ring, raw_cons)];
			if (!mbuf)	/* long_bd's tx_buf ? */
				continue;

			mbuf = rte_pktmbuf_prefree_seg(mbuf);
			if (unlikely(!mbuf))
				continue;

			/* EW - no need to unmap DMA memory? */

			if (likely(mbuf->pool == pool)) {
				/* Add mbuf to the bulk free array */
				free[blk++] = mbuf;
			} else {
				/* Found an mbuf from a different pool. Free
				 * mbufs accumulated so far to the previous
				 * pool
				 */
				if (likely(pool != NULL))
					rte_mempool_put_bulk(pool,
							     (void *)free,
							     blk);

				/* Start accumulating mbufs in a new pool */
				free[0] = mbuf;
				pool = mbuf->pool;
				blk = 1;
			}
		}
	}
	if (blk)
		rte_mempool_put_bulk(pool, (void *)free, blk);

	txr->tx_raw_cons = raw_cons;
}

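/*
 * Reclaim completed TX descriptors: walk the completion ring, sum the packet
 * counts reported in the TX_L2 completions' opaque fields, free the mbufs,
 * and advance the completion queue doorbell.
 */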
static int bnxt_handle_tx_cp(struct bnxt_tx_queue *txq)
{
	uint32_t nb_tx_pkts = 0, cons, ring_mask, opaque;
	struct bnxt_cp_ring_info *cpr = txq->cp_ring;
	uint32_t raw_cons = cpr->cp_raw_cons;
	struct bnxt_ring *cp_ring_struct;
	struct tx_cmpl *txcmp;

	if (bnxt_tx_bds_in_hw(txq) < txq->tx_free_thresh)
		return 0;

	cp_ring_struct = cpr->cp_ring_struct;
	ring_mask = cp_ring_struct->ring_mask;

	do {
		cons = RING_CMPL(ring_mask, raw_cons);
		txcmp = (struct tx_cmpl *)&cpr->cp_desc_ring[cons];

		if (!bnxt_cpr_cmp_valid(txcmp, raw_cons, ring_mask + 1))
			break;

		opaque = rte_le_to_cpu_32(txcmp->opaque);

		if (CMP_TYPE(txcmp) == TX_CMPL_TYPE_TX_L2)
			nb_tx_pkts += opaque;
		else
			RTE_LOG_DP(ERR, PMD,
					"Unhandled CMP type %02x\n",
					CMP_TYPE(txcmp));
		raw_cons = NEXT_RAW_CMP(raw_cons);
	} while (nb_tx_pkts < ring_mask);

	if (nb_tx_pkts) {
		if (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
			bnxt_tx_cmp_fast(txq, nb_tx_pkts);
		else
			bnxt_tx_cmp(txq, nb_tx_pkts);
		cpr->cp_raw_cons = raw_cons;
		bnxt_db_cq(cpr);
	}

	return nb_tx_pkts;
}

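/*
 * Burst transmit: reclaim completed descriptors, enqueue up to nb_pkts
 * packets, then request a single completion for the burst and ring the TX
 * doorbell once.
 */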
uint16_t bnxt_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
			       uint16_t nb_pkts)
{
	int rc;
	uint16_t nb_tx_pkts = 0;
	uint16_t coal_pkts = 0;
	struct bnxt_tx_queue *txq = tx_queue;
	struct tx_bd_long *last_txbd = NULL;

	/* Handle TX completions */
	bnxt_handle_tx_cp(txq);

	/* Tx queue was stopped; wait for it to be restarted */
	if (unlikely(!txq->tx_started)) {
		PMD_DRV_LOG(DEBUG, "Tx queue stopped; return\n");
		return 0;
	}

	/* Handle TX burst request */
	for (nb_tx_pkts = 0; nb_tx_pkts < nb_pkts; nb_tx_pkts++) {
		coal_pkts++;
		rc = bnxt_start_xmit(tx_pkts[nb_tx_pkts], txq,
				     &coal_pkts, &last_txbd);

		if (unlikely(rc))
			break;
	}

	if (likely(nb_tx_pkts)) {
		/* Request a completion on the last packet */
		last_txbd->flags_type &= ~TX_BD_LONG_FLAGS_NO_CMPL;
		bnxt_db_write(&txq->tx_ring->tx_db, txq->tx_ring->tx_raw_prod);
	}

	return nb_tx_pkts;
}


int bnxt_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	struct bnxt *bp = dev->data->dev_private;
	struct bnxt_tx_queue *txq = bp->tx_queues[tx_queue_id];
	int rc = 0;

	rc = is_bnxt_in_error(bp);
	if (rc)
		return rc;

	bnxt_free_hwrm_tx_ring(bp, tx_queue_id);
	rc = bnxt_alloc_hwrm_tx_ring(bp, tx_queue_id);
	if (rc)
		return rc;

	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
	txq->tx_started = true;
	PMD_DRV_LOG(DEBUG, "Tx queue started\n");

	return 0;
}

int bnxt_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	struct bnxt *bp = dev->data->dev_private;
	struct bnxt_tx_queue *txq = bp->tx_queues[tx_queue_id];
	int rc = 0;

	rc = is_bnxt_in_error(bp);
	if (rc)
		return rc;

	/* Handle TX completions */
	bnxt_handle_tx_cp(txq);

	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
	txq->tx_started = false;
	PMD_DRV_LOG(DEBUG, "Tx queue stopped\n");

	return 0;
}

/* Sweep the Tx completion queue till HWRM_DONE for ring flush is received.
 * The mbufs will not be freed in this call.
 * They will be freed during ring free as a part of mem cleanup.
 */
int bnxt_flush_tx_cmp(struct bnxt_cp_ring_info *cpr)
{
	uint32_t raw_cons = cpr->cp_raw_cons;
	uint32_t cons;
	uint32_t nb_tx_pkts = 0;
	struct tx_cmpl *txcmp;
	struct cmpl_base *cp_desc_ring = cpr->cp_desc_ring;
	struct bnxt_ring *cp_ring_struct = cpr->cp_ring_struct;
	uint32_t ring_mask = cp_ring_struct->ring_mask;
	uint32_t opaque = 0;

	do {
		cons = RING_CMPL(ring_mask, raw_cons);
		txcmp = (struct tx_cmpl *)&cp_desc_ring[cons];

		if (!bnxt_cpr_cmp_valid(txcmp, raw_cons, ring_mask + 1))
			break;

		opaque = rte_le_to_cpu_32(txcmp->opaque);
		raw_cons = NEXT_RAW_CMP(raw_cons);

		if (CMP_TYPE(txcmp) == TX_CMPL_TYPE_TX_L2)
			nb_tx_pkts += opaque;
		else if (CMP_TYPE(txcmp) == HWRM_CMPL_TYPE_HWRM_DONE)
			return 1;
	} while (nb_tx_pkts < ring_mask);

	if (nb_tx_pkts) {
		cpr->cp_raw_cons = raw_cons;
		bnxt_db_cq(cpr);
	}

	return 0;
}
610