/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Aquantia Corporation
 */

#include <rte_malloc.h>
#include <rte_ethdev_driver.h>
#include <rte_net.h>

#include "atl_ethdev.h"
#include "atl_hw_regs.h"

#include "atl_logs.h"
#include "hw_atl/hw_atl_llh.h"
#include "hw_atl/hw_atl_b0.h"
#include "hw_atl/hw_atl_b0_internal.h"

#define ATL_TX_CKSUM_OFFLOAD_MASK ( \
	PKT_TX_IP_CKSUM | \
	PKT_TX_L4_MASK | \
	PKT_TX_TCP_SEG)

#define ATL_TX_OFFLOAD_MASK ( \
	PKT_TX_VLAN | \
	PKT_TX_IPV6 | \
	PKT_TX_IPV4 | \
	PKT_TX_IP_CKSUM | \
	PKT_TX_L4_MASK | \
	PKT_TX_TCP_SEG)

#define ATL_TX_OFFLOAD_NOTSUP_MASK \
	(PKT_TX_OFFLOAD_MASK ^ ATL_TX_OFFLOAD_MASK)

/**
 * Structure associated with each descriptor of the RX ring of a RX queue.
 */
struct atl_rx_entry {
	struct rte_mbuf *mbuf;
};

/**
 * Structure associated with each descriptor of the TX ring of a TX queue.
 */
struct atl_tx_entry {
	struct rte_mbuf *mbuf;
	uint16_t next_id;
	uint16_t last_id;
};

/**
 * Structure associated with each RX queue.
 */
struct atl_rx_queue {
	struct rte_mempool *mb_pool;
	struct hw_atl_rxd_s *hw_ring;
	uint64_t hw_ring_phys_addr;
	struct atl_rx_entry *sw_ring;
	uint16_t nb_rx_desc;
	uint16_t rx_tail;
	uint16_t nb_rx_hold;
	uint16_t rx_free_thresh;
	uint16_t queue_id;
	uint16_t port_id;
	uint16_t buff_size;
	bool l3_csum_enabled;
	bool l4_csum_enabled;
};

/**
 * Structure associated with each TX queue.
 */
struct atl_tx_queue {
	struct hw_atl_txd_s *hw_ring;
	uint64_t hw_ring_phys_addr;
	struct atl_tx_entry *sw_ring;
	uint16_t nb_tx_desc;
	uint16_t tx_tail;
	uint16_t tx_head;
	uint16_t queue_id;
	uint16_t port_id;
	uint16_t tx_free_thresh;
	uint16_t tx_free;
};

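/*
 * Reset an Rx queue to its initial state: clear every hardware descriptor
 * and rewind the software tail index.
 */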
static inline void
atl_reset_rx_queue(struct atl_rx_queue *rxq)
{
	struct hw_atl_rxd_s *rxd = NULL;
	int i;

	PMD_INIT_FUNC_TRACE();

	for (i = 0; i < rxq->nb_rx_desc; i++) {
		rxd = (struct hw_atl_rxd_s *)&rxq->hw_ring[i];
		rxd->buf_addr = 0;
		rxd->hdr_addr = 0;
	}

	rxq->rx_tail = 0;
}

int
atl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
		   uint16_t nb_rx_desc, unsigned int socket_id,
		   const struct rte_eth_rxconf *rx_conf,
		   struct rte_mempool *mb_pool)
{
	struct atl_rx_queue *rxq;
	const struct rte_memzone *mz;

	PMD_INIT_FUNC_TRACE();

	/* make sure a valid number of descriptors have been requested */
	if (nb_rx_desc < AQ_HW_MIN_RX_RING_SIZE ||
	    nb_rx_desc > AQ_HW_MAX_RX_RING_SIZE) {
		PMD_INIT_LOG(ERR, "Number of Rx descriptors must be "
			"less than or equal to %d, "
			"and greater than or equal to %d", AQ_HW_MAX_RX_RING_SIZE,
			AQ_HW_MIN_RX_RING_SIZE);
		return -EINVAL;
	}

	/*
	 * if this queue existed already, free the associated memory. The
	 * queue cannot be reused in case we need to allocate memory on
	 * different socket than was previously used.
	 */
	if (dev->data->rx_queues[rx_queue_id] != NULL) {
		atl_rx_queue_release(dev->data->rx_queues[rx_queue_id]);
		dev->data->rx_queues[rx_queue_id] = NULL;
	}

	/* allocate memory for the queue structure */
	rxq = rte_zmalloc_socket("atlantic Rx queue", sizeof(*rxq),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (rxq == NULL) {
		PMD_INIT_LOG(ERR, "Cannot allocate queue structure");
		return -ENOMEM;
	}

	/* setup queue */
	rxq->mb_pool = mb_pool;
	rxq->nb_rx_desc = nb_rx_desc;
	rxq->port_id = dev->data->port_id;
	rxq->queue_id = rx_queue_id;
	rxq->rx_free_thresh = rx_conf->rx_free_thresh;

	rxq->l3_csum_enabled = dev->data->dev_conf.rxmode.offloads &
		DEV_RX_OFFLOAD_IPV4_CKSUM;
	rxq->l4_csum_enabled = dev->data->dev_conf.rxmode.offloads &
		(DEV_RX_OFFLOAD_UDP_CKSUM | DEV_RX_OFFLOAD_TCP_CKSUM);
	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
		PMD_DRV_LOG(ERR, "PMD does not support KEEP_CRC offload");

	/* allocate memory for the software ring */
	rxq->sw_ring = rte_zmalloc_socket("atlantic sw rx ring",
				nb_rx_desc * sizeof(struct atl_rx_entry),
				RTE_CACHE_LINE_SIZE, socket_id);
	if (rxq->sw_ring == NULL) {
		PMD_INIT_LOG(ERR,
			"Port %d: Cannot allocate software ring for queue %d",
			rxq->port_id, rxq->queue_id);
		rte_free(rxq);
		return -ENOMEM;
	}

	/*
	 * allocate memory for the hardware descriptor ring. A memzone large
	 * enough to hold the maximum ring size is requested to allow for
	 * resizing in later calls to the queue setup function.
	 */
	mz = rte_eth_dma_zone_reserve(dev, "rx hw_ring", rx_queue_id,
				      HW_ATL_B0_MAX_RXD *
					sizeof(struct hw_atl_rxd_s),
				      128, socket_id);
	if (mz == NULL) {
		PMD_INIT_LOG(ERR,
			"Port %d: Cannot allocate hardware ring for queue %d",
			rxq->port_id, rxq->queue_id);
		rte_free(rxq->sw_ring);
		rte_free(rxq);
		return -ENOMEM;
	}
	rxq->hw_ring = mz->addr;
	rxq->hw_ring_phys_addr = mz->iova;

	atl_reset_rx_queue(rxq);

	dev->data->rx_queues[rx_queue_id] = rxq;
	return 0;
}

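/*
 * Reset a Tx queue to its initial state: clear the context flags of every
 * descriptor, mark all descriptors as done (dd), drop any stale mbuf
 * references and rewind the head/tail/free counters.
 */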
static inline void
atl_reset_tx_queue(struct atl_tx_queue *txq)
{
	struct atl_tx_entry *tx_entry;
	union hw_atl_txc_s *txc;
	uint16_t i;

	PMD_INIT_FUNC_TRACE();

	if (!txq) {
		PMD_DRV_LOG(ERR, "Pointer to txq is NULL");
		return;
	}

	tx_entry = txq->sw_ring;

	for (i = 0; i < txq->nb_tx_desc; i++) {
		txc = (union hw_atl_txc_s *)&txq->hw_ring[i];
		txc->flags1 = 0;
		txc->flags2 = 2;
	}

	for (i = 0; i < txq->nb_tx_desc; i++) {
		txq->hw_ring[i].dd = 1;
		tx_entry[i].mbuf = NULL;
	}

	txq->tx_tail = 0;
	txq->tx_head = 0;
	txq->tx_free = txq->nb_tx_desc - 1;
}

int
atl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
		   uint16_t nb_tx_desc, unsigned int socket_id,
		   const struct rte_eth_txconf *tx_conf)
{
	struct atl_tx_queue *txq;
	const struct rte_memzone *mz;

	PMD_INIT_FUNC_TRACE();

	/* make sure a valid number of descriptors have been requested */
	if (nb_tx_desc < AQ_HW_MIN_TX_RING_SIZE ||
	    nb_tx_desc > AQ_HW_MAX_TX_RING_SIZE) {
		PMD_INIT_LOG(ERR, "Number of Tx descriptors must be "
			"less than or equal to %d, "
			"and greater than or equal to %d", AQ_HW_MAX_TX_RING_SIZE,
			AQ_HW_MIN_TX_RING_SIZE);
		return -EINVAL;
	}

	/*
	 * if this queue existed already, free the associated memory. The
	 * queue cannot be reused in case we need to allocate memory on
	 * different socket than was previously used.
	 */
	if (dev->data->tx_queues[tx_queue_id] != NULL) {
		atl_tx_queue_release(dev->data->tx_queues[tx_queue_id]);
		dev->data->tx_queues[tx_queue_id] = NULL;
	}

	/* allocate memory for the queue structure */
	txq = rte_zmalloc_socket("atlantic Tx queue", sizeof(*txq),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (txq == NULL) {
		PMD_INIT_LOG(ERR, "Cannot allocate queue structure");
		return -ENOMEM;
	}

	/* setup queue */
	txq->nb_tx_desc = nb_tx_desc;
	txq->port_id = dev->data->port_id;
	txq->queue_id = tx_queue_id;
	txq->tx_free_thresh = tx_conf->tx_free_thresh;

	/* allocate memory for the software ring */
	txq->sw_ring = rte_zmalloc_socket("atlantic sw tx ring",
				nb_tx_desc * sizeof(struct atl_tx_entry),
				RTE_CACHE_LINE_SIZE, socket_id);
	if (txq->sw_ring == NULL) {
		PMD_INIT_LOG(ERR,
			"Port %d: Cannot allocate software ring for queue %d",
			txq->port_id, txq->queue_id);
		rte_free(txq);
		return -ENOMEM;
	}

	/*
	 * allocate memory for the hardware descriptor ring. A memzone large
	 * enough to hold the maximum ring size is requested to allow for
	 * resizing in later calls to the queue setup function.
	 */
	mz = rte_eth_dma_zone_reserve(dev, "tx hw_ring", tx_queue_id,
				HW_ATL_B0_MAX_TXD * sizeof(struct hw_atl_txd_s),
				128, socket_id);
	if (mz == NULL) {
		PMD_INIT_LOG(ERR,
			"Port %d: Cannot allocate hardware ring for queue %d",
			txq->port_id, txq->queue_id);
		rte_free(txq->sw_ring);
		rte_free(txq);
		return -ENOMEM;
	}
	txq->hw_ring = mz->addr;
	txq->hw_ring_phys_addr = mz->iova;

	atl_reset_tx_queue(txq);

	dev->data->tx_queues[tx_queue_id] = txq;
	return 0;
}

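/*
 * Program every configured Tx ring's base address and size into the
 * hardware.
 */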
int
atl_tx_init(struct rte_eth_dev *eth_dev)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	struct atl_tx_queue *txq;
	uint64_t base_addr = 0;
	int i = 0;
	int err = 0;

	PMD_INIT_FUNC_TRACE();

	for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
		txq = eth_dev->data->tx_queues[i];
		base_addr = txq->hw_ring_phys_addr;

		err = hw_atl_b0_hw_ring_tx_init(hw, base_addr,
						txq->queue_id,
						txq->nb_tx_desc, 0,
						txq->port_id);

		if (err) {
			PMD_INIT_LOG(ERR,
				"Port %d: Cannot init TX queue %d",
				txq->port_id, txq->queue_id);
			break;
		}
	}

	return err;
}

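/*
 * Program every configured Rx ring into the hardware. The per-queue
 * receive buffer size is derived from the mempool data room (rounded down
 * to a 1 KB multiple and capped at the hardware maximum), and the RSS
 * indirection table is filled across the configured Rx queues.
 */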
int
atl_rx_init(struct rte_eth_dev *eth_dev)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	struct aq_rss_parameters *rss_params = &hw->aq_nic_cfg->aq_rss;
	struct atl_rx_queue *rxq;
	uint64_t base_addr = 0;
	int i = 0;
	int err = 0;

	PMD_INIT_FUNC_TRACE();

	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
		rxq = eth_dev->data->rx_queues[i];
		base_addr = rxq->hw_ring_phys_addr;

		/* Take requested pool mbuf size and adapt
		 * descriptor buffer to best fit
		 */
		int buff_size = rte_pktmbuf_data_room_size(rxq->mb_pool) -
				RTE_PKTMBUF_HEADROOM;

		buff_size = RTE_ALIGN_FLOOR(buff_size, 1024);
		if (buff_size > HW_ATL_B0_RXD_BUF_SIZE_MAX) {
			PMD_INIT_LOG(WARNING,
				"Port %d queue %d: mem pool buff size is too big\n",
				rxq->port_id, rxq->queue_id);
			buff_size = HW_ATL_B0_RXD_BUF_SIZE_MAX;
		}
		if (buff_size < 1024) {
			PMD_INIT_LOG(ERR,
				"Port %d queue %d: mem pool buff size is too small\n",
				rxq->port_id, rxq->queue_id);
			return -EINVAL;
		}
		rxq->buff_size = buff_size;

		err = hw_atl_b0_hw_ring_rx_init(hw, base_addr, rxq->queue_id,
						rxq->nb_rx_desc, buff_size, 0,
						rxq->port_id);

		if (err) {
			PMD_INIT_LOG(ERR, "Port %d: Cannot init RX queue %d",
				rxq->port_id, rxq->queue_id);
			break;
		}
	}

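	/*
	 * Fill the RSS indirection table round-robin over the Rx queues;
	 * note that the mask arithmetic below only spreads entries evenly
	 * when the number of Rx queues is a power of two.
	 */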
	for (i = rss_params->indirection_table_size; i--;)
		rss_params->indirection_table[i] = i &
			(eth_dev->data->nb_rx_queues - 1);
	hw_atl_b0_hw_rss_set(hw, rss_params);
	return err;
}

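/*
 * Attach a freshly allocated mbuf to every descriptor of an Rx ring.
 */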
static int
atl_alloc_rx_queue_mbufs(struct atl_rx_queue *rxq)
{
	struct atl_rx_entry *rx_entry = rxq->sw_ring;
	struct hw_atl_rxd_s *rxd;
	uint64_t dma_addr = 0;
	uint32_t i = 0;

	PMD_INIT_FUNC_TRACE();

	/* fill Rx ring */
	for (i = 0; i < rxq->nb_rx_desc; i++) {
		struct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);

		if (mbuf == NULL) {
			PMD_INIT_LOG(ERR,
				"Port %d: mbuf alloc failed for rx queue %d",
				rxq->port_id, rxq->queue_id);
			return -ENOMEM;
		}

		mbuf->data_off = RTE_PKTMBUF_HEADROOM;
		mbuf->port = rxq->port_id;

		dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
		rxd = (struct hw_atl_rxd_s *)&rxq->hw_ring[i];
		rxd->buf_addr = dma_addr;
		rxd->hdr_addr = 0;
		rx_entry[i].mbuf = mbuf;
	}

	return 0;
}

static void
atl_rx_queue_release_mbufs(struct atl_rx_queue *rxq)
{
	int i;

	PMD_INIT_FUNC_TRACE();

	if (rxq->sw_ring != NULL) {
		for (i = 0; i < rxq->nb_rx_desc; i++) {
			if (rxq->sw_ring[i].mbuf != NULL) {
				rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
				rxq->sw_ring[i].mbuf = NULL;
			}
		}
	}
}

int
atl_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct atl_rx_queue *rxq = NULL;

	PMD_INIT_FUNC_TRACE();

	if (rx_queue_id < dev->data->nb_rx_queues) {
		rxq = dev->data->rx_queues[rx_queue_id];

		if (atl_alloc_rx_queue_mbufs(rxq) != 0) {
			PMD_INIT_LOG(ERR,
				"Port %d: Allocate mbufs for queue %d failed",
				rxq->port_id, rxq->queue_id);
			return -1;
		}

		hw_atl_b0_hw_ring_rx_start(hw, rx_queue_id);

		rte_wmb();
		hw_atl_reg_rx_dma_desc_tail_ptr_set(hw, rxq->nb_rx_desc - 1,
						    rx_queue_id);
		dev->data->rx_queue_state[rx_queue_id] =
			RTE_ETH_QUEUE_STATE_STARTED;
	} else {
		return -1;
	}

	return 0;
}

int
atl_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct atl_rx_queue *rxq = NULL;

	PMD_INIT_FUNC_TRACE();

	if (rx_queue_id < dev->data->nb_rx_queues) {
		rxq = dev->data->rx_queues[rx_queue_id];

		hw_atl_b0_hw_ring_rx_stop(hw, rx_queue_id);

		atl_rx_queue_release_mbufs(rxq);
		atl_reset_rx_queue(rxq);

		dev->data->rx_queue_state[rx_queue_id] =
			RTE_ETH_QUEUE_STATE_STOPPED;
	} else {
		return -1;
	}

	return 0;
}

void
atl_rx_queue_release(void *rx_queue)
{
	PMD_INIT_FUNC_TRACE();

	if (rx_queue != NULL) {
		struct atl_rx_queue *rxq = (struct atl_rx_queue *)rx_queue;

		atl_rx_queue_release_mbufs(rxq);
		rte_free(rxq->sw_ring);
		rte_free(rxq);
	}
}

static void
atl_tx_queue_release_mbufs(struct atl_tx_queue *txq)
{
	int i;

	PMD_INIT_FUNC_TRACE();

	if (txq->sw_ring != NULL) {
		for (i = 0; i < txq->nb_tx_desc; i++) {
			if (txq->sw_ring[i].mbuf != NULL) {
				rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
				txq->sw_ring[i].mbuf = NULL;
			}
		}
	}
}

int
atl_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

	if (tx_queue_id < dev->data->nb_tx_queues) {
		hw_atl_b0_hw_ring_tx_start(hw, tx_queue_id);

		rte_wmb();
		hw_atl_b0_hw_tx_ring_tail_update(hw, 0, tx_queue_id);
		dev->data->tx_queue_state[tx_queue_id] =
			RTE_ETH_QUEUE_STATE_STARTED;
	} else {
		return -1;
	}

	return 0;
}

int
atl_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct atl_tx_queue *txq;

	PMD_INIT_FUNC_TRACE();

	txq = dev->data->tx_queues[tx_queue_id];

	hw_atl_b0_hw_ring_tx_stop(hw, tx_queue_id);

	atl_tx_queue_release_mbufs(txq);
	atl_reset_tx_queue(txq);
	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;

	return 0;
}

void
atl_tx_queue_release(void *tx_queue)
{
	PMD_INIT_FUNC_TRACE();

	if (tx_queue != NULL) {
		struct atl_tx_queue *txq = (struct atl_tx_queue *)tx_queue;

		atl_tx_queue_release_mbufs(txq);
		rte_free(txq->sw_ring);
		rte_free(txq);
	}
}

void
atl_free_queues(struct rte_eth_dev *dev)
{
	unsigned int i;

	PMD_INIT_FUNC_TRACE();

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		atl_rx_queue_release(dev->data->rx_queues[i]);
		dev->data->rx_queues[i] = NULL;
	}
	dev->data->nb_rx_queues = 0;

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		atl_tx_queue_release(dev->data->tx_queues[i]);
		dev->data->tx_queues[i] = NULL;
	}
	dev->data->nb_tx_queues = 0;
}

int
atl_start_queues(struct rte_eth_dev *dev)
{
	int i;

	PMD_INIT_FUNC_TRACE();

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		if (atl_tx_queue_start(dev, i) != 0) {
			PMD_DRV_LOG(ERR,
				"Port %d: Start Tx queue %d failed",
				dev->data->port_id, i);
			return -1;
		}
	}

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		if (atl_rx_queue_start(dev, i) != 0) {
			PMD_DRV_LOG(ERR,
				"Port %d: Start Rx queue %d failed",
				dev->data->port_id, i);
			return -1;
		}
	}

	return 0;
}

int
atl_stop_queues(struct rte_eth_dev *dev)
{
	int i;

	PMD_INIT_FUNC_TRACE();

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		if (atl_tx_queue_stop(dev, i) != 0) {
			PMD_DRV_LOG(ERR,
				"Port %d: Stop Tx queue %d failed",
				dev->data->port_id, i);
			return -1;
		}
	}

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		if (atl_rx_queue_stop(dev, i) != 0) {
			PMD_DRV_LOG(ERR,
				"Port %d: Stop Rx queue %d failed",
				dev->data->port_id, i);
			return -1;
		}
	}

	return 0;
}

void
atl_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
		 struct rte_eth_rxq_info *qinfo)
{
	struct atl_rx_queue *rxq;

	PMD_INIT_FUNC_TRACE();

	rxq = dev->data->rx_queues[queue_id];

	qinfo->mp = rxq->mb_pool;
	qinfo->scattered_rx = dev->data->scattered_rx;
	qinfo->nb_desc = rxq->nb_rx_desc;
}

void
atl_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
		 struct rte_eth_txq_info *qinfo)
{
	struct atl_tx_queue *txq;

	PMD_INIT_FUNC_TRACE();

	txq = dev->data->tx_queues[queue_id];

	qinfo->nb_desc = txq->nb_tx_desc;
}

/* Return Rx queue avail count */
uint32_t
atl_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct atl_rx_queue *rxq;

	PMD_INIT_FUNC_TRACE();

	if (rx_queue_id >= dev->data->nb_rx_queues) {
		PMD_DRV_LOG(ERR, "Invalid RX queue id=%d", rx_queue_id);
		return 0;
	}

	rxq = dev->data->rx_queues[rx_queue_id];

	if (rxq == NULL)
		return 0;

	return rxq->nb_rx_desc - rxq->nb_rx_hold;
}

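/*
 * rx_descriptor_status callback: report whether the descriptor 'offset'
 * entries past the current software tail has been written back by the
 * device (DONE), is still awaiting a packet (AVAIL), or lies outside the
 * range currently usable by the hardware (UNAVAIL).
 */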
int
atl_dev_rx_descriptor_status(void *rx_queue, uint16_t offset)
{
	struct atl_rx_queue *rxq = rx_queue;
	struct hw_atl_rxd_wb_s *rxd;
	uint32_t idx;

	PMD_INIT_FUNC_TRACE();

	if (unlikely(offset >= rxq->nb_rx_desc))
		return -EINVAL;

	if (offset >= rxq->nb_rx_desc - rxq->nb_rx_hold)
		return RTE_ETH_RX_DESC_UNAVAIL;

	idx = rxq->rx_tail + offset;

	if (idx >= rxq->nb_rx_desc)
		idx -= rxq->nb_rx_desc;

	rxd = (struct hw_atl_rxd_wb_s *)&rxq->hw_ring[idx];

	if (rxd->dd)
		return RTE_ETH_RX_DESC_DONE;

	return RTE_ETH_RX_DESC_AVAIL;
}

int
atl_dev_tx_descriptor_status(void *tx_queue, uint16_t offset)
{
	struct atl_tx_queue *txq = tx_queue;
	struct hw_atl_txd_s *txd;
	uint32_t idx;

	PMD_INIT_FUNC_TRACE();

	if (unlikely(offset >= txq->nb_tx_desc))
		return -EINVAL;

	idx = txq->tx_tail + offset;

	if (idx >= txq->nb_tx_desc)
		idx -= txq->nb_tx_desc;

	txd = &txq->hw_ring[idx];

	if (txd->dd)
		return RTE_ETH_TX_DESC_DONE;

	return RTE_ETH_TX_DESC_FULL;
}

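/*
 * Illustrative application-side usage of the two status callbacks above,
 * reached through the generic ethdev API (port, queue and offset values
 * are examples only):
 *
 *	rx_done = rte_eth_rx_descriptor_status(port_id, 0, 16) ==
 *		  RTE_ETH_RX_DESC_DONE;
 *	tx_full = rte_eth_tx_descriptor_status(port_id, 0, 64) ==
 *		  RTE_ETH_TX_DESC_FULL;
 */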
static int
atl_rx_enable_intr(struct rte_eth_dev *dev, uint16_t queue_id, bool enable)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct atl_rx_queue *rxq;

	PMD_INIT_FUNC_TRACE();

	if (queue_id >= dev->data->nb_rx_queues) {
		PMD_DRV_LOG(ERR, "Invalid RX queue id=%d", queue_id);
		return -EINVAL;
	}

	rxq = dev->data->rx_queues[queue_id];

	if (rxq == NULL)
		return 0;

	/* Mapping interrupt vector */
	hw_atl_itr_irq_map_en_rx_set(hw, enable, queue_id);

	return 0;
}

int
atl_dev_rx_queue_intr_enable(struct rte_eth_dev *eth_dev, uint16_t queue_id)
{
	return atl_rx_enable_intr(eth_dev, queue_id, true);
}

int
atl_dev_rx_queue_intr_disable(struct rte_eth_dev *eth_dev, uint16_t queue_id)
{
	return atl_rx_enable_intr(eth_dev, queue_id, false);
}

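/*
 * tx_pkt_prepare callback: validate each mbuf (segment count within the
 * hardware limit, only supported offload flags set) and fill in the
 * pseudo-header checksums needed by the checksum/TSO offloads. Returns
 * the number of packets successfully prepared; on failure rte_errno is
 * set and the index of the offending packet is returned.
 */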
uint16_t
atl_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
	      uint16_t nb_pkts)
{
	int i, ret;
	uint64_t ol_flags;
	struct rte_mbuf *m;

	PMD_INIT_FUNC_TRACE();

	for (i = 0; i < nb_pkts; i++) {
		m = tx_pkts[i];
		ol_flags = m->ol_flags;

		if (m->nb_segs > AQ_HW_MAX_SEGS_SIZE) {
			rte_errno = EINVAL;
			return i;
		}

		if (ol_flags & ATL_TX_OFFLOAD_NOTSUP_MASK) {
			rte_errno = ENOTSUP;
			return i;
		}

#ifdef RTE_LIBRTE_ETHDEV_DEBUG
		ret = rte_validate_tx_offload(m);
		if (ret != 0) {
			rte_errno = -ret;
			return i;
		}
#endif
		ret = rte_net_intel_cksum_prepare(m);
		if (ret != 0) {
			rte_errno = -ret;
			return i;
		}
	}

	return i;
}

static uint64_t
atl_desc_to_offload_flags(struct atl_rx_queue *rxq,
			  struct hw_atl_rxd_wb_s *rxd_wb)
{
	uint64_t mbuf_flags = 0;

	PMD_INIT_FUNC_TRACE();

	/* IPv4 ? */
	if (rxq->l3_csum_enabled && ((rxd_wb->pkt_type & 0x3) == 0)) {
		/* IPv4 csum error ? */
		if (rxd_wb->rx_stat & BIT(1))
			mbuf_flags |= PKT_RX_IP_CKSUM_BAD;
		else
			mbuf_flags |= PKT_RX_IP_CKSUM_GOOD;
	} else {
		mbuf_flags |= PKT_RX_IP_CKSUM_UNKNOWN;
	}

	/* CSUM calculated ? */
	if (rxq->l4_csum_enabled && (rxd_wb->rx_stat & BIT(3))) {
		if (rxd_wb->rx_stat & BIT(2))
			mbuf_flags |= PKT_RX_L4_CKSUM_BAD;
		else
			mbuf_flags |= PKT_RX_L4_CKSUM_GOOD;
	} else {
		mbuf_flags |= PKT_RX_L4_CKSUM_UNKNOWN;
	}

	return mbuf_flags;
}

static uint32_t
atl_desc_to_pkt_type(struct hw_atl_rxd_wb_s *rxd_wb)
{
	uint32_t type = RTE_PTYPE_UNKNOWN;
	uint16_t l2_l3_type = rxd_wb->pkt_type & 0x3;
	uint16_t l4_type = (rxd_wb->pkt_type & 0x1C) >> 2;

	switch (l2_l3_type) {
	case 0:
		type = RTE_PTYPE_L3_IPV4;
		break;
	case 1:
		type = RTE_PTYPE_L3_IPV6;
		break;
	case 2:
		type = RTE_PTYPE_L2_ETHER;
		break;
	case 3:
		type = RTE_PTYPE_L2_ETHER_ARP;
		break;
	}

	switch (l4_type) {
	case 0:
		type |= RTE_PTYPE_L4_TCP;
		break;
	case 1:
		type |= RTE_PTYPE_L4_UDP;
		break;
	case 2:
		type |= RTE_PTYPE_L4_SCTP;
		break;
	case 3:
		type |= RTE_PTYPE_L4_ICMP;
		break;
	}

	if (rxd_wb->pkt_type & BIT(5))
		type |= RTE_PTYPE_L2_ETHER_VLAN;

	return type;
}

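/*
 * Burst receive: walk the write-back descriptors starting at rx_tail.
 * For multi-segment packets the loop first makes sure the descriptor
 * carrying EOP has been written back, then replaces each consumed mbuf
 * with a fresh one, chains the segments, fills in length, RSS hash, VLAN
 * and checksum metadata, and finally pushes the ring tail back to the
 * hardware once enough descriptors have been reclaimed.
 */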
uint16_t
atl_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
	struct atl_rx_queue *rxq = (struct atl_rx_queue *)rx_queue;
	struct rte_eth_dev *dev = &rte_eth_devices[rxq->port_id];
	struct atl_adapter *adapter =
		ATL_DEV_TO_ADAPTER(&rte_eth_devices[rxq->port_id]);
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(adapter);
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
	struct atl_rx_entry *sw_ring = rxq->sw_ring;

	struct rte_mbuf *new_mbuf;
	struct rte_mbuf *rx_mbuf, *rx_mbuf_prev, *rx_mbuf_first;
	struct atl_rx_entry *rx_entry;
	uint16_t nb_rx = 0;
	uint16_t nb_hold = 0;
	struct hw_atl_rxd_wb_s rxd_wb;
	struct hw_atl_rxd_s *rxd = NULL;
	uint16_t tail = rxq->rx_tail;
	uint64_t dma_addr;
	uint16_t pkt_len = 0;

	while (nb_rx < nb_pkts) {
		uint16_t eop_tail = tail;

		rxd = (struct hw_atl_rxd_s *)&rxq->hw_ring[tail];
		rxd_wb = *(struct hw_atl_rxd_wb_s *)rxd;

		if (!rxd_wb.dd) { /* RxD is not done */
			break;
		}

		PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u tail=%u "
			   "eop=0x%x pkt_len=%u hash=0x%x hash_type=0x%x",
			   (unsigned int)rxq->port_id,
			   (unsigned int)rxq->queue_id,
			   (unsigned int)tail, (unsigned int)rxd_wb.eop,
			   (unsigned int)rte_le_to_cpu_16(rxd_wb.pkt_len),
			   rxd_wb.rss_hash, rxd_wb.rss_type);

		/* Not the last descriptor of a packet: scan ahead for EOP */
		if (!rxd_wb.eop) {
			while (true) {
				struct hw_atl_rxd_wb_s *eop_rxwbd;

				eop_tail = (eop_tail + 1) % rxq->nb_rx_desc;
				eop_rxwbd = (struct hw_atl_rxd_wb_s *)
					&rxq->hw_ring[eop_tail];
				if (!eop_rxwbd->dd) {
					/* no EOP received yet */
					eop_tail = tail;
					break;
				}
				if (eop_rxwbd->dd && eop_rxwbd->eop)
					break;
			}
			/* No EOP in ring */
			if (eop_tail == tail)
				break;
		}
		rx_mbuf_prev = NULL;
		rx_mbuf_first = NULL;

		/* Run through packet segments */
		while (true) {
			new_mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
			if (new_mbuf == NULL) {
				PMD_RX_LOG(DEBUG,
					   "RX mbuf alloc failed port_id=%u "
					   "queue_id=%u",
					   (unsigned int)rxq->port_id,
					   (unsigned int)rxq->queue_id);
				dev->data->rx_mbuf_alloc_failed++;
				adapter->sw_stats.rx_nombuf++;
				goto err_stop;
			}

			nb_hold++;
			rx_entry = &sw_ring[tail];

			rx_mbuf = rx_entry->mbuf;
			rx_entry->mbuf = new_mbuf;
			dma_addr = rte_cpu_to_le_64(
				rte_mbuf_data_iova_default(new_mbuf));

			/* setup RX descriptor */
			rxd->hdr_addr = 0;
			rxd->buf_addr = dma_addr;

			/*
			 * Initialize the returned mbuf.
			 * 1) setup generic mbuf fields:
			 *    - number of segments,
			 *    - next segment,
			 *    - packet length,
			 *    - RX port identifier.
			 * 2) integrate hardware offload data, if any:
			 *    - RSS flag & hash,
			 *    - IP checksum flag,
			 *    - VLAN TCI, if any,
			 *    - error flags.
			 */
			pkt_len = (uint16_t)rte_le_to_cpu_16(rxd_wb.pkt_len);
			rx_mbuf->data_off = RTE_PKTMBUF_HEADROOM;
			rte_prefetch1((char *)rx_mbuf->buf_addr +
				rx_mbuf->data_off);
			rx_mbuf->nb_segs = 0;
			rx_mbuf->next = NULL;
			rx_mbuf->pkt_len = pkt_len;
			rx_mbuf->data_len = pkt_len;
			if (rxd_wb.eop) {
				u16 remainder_len = pkt_len % rxq->buff_size;

				if (!remainder_len)
					remainder_len = rxq->buff_size;
				rx_mbuf->data_len = remainder_len;
			} else {
				rx_mbuf->data_len = pkt_len > rxq->buff_size ?
					rxq->buff_size : pkt_len;
			}
			rx_mbuf->port = rxq->port_id;

			rx_mbuf->hash.rss = rxd_wb.rss_hash;

			rx_mbuf->vlan_tci = rxd_wb.vlan;

			rx_mbuf->ol_flags =
				atl_desc_to_offload_flags(rxq, &rxd_wb);

			rx_mbuf->packet_type = atl_desc_to_pkt_type(&rxd_wb);

			if (rx_mbuf->packet_type & RTE_PTYPE_L2_ETHER_VLAN) {
				rx_mbuf->ol_flags |= PKT_RX_VLAN;
				rx_mbuf->vlan_tci = rxd_wb.vlan;

				if (cfg->vlan_strip)
					rx_mbuf->ol_flags |=
						PKT_RX_VLAN_STRIPPED;
			}

			if (!rx_mbuf_first)
				rx_mbuf_first = rx_mbuf;
			rx_mbuf_first->nb_segs++;

			if (rx_mbuf_prev)
				rx_mbuf_prev->next = rx_mbuf;
			rx_mbuf_prev = rx_mbuf;

			tail = (tail + 1) % rxq->nb_rx_desc;
			/* Prefetch next mbufs */
			rte_prefetch0(sw_ring[tail].mbuf);
			if ((tail & 0x3) == 0) {
				rte_prefetch0(&sw_ring[tail]);
				rte_prefetch0(&sw_ring[tail]);
			}

			/* filled mbuf_first */
			if (rxd_wb.eop)
				break;
			rxd = (struct hw_atl_rxd_s *)&rxq->hw_ring[tail];
			rxd_wb = *(struct hw_atl_rxd_wb_s *)rxd;
		}

		/*
		 * Store the mbuf address into the next entry of the array
		 * of returned packets.
		 */
		rx_pkts[nb_rx++] = rx_mbuf_first;
		adapter->sw_stats.q_ipackets[rxq->queue_id]++;
		adapter->sw_stats.q_ibytes[rxq->queue_id] +=
			rx_mbuf_first->pkt_len;

		PMD_RX_LOG(DEBUG, "add mbuf segs=%d pkt_len=%d",
			   rx_mbuf_first->nb_segs,
			   rx_mbuf_first->pkt_len);
	}

err_stop:

	rxq->rx_tail = tail;

	/*
	 * If the number of free RX descriptors is greater than the RX free
	 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
	 * register.
	 * Update the RDT with the value of the last processed RX descriptor
	 * minus 1, to guarantee that the RDT register is never equal to the
	 * RDH register, which creates a "full" ring situation from the
	 * hardware point of view...
	 */
	nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
	if (nb_hold > rxq->rx_free_thresh) {
		PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
			"nb_hold=%u nb_rx=%u",
			(unsigned int)rxq->port_id, (unsigned int)rxq->queue_id,
			(unsigned int)tail, (unsigned int)nb_hold,
			(unsigned int)nb_rx);
		tail = (uint16_t)((tail == 0) ?
			(rxq->nb_rx_desc - 1) : (tail - 1));

		hw_atl_reg_rx_dma_desc_tail_ptr_set(hw, tail, rxq->queue_id);

		nb_hold = 0;
	}

	rxq->nb_rx_hold = nb_hold;

	return nb_rx;
}

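/*
 * Reclaim completed Tx descriptors: count the descriptors between tx_head
 * and tx_tail that the hardware has marked done (dd), free the attached
 * mbufs and advance tx_head / tx_free accordingly.
 */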
static void
atl_xmit_cleanup(struct atl_tx_queue *txq)
{
	struct atl_tx_entry *sw_ring;
	struct hw_atl_txd_s *txd;
	int to_clean = 0;

	if (txq != NULL) {
		sw_ring = txq->sw_ring;
		int head = txq->tx_head;
		int cnt;
		int i;

		for (i = 0, cnt = head; ; i++) {
			txd = &txq->hw_ring[cnt];

			if (txd->dd)
				to_clean++;

			cnt = (cnt + 1) % txq->nb_tx_desc;
			if (cnt == txq->tx_tail)
				break;
		}

		if (to_clean == 0)
			return;

		while (to_clean) {
			txd = &txq->hw_ring[head];

			struct atl_tx_entry *rx_entry = &sw_ring[head];

			if (rx_entry->mbuf) {
				rte_pktmbuf_free_seg(rx_entry->mbuf);
				rx_entry->mbuf = NULL;
			}

			if (txd->dd)
				to_clean--;

			txd->buf_addr = 0;
			txd->flags = 0;

			head = (head + 1) % txq->nb_tx_desc;
			txq->tx_free++;
		}

		txq->tx_head = head;
	}
}

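/*
 * Fill the Tx context descriptor for TSO and/or VLAN insertion and return
 * the extra command bits that the subsequent data descriptors must carry
 * (0 when no context descriptor is needed).
 */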
static int
atl_tso_setup(struct rte_mbuf *tx_pkt, union hw_atl_txc_s *txc)
{
	uint32_t tx_cmd = 0;
	uint64_t ol_flags = tx_pkt->ol_flags;

	if (ol_flags & PKT_TX_TCP_SEG) {
		tx_cmd |= tx_desc_cmd_lso | tx_desc_cmd_l4cs;

		txc->cmd = 0x4;

		if (ol_flags & PKT_TX_IPV6)
			txc->cmd |= 0x2;

		txc->l2_len = tx_pkt->l2_len;
		txc->l3_len = tx_pkt->l3_len;
		txc->l4_len = tx_pkt->l4_len;

		txc->mss_len = tx_pkt->tso_segsz;
	}

	if (ol_flags & PKT_TX_VLAN) {
		tx_cmd |= tx_desc_cmd_vlan;
		txc->vlan_tag = tx_pkt->vlan_tci;
	}

	if (tx_cmd) {
		txc->type = tx_desc_type_ctx;
		txc->idx = 0;
	}

	return tx_cmd;
}

static inline void
atl_setup_csum_offload(struct rte_mbuf *mbuf, struct hw_atl_txd_s *txd,
		       uint32_t tx_cmd)
{
	txd->cmd |= tx_desc_cmd_fcs;
	txd->cmd |= (mbuf->ol_flags & PKT_TX_IP_CKSUM) ? tx_desc_cmd_ipv4 : 0;
	/* L4 csum requested */
	txd->cmd |= (mbuf->ol_flags & PKT_TX_L4_MASK) ? tx_desc_cmd_l4cs : 0;
	txd->cmd |= tx_cmd;
}

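/*
 * Enqueue a single packet: emit a context descriptor first when TSO or
 * VLAN insertion is requested, then one data descriptor per mbuf segment.
 * The last descriptor gets the EOP and write-back bits, and the hardware
 * tail pointer is advanced.
 */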
static inline void
atl_xmit_pkt(struct aq_hw_s *hw, struct atl_tx_queue *txq,
	     struct rte_mbuf *tx_pkt)
{
	struct atl_adapter *adapter =
		ATL_DEV_TO_ADAPTER(&rte_eth_devices[txq->port_id]);
	uint32_t pay_len = 0;
	int tail = 0;
	struct atl_tx_entry *tx_entry;
	uint64_t buf_dma_addr;
	struct rte_mbuf *m_seg;
	union hw_atl_txc_s *txc = NULL;
	struct hw_atl_txd_s *txd = NULL;
	u32 tx_cmd = 0U;
	int desc_count = 0;

	tail = txq->tx_tail;

	txc = (union hw_atl_txc_s *)&txq->hw_ring[tail];

	txc->flags1 = 0U;
	txc->flags2 = 0U;

	tx_cmd = atl_tso_setup(tx_pkt, txc);

	if (tx_cmd) {
		/* We've consumed the first desc, adjust counters */
		tail = (tail + 1) % txq->nb_tx_desc;
		txq->tx_tail = tail;
		txq->tx_free -= 1;

		txd = &txq->hw_ring[tail];
		txd->flags = 0U;
	} else {
		txd = (struct hw_atl_txd_s *)txc;
	}

	txd->ct_en = !!tx_cmd;

	txd->type = tx_desc_type_desc;

	atl_setup_csum_offload(tx_pkt, txd, tx_cmd);

	if (tx_cmd)
		txd->ct_idx = 0;

	pay_len = tx_pkt->pkt_len;

	txd->pay_len = pay_len;

	for (m_seg = tx_pkt; m_seg; m_seg = m_seg->next) {
		if (desc_count > 0) {
			txd = &txq->hw_ring[tail];
			txd->flags = 0U;
		}

		buf_dma_addr = rte_mbuf_data_iova(m_seg);
		txd->buf_addr = rte_cpu_to_le_64(buf_dma_addr);

		txd->type = tx_desc_type_desc;
		txd->len = m_seg->data_len;
		txd->pay_len = pay_len;

		/* Store mbuf for freeing later */
		tx_entry = &txq->sw_ring[tail];

		if (tx_entry->mbuf)
			rte_pktmbuf_free_seg(tx_entry->mbuf);
		tx_entry->mbuf = m_seg;

		tail = (tail + 1) % txq->nb_tx_desc;

		desc_count++;
	}

	/* Last descriptor requires EOP and WB */
	txd->eop = 1U;
	txd->cmd |= tx_desc_cmd_wb;

	hw_atl_b0_hw_tx_ring_tail_update(hw, tail, txq->queue_id);

	txq->tx_tail = tail;

	txq->tx_free -= desc_count;

	adapter->sw_stats.q_opackets[txq->queue_id]++;
	adapter->sw_stats.q_obytes[txq->queue_id] += pay_len;
}

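/*
 * Burst transmit: reclaim completed descriptors when the free count drops
 * below the threshold, then enqueue packets one by one until the ring is
 * too full or a malformed mbuf chain is encountered.
 */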
uint16_t
atl_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	struct rte_eth_dev *dev = NULL;
	struct aq_hw_s *hw = NULL;
	struct atl_tx_queue *txq = tx_queue;
	struct rte_mbuf *tx_pkt;
	uint16_t nb_tx;

	dev = &rte_eth_devices[txq->port_id];
	hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	PMD_TX_LOG(DEBUG,
		"port %d txq %d pkts: %d tx_free=%d tx_tail=%d tx_head=%d",
		txq->port_id, txq->queue_id, nb_pkts, txq->tx_free,
		txq->tx_tail, txq->tx_head);

	for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
		tx_pkt = *tx_pkts++;

		/* Clean Tx queue if needed */
		if (txq->tx_free < txq->tx_free_thresh)
			atl_xmit_cleanup(txq);

		/* Check if we have enough free descriptors */
		if (txq->tx_free < tx_pkt->nb_segs)
			break;

		/* check mbuf is valid */
		if ((tx_pkt->nb_segs == 0) ||
		    ((tx_pkt->nb_segs > 1) && (tx_pkt->next == NULL)))
			break;

		/* Send the packet */
		atl_xmit_pkt(hw, txq, tx_pkt);
	}

	PMD_TX_LOG(DEBUG, "atl_xmit_pkts %d transmitted", nb_tx);

	return nb_tx;
}