1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2015 6WIND S.A.
3 * Copyright 2015 Mellanox Technologies, Ltd
4 */
5
6 #include <stddef.h>
7 #include <errno.h>
8 #include <string.h>
9 #include <stdint.h>
10 #include <unistd.h>
11 #include <inttypes.h>
12
13 #include <rte_mbuf.h>
14 #include <rte_malloc.h>
15 #include <ethdev_driver.h>
16 #include <rte_bus_pci.h>
17 #include <rte_common.h>
18 #include <rte_eal_paging.h>
19
20 #include <mlx5_common.h>
21 #include <mlx5_common_mr.h>
22 #include <mlx5_malloc.h>
23
24 #include "mlx5_defs.h"
25 #include "mlx5_utils.h"
26 #include "mlx5.h"
27 #include "mlx5_tx.h"
28 #include "mlx5_rxtx.h"
29 #include "mlx5_autoconf.h"
30
31 /**
32 * Allocate TX queue elements.
33 *
34 * @param txq_ctrl
35 * Pointer to TX queue structure.
36 */
37 void
38 txq_alloc_elts(struct mlx5_txq_ctrl *txq_ctrl)
39 {
40 const unsigned int elts_n = 1 << txq_ctrl->txq.elts_n;
41 unsigned int i;
42
43 for (i = 0; (i != elts_n); ++i)
44 txq_ctrl->txq.elts[i] = NULL;
45 DRV_LOG(DEBUG, "port %u Tx queue %u allocated and configured %u WRs",
46 PORT_ID(txq_ctrl->priv), txq_ctrl->txq.idx, elts_n);
47 txq_ctrl->txq.elts_head = 0;
48 txq_ctrl->txq.elts_tail = 0;
49 txq_ctrl->txq.elts_comp = 0;
50 }
51
52 /**
53 * Free TX queue elements.
54 *
55 * @param txq_ctrl
56 * Pointer to TX queue structure.
57 */
58 void
59 txq_free_elts(struct mlx5_txq_ctrl *txq_ctrl)
60 {
61 const uint16_t elts_n = 1 << txq_ctrl->txq.elts_n;
62 const uint16_t elts_m = elts_n - 1;
63 uint16_t elts_head = txq_ctrl->txq.elts_head;
64 uint16_t elts_tail = txq_ctrl->txq.elts_tail;
65 struct rte_mbuf *(*elts)[elts_n] = &txq_ctrl->txq.elts;
66
67 DRV_LOG(DEBUG, "port %u Tx queue %u freeing WRs",
68 PORT_ID(txq_ctrl->priv), txq_ctrl->txq.idx);
69 txq_ctrl->txq.elts_head = 0;
70 txq_ctrl->txq.elts_tail = 0;
71 txq_ctrl->txq.elts_comp = 0;
72
73 while (elts_tail != elts_head) {
74 struct rte_mbuf *elt = (*elts)[elts_tail & elts_m];
75
76 MLX5_ASSERT(elt != NULL);
77 rte_pktmbuf_free_seg(elt);
78 #ifdef RTE_LIBRTE_MLX5_DEBUG
79 /* Poisoning. */
80 memset(&(*elts)[elts_tail & elts_m],
81 0x77,
82 sizeof((*elts)[elts_tail & elts_m]));
83 #endif
84 ++elts_tail;
85 }
86 }
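
/*
 * Illustrative note (not part of the driver logic): elts_head and
 * elts_tail are free-running 16-bit counters, so an element slot is
 * always taken as "counter & elts_m". A minimal sketch, assuming a
 * queue created with elts_n == 8 (256 elements):
 *
 *     elts_n = 8;               // log2 of the ring size
 *     size   = 1u << elts_n;    // 256 entries
 *     elts_m = size - 1;        // 0xff, wrap-around mask
 *     // tail == 0x0105 addresses slot 0x0105 & 0xff == 5
 *     // head - tail (modulo 2^16) is the number of in-flight mbufs
 */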
87
88 /**
89 * Returns the per-port supported offloads.
90 *
91 * @param dev
92 * Pointer to Ethernet device.
93 *
94 * @return
95 * Supported Tx offloads.
96 */
97 uint64_t
98 mlx5_get_tx_port_offloads(struct rte_eth_dev *dev)
99 {
100 struct mlx5_priv *priv = dev->data->dev_private;
101 uint64_t offloads = (RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
102 RTE_ETH_TX_OFFLOAD_VLAN_INSERT);
103 struct mlx5_port_config *config = &priv->config;
104 struct mlx5_dev_cap *dev_cap = &priv->sh->dev_cap;
105
106 if (dev_cap->hw_csum)
107 offloads |= (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
108 RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
109 RTE_ETH_TX_OFFLOAD_TCP_CKSUM);
110 if (dev_cap->tso)
111 offloads |= RTE_ETH_TX_OFFLOAD_TCP_TSO;
112 if (priv->sh->config.tx_pp ||
113 priv->sh->cdev->config.hca_attr.wait_on_time)
114 offloads |= RTE_ETH_TX_OFFLOAD_SEND_ON_TIMESTAMP;
115 if (dev_cap->swp) {
116 if (dev_cap->swp & MLX5_SW_PARSING_CSUM_CAP)
117 offloads |= RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM;
118 if (dev_cap->swp & MLX5_SW_PARSING_TSO_CAP)
119 offloads |= (RTE_ETH_TX_OFFLOAD_IP_TNL_TSO |
120 RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO);
121 }
122 if (dev_cap->tunnel_en) {
123 if (dev_cap->hw_csum)
124 offloads |= RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM;
125 if (dev_cap->tso) {
126 if (dev_cap->tunnel_en &
127 MLX5_TUNNELED_OFFLOADS_VXLAN_CAP)
128 offloads |= RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO;
129 if (dev_cap->tunnel_en &
130 MLX5_TUNNELED_OFFLOADS_GRE_CAP)
131 offloads |= RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO;
132 if (dev_cap->tunnel_en &
133 MLX5_TUNNELED_OFFLOADS_GENEVE_CAP)
134 offloads |= RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO;
135 }
136 }
137 if (!config->mprq.enabled)
138 offloads |= RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
139 return offloads;
140 }
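
/*
 * Illustration only: a minimal application-side sketch of how the
 * capabilities reported above are typically consumed. port_id, nb_rxq
 * and nb_txq are placeholders; error handling is omitted.
 *
 *     struct rte_eth_dev_info info;
 *     struct rte_eth_conf conf = { .txmode = { .offloads = 0 } };
 *
 *     rte_eth_dev_info_get(port_id, &info);
 *     if (info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_TCP_TSO)
 *         conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_TCP_TSO;
 *     rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
 */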
141
142 /* Fetches and drops all SW-owned and error CQEs to synchronize CQ. */
143 static void
144 txq_sync_cq(struct mlx5_txq_data *txq)
145 {
146 volatile struct mlx5_cqe *cqe;
147 int ret, i;
148
149 i = txq->cqe_s;
150 do {
151 cqe = &txq->cqes[txq->cq_ci & txq->cqe_m];
152 ret = check_cqe(cqe, txq->cqe_s, txq->cq_ci);
153 if (unlikely(ret != MLX5_CQE_STATUS_SW_OWN)) {
154 if (likely(ret != MLX5_CQE_STATUS_ERR)) {
155 /* No new CQEs in completion queue. */
156 MLX5_ASSERT(ret == MLX5_CQE_STATUS_HW_OWN);
157 break;
158 }
159 }
160 ++txq->cq_ci;
161 } while (--i);
162 /* Move all CQEs to HW ownership. */
163 for (i = 0; i < txq->cqe_s; i++) {
164 cqe = &txq->cqes[i];
165 cqe->op_own = MLX5_CQE_INVALIDATE;
166 }
167 /* Resync CQE and WQE (WQ in reset state). */
168 rte_io_wmb();
169 *txq->cq_db = rte_cpu_to_be_32(txq->cq_ci);
170 txq->cq_pi = txq->cq_ci;
171 rte_io_wmb();
172 }
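
/*
 * Illustrative note (a sketch, not authoritative): the loop above walks
 * the CQ with the free-running consumer index, e.g. with cqe_s == 64:
 *
 *     slot = cq_ci & cqe_m;    // cqe_m == 63
 *     // SW-owned and error CQEs are consumed (++cq_ci); a HW-owned
 *     // CQE means no more completions are pending and the scan stops.
 *
 * Writing MLX5_CQE_INVALIDATE into op_own then returns every entry to
 * HW ownership, and the doorbell record is updated with cq_ci so the
 * CQ stays consistent with the WQ being held in reset.
 */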
173
174 /**
175 * Tx queue stop. Device queue goes to the idle state,
176 * all involved mbufs are freed from elts/WQ.
177 *
178 * @param dev
179 * Pointer to Ethernet device structure.
180 * @param idx
181 * Tx queue index.
182 *
183 * @return
184 * 0 on success, a negative errno value otherwise and rte_errno is set.
185 */
186 int
187 mlx5_tx_queue_stop_primary(struct rte_eth_dev *dev, uint16_t idx)
188 {
189 struct mlx5_priv *priv = dev->data->dev_private;
190 struct mlx5_txq_data *txq = (*priv->txqs)[idx];
191 struct mlx5_txq_ctrl *txq_ctrl =
192 container_of(txq, struct mlx5_txq_ctrl, txq);
193 int ret;
194
195 MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
196 /* Move QP to RESET state. */
197 ret = priv->obj_ops.txq_obj_modify(txq_ctrl->obj, MLX5_TXQ_MOD_RDY2RST,
198 (uint8_t)priv->dev_port);
199 if (ret)
200 return ret;
201 /* Handle all send completions. */
202 txq_sync_cq(txq);
203 /* Free elts stored in the SQ. */
204 txq_free_elts(txq_ctrl);
205 /* Prevent writing new packets to the SQ by leaving no free WQEs. */
206 txq->wqe_ci = txq->wqe_s;
207 txq->wqe_pi = 0;
208 txq->elts_comp = 0;
209 /* Set the actual queue state. */
210 dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STOPPED;
211 return 0;
212 }
213
214 /**
215 * Tx queue stop. Device queue goes to the idle state,
216 * all involved mbufs are freed from elts/WQ.
217 *
218 * @param dev
219 * Pointer to Ethernet device structure.
220 * @param idx
221 * Tx queue index.
222 *
223 * @return
224 * 0 on success, a negative errno value otherwise and rte_errno is set.
225 */
226 int
227 mlx5_tx_queue_stop(struct rte_eth_dev *dev, uint16_t idx)
228 {
229 int ret;
230
231 if (rte_eth_dev_is_tx_hairpin_queue(dev, idx)) {
232 DRV_LOG(ERR, "Hairpin queue can't be stopped");
233 rte_errno = EINVAL;
234 return -EINVAL;
235 }
236 if (dev->data->tx_queue_state[idx] == RTE_ETH_QUEUE_STATE_STOPPED)
237 return 0;
238 if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
239 ret = mlx5_mp_os_req_queue_control(dev, idx,
240 MLX5_MP_REQ_QUEUE_TX_STOP);
241 } else {
242 ret = mlx5_tx_queue_stop_primary(dev, idx);
243 }
244 return ret;
245 }
246
247 /**
248 * Tx queue start. Device queue goes to the ready state,
249 * the queue indexes are reset and transmission can resume.
250 *
251 * @param dev
252 * Pointer to Ethernet device structure.
253 * @param idx
254 * Tx queue index.
255 *
256 * @return
257 * 0 on success, a negative errno value otherwise and rte_errno is set.
258 */
259 int
260 mlx5_tx_queue_start_primary(struct rte_eth_dev *dev, uint16_t idx)
261 {
262 struct mlx5_priv *priv = dev->data->dev_private;
263 struct mlx5_txq_data *txq = (*priv->txqs)[idx];
264 struct mlx5_txq_ctrl *txq_ctrl =
265 container_of(txq, struct mlx5_txq_ctrl, txq);
266 int ret;
267
268 MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
269 ret = priv->obj_ops.txq_obj_modify(txq_ctrl->obj,
270 MLX5_TXQ_MOD_RST2RDY,
271 (uint8_t)priv->dev_port);
272 if (ret)
273 return ret;
274 txq_ctrl->txq.wqe_ci = 0;
275 txq_ctrl->txq.wqe_pi = 0;
276 txq_ctrl->txq.elts_comp = 0;
277 /* Set the actual queue state. */
278 dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STARTED;
279 return 0;
280 }
281
282 /**
283 * Tx queue start. Device queue goes to the ready state,
284 * the queue indexes are reset and transmission can resume.
285 *
286 * @param dev
287 * Pointer to Ethernet device structure.
288 * @param idx
289 * Tx queue index.
290 *
291 * @return
292 * 0 on success, a negative errno value otherwise and rte_errno is set.
293 */
294 int
295 mlx5_tx_queue_start(struct rte_eth_dev *dev, uint16_t idx)
296 {
297 int ret;
298
299 if (rte_eth_dev_is_tx_hairpin_queue(dev, idx)) {
300 DRV_LOG(ERR, "Hairpin queue can't be started");
301 rte_errno = EINVAL;
302 return -EINVAL;
303 }
304 if (dev->data->tx_queue_state[idx] == RTE_ETH_QUEUE_STATE_STARTED)
305 return 0;
306 if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
307 ret = mlx5_mp_os_req_queue_control(dev, idx,
308 MLX5_MP_REQ_QUEUE_TX_START);
309 } else {
310 ret = mlx5_tx_queue_start_primary(dev, idx);
311 }
312 return ret;
313 }
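
/*
 * Illustration only: these handlers back the generic ethdev calls, so
 * from the application side the flow is simply (error handling omitted,
 * port_id/txq_idx are placeholders):
 *
 *     rte_eth_dev_tx_queue_stop(port_id, txq_idx);
 *     // ... queue is drained, mbufs are freed, state is STOPPED ...
 *     rte_eth_dev_tx_queue_start(port_id, txq_idx);
 *
 * In a secondary process the request is proxied to the primary via
 * mlx5_mp_os_req_queue_control(), as shown above.
 */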
314
315 /**
316 * Tx queue presetup checks.
317 *
318 * @param dev
319 * Pointer to Ethernet device structure.
320 * @param idx
321 * Tx queue index.
322 * @param desc
323 * Number of descriptors to configure in queue.
324 *
325 * @return
326 * 0 on success, a negative errno value otherwise and rte_errno is set.
327 */
328 static int
329 mlx5_tx_queue_pre_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t *desc)
330 {
331 struct mlx5_priv *priv = dev->data->dev_private;
332
333 if (*desc <= MLX5_TX_COMP_THRESH) {
334 DRV_LOG(WARNING,
335 "port %u number of descriptors requested for Tx queue"
336 " %u must be higher than MLX5_TX_COMP_THRESH, using %u"
337 " instead of %u", dev->data->port_id, idx,
338 MLX5_TX_COMP_THRESH + 1, *desc);
339 *desc = MLX5_TX_COMP_THRESH + 1;
340 }
341 if (!rte_is_power_of_2(*desc)) {
342 *desc = 1 << log2above(*desc);
343 DRV_LOG(WARNING,
344 "port %u increased number of descriptors in Tx queue"
345 " %u to the next power of two (%d)",
346 dev->data->port_id, idx, *desc);
347 }
348 DRV_LOG(DEBUG, "port %u configuring queue %u for %u descriptors",
349 dev->data->port_id, idx, *desc);
350 if (idx >= priv->txqs_n) {
351 DRV_LOG(ERR, "port %u Tx queue index out of range (%u >= %u)",
352 dev->data->port_id, idx, priv->txqs_n);
353 rte_errno = EOVERFLOW;
354 return -rte_errno;
355 }
356 if (!mlx5_txq_releasable(dev, idx)) {
357 rte_errno = EBUSY;
358 DRV_LOG(ERR, "port %u unable to release queue index %u",
359 dev->data->port_id, idx);
360 return -rte_errno;
361 }
362 mlx5_txq_release(dev, idx);
363 return 0;
364 }
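
/*
 * Worked example for the descriptor adjustment above (assuming
 * MLX5_TX_COMP_THRESH == 32, as defined in mlx5_defs.h):
 *
 *     desc = 25   ->  25 <= 32, bumped to 33, not a power of two,
 *                     rounded up to 1 << log2above(33) == 64
 *     desc = 1000 ->  above the threshold, rounded up to 1024
 */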
365
366 /**
367 * DPDK callback to configure a TX queue.
368 *
369 * @param dev
370 * Pointer to Ethernet device structure.
371 * @param idx
372 * TX queue index.
373 * @param desc
374 * Number of descriptors to configure in queue.
375 * @param socket
376 * NUMA socket on which memory must be allocated.
377 * @param[in] conf
378 * Thresholds parameters.
379 *
380 * @return
381 * 0 on success, a negative errno value otherwise and rte_errno is set.
382 */
383 int
384 mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
385 unsigned int socket, const struct rte_eth_txconf *conf)
386 {
387 struct mlx5_priv *priv = dev->data->dev_private;
388 struct mlx5_txq_data *txq = (*priv->txqs)[idx];
389 struct mlx5_txq_ctrl *txq_ctrl =
390 container_of(txq, struct mlx5_txq_ctrl, txq);
391 int res;
392
393 res = mlx5_tx_queue_pre_setup(dev, idx, &desc);
394 if (res)
395 return res;
396 txq_ctrl = mlx5_txq_new(dev, idx, desc, socket, conf);
397 if (!txq_ctrl) {
398 DRV_LOG(ERR, "port %u unable to allocate queue index %u",
399 dev->data->port_id, idx);
400 return -rte_errno;
401 }
402 DRV_LOG(DEBUG, "port %u adding Tx queue %u to list",
403 dev->data->port_id, idx);
404 (*priv->txqs)[idx] = &txq_ctrl->txq;
405 return 0;
406 }
407
408 /**
409 * DPDK callback to configure a TX hairpin queue.
410 *
411 * @param dev
412 * Pointer to Ethernet device structure.
413 * @param idx
414 * TX queue index.
415 * @param desc
416 * Number of descriptors to configure in queue.
417 * @param[in] hairpin_conf
418 * The hairpin binding configuration.
419 *
420 * @return
421 * 0 on success, a negative errno value otherwise and rte_errno is set.
422 */
423 int
424 mlx5_tx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
425 uint16_t desc,
426 const struct rte_eth_hairpin_conf *hairpin_conf)
427 {
428 struct mlx5_priv *priv = dev->data->dev_private;
429 struct mlx5_txq_data *txq = (*priv->txqs)[idx];
430 struct mlx5_txq_ctrl *txq_ctrl =
431 container_of(txq, struct mlx5_txq_ctrl, txq);
432 int res;
433
434 res = mlx5_tx_queue_pre_setup(dev, idx, &desc);
435 if (res)
436 return res;
437 if (hairpin_conf->peer_count != 1) {
438 rte_errno = EINVAL;
439 DRV_LOG(ERR, "port %u unable to setup Tx hairpin queue index %u"
440 " peer count is %u", dev->data->port_id,
441 idx, hairpin_conf->peer_count);
442 return -rte_errno;
443 }
444 if (hairpin_conf->peers[0].port == dev->data->port_id) {
445 if (hairpin_conf->peers[0].queue >= priv->rxqs_n) {
446 rte_errno = EINVAL;
447 DRV_LOG(ERR, "port %u unable to setup Tx hairpin queue"
448 " index %u, Rx %u is larger than %u",
449 dev->data->port_id, idx,
450 hairpin_conf->peers[0].queue, priv->rxqs_n);
451 return -rte_errno;
452 }
453 } else {
454 if (hairpin_conf->manual_bind == 0 ||
455 hairpin_conf->tx_explicit == 0) {
456 rte_errno = EINVAL;
457 DRV_LOG(ERR, "port %u unable to setup Tx hairpin queue"
458 " index %u peer port %u with attributes %u %u",
459 dev->data->port_id, idx,
460 hairpin_conf->peers[0].port,
461 hairpin_conf->manual_bind,
462 hairpin_conf->tx_explicit);
463 return -rte_errno;
464 }
465 }
466 txq_ctrl = mlx5_txq_hairpin_new(dev, idx, desc, hairpin_conf);
467 if (!txq_ctrl) {
468 DRV_LOG(ERR, "port %u unable to allocate queue index %u",
469 dev->data->port_id, idx);
470 return -rte_errno;
471 }
472 DRV_LOG(DEBUG, "port %u adding Tx queue %u to list",
473 dev->data->port_id, idx);
474 (*priv->txqs)[idx] = &txq_ctrl->txq;
475 dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_HAIRPIN;
476 return 0;
477 }
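
/*
 * Illustration only: a minimal application-side sketch of a 2-port
 * hairpin Tx queue setup satisfying the checks above. peer_port_id,
 * peer_rxq, port_id and txq_idx are placeholders.
 *
 *     struct rte_eth_hairpin_conf conf = {
 *         .peer_count = 1,
 *         .manual_bind = 1,    // required for cross-port peers
 *         .tx_explicit = 1,    // required for cross-port peers
 *     };
 *
 *     conf.peers[0].port = peer_port_id;
 *     conf.peers[0].queue = peer_rxq;
 *     rte_eth_tx_hairpin_queue_setup(port_id, txq_idx, 512, &conf);
 */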
478
479 /**
480 * DPDK callback to release a TX queue.
481 *
482 * @param dev
483 * Pointer to Ethernet device structure.
484 * @param qid
485 * Transmit queue index.
486 */
487 void
488 mlx5_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
489 {
490 struct mlx5_txq_data *txq = dev->data->tx_queues[qid];
491
492 if (txq == NULL)
493 return;
494 DRV_LOG(DEBUG, "port %u removing Tx queue %u from list",
495 dev->data->port_id, qid);
496 mlx5_txq_release(dev, qid);
497 }
498
499 /**
500 * Remap UAR register of a Tx queue for secondary process.
501 *
502 * Remapped address is stored at the table in the process private structure of
503 * the device, indexed by queue index.
504 *
505 * @param txq_ctrl
506 * Pointer to Tx queue control structure.
507 * @param fd
508 * Verbs file descriptor to map UAR pages.
509 *
510 * @return
511 * 0 on success, a negative errno value otherwise and rte_errno is set.
512 */
513 static int
514 txq_uar_init_secondary(struct mlx5_txq_ctrl *txq_ctrl, int fd)
515 {
516 struct mlx5_priv *priv = txq_ctrl->priv;
517 struct mlx5_proc_priv *ppriv = MLX5_PROC_PRIV(PORT_ID(priv));
518 struct mlx5_proc_priv *primary_ppriv = priv->sh->pppriv;
519 struct mlx5_txq_data *txq = &txq_ctrl->txq;
520 void *addr;
521 uintptr_t uar_va;
522 uintptr_t offset;
523 const size_t page_size = rte_mem_page_size();
524 if (page_size == (size_t)-1) {
525 DRV_LOG(ERR, "Failed to get mem page size");
526 rte_errno = ENOMEM;
527 return -rte_errno;
528 }
529
530 if (txq_ctrl->is_hairpin)
531 return 0;
532 MLX5_ASSERT(ppriv);
533 /*
534 * As in rdma-core, UARs are mapped at OS page size
535 * granularity. Ref to libmlx5 function: mlx5_init_context()
536 */
537 uar_va = (uintptr_t)primary_ppriv->uar_table[txq->idx].db;
538 offset = uar_va & (page_size - 1); /* Offset in page. */
539 addr = rte_mem_map(NULL, page_size, RTE_PROT_WRITE, RTE_MAP_SHARED,
540 fd, txq_ctrl->uar_mmap_offset);
541 if (!addr) {
542 DRV_LOG(ERR, "Port %u mmap failed for BF reg of txq %u.",
543 txq->port_id, txq->idx);
544 rte_errno = ENXIO;
545 return -rte_errno;
546 }
547 addr = RTE_PTR_ADD(addr, offset);
548 ppriv->uar_table[txq->idx].db = addr;
549 #ifndef RTE_ARCH_64
550 ppriv->uar_table[txq->idx].sl_p =
551 primary_ppriv->uar_table[txq->idx].sl_p;
552 #endif
553 return 0;
554 }
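
/*
 * Illustrative arithmetic for the remapping above (assuming a 4 KiB
 * page size; the addresses are made up): if the primary's doorbell VA
 * is 0x7fffe0801800, then
 *
 *     offset = 0x7fffe0801800 & 0xfff = 0x800
 *
 * The secondary maps the same UAR page at its own address, say
 * 0x7fabc0000000, and stores 0x7fabc0000000 + 0x800 as its doorbell.
 */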
555
556 /**
557 * Unmap UAR register of a Tx queue for secondary process.
558 *
559 * @param txq_ctrl
560 * Pointer to Tx queue control structure.
561 */
562 static void
563 txq_uar_uninit_secondary(struct mlx5_txq_ctrl *txq_ctrl)
564 {
565 struct mlx5_proc_priv *ppriv = MLX5_PROC_PRIV(PORT_ID(txq_ctrl->priv));
566 void *addr;
567 const size_t page_size = rte_mem_page_size();
568 if (page_size == (size_t)-1) {
569 DRV_LOG(ERR, "Failed to get mem page size");
570 rte_errno = ENOMEM;
571 }
572
573 if (txq_ctrl->is_hairpin)
574 return;
575 addr = ppriv->uar_table[txq_ctrl->txq.idx].db;
576 rte_mem_unmap(RTE_PTR_ALIGN_FLOOR(addr, page_size), page_size);
577 }
578
579 /**
580 * Deinitialize Tx UAR registers for secondary process.
581 *
582 * @param dev
583 * Pointer to Ethernet device.
584 */
585 void
586 mlx5_tx_uar_uninit_secondary(struct rte_eth_dev *dev)
587 {
588 struct mlx5_proc_priv *ppriv = (struct mlx5_proc_priv *)
589 dev->process_private;
590 const size_t page_size = rte_mem_page_size();
591 void *addr;
592 unsigned int i;
593
594 if (page_size == (size_t)-1) {
595 DRV_LOG(ERR, "Failed to get mem page size");
596 return;
597 }
598 MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_SECONDARY);
599 for (i = 0; i != ppriv->uar_table_sz; ++i) {
600 if (!ppriv->uar_table[i].db)
601 continue;
602 addr = ppriv->uar_table[i].db;
603 rte_mem_unmap(RTE_PTR_ALIGN_FLOOR(addr, page_size), page_size);
604
605 }
606 }
607
608 /**
609 * Initialize Tx UAR registers for secondary process.
610 *
611 * @param dev
612 * Pointer to Ethernet device.
613 * @param fd
614 * Verbs file descriptor to map UAR pages.
615 *
616 * @return
617 * 0 on success, a negative errno value otherwise and rte_errno is set.
618 */
619 int
620 mlx5_tx_uar_init_secondary(struct rte_eth_dev *dev, int fd)
621 {
622 struct mlx5_priv *priv = dev->data->dev_private;
623 struct mlx5_txq_data *txq;
624 struct mlx5_txq_ctrl *txq_ctrl;
625 unsigned int i;
626 int ret;
627
628 MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_SECONDARY);
629 for (i = 0; i != priv->txqs_n; ++i) {
630 if (!(*priv->txqs)[i])
631 continue;
632 txq = (*priv->txqs)[i];
633 txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq);
634 if (txq_ctrl->is_hairpin)
635 continue;
636 MLX5_ASSERT(txq->idx == (uint16_t)i);
637 ret = txq_uar_init_secondary(txq_ctrl, fd);
638 if (ret)
639 goto error;
640 }
641 return 0;
642 error:
643 /* Rollback. */
644 do {
645 if (!(*priv->txqs)[i])
646 continue;
647 txq = (*priv->txqs)[i];
648 txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq);
649 txq_uar_uninit_secondary(txq_ctrl);
650 } while (i--);
651 return -rte_errno;
652 }
653
654 /**
655 * Verify the Verbs Tx queue list is empty
656 *
657 * @param dev
658 * Pointer to Ethernet device.
659 *
660 * @return
661 * The number of objects not released.
662 */
663 int
664 mlx5_txq_obj_verify(struct rte_eth_dev *dev)
665 {
666 struct mlx5_priv *priv = dev->data->dev_private;
667 int ret = 0;
668 struct mlx5_txq_obj *txq_obj;
669
670 LIST_FOREACH(txq_obj, &priv->txqsobj, next) {
671 DRV_LOG(DEBUG, "port %u Verbs Tx queue %u still referenced",
672 dev->data->port_id, txq_obj->txq_ctrl->txq.idx);
673 ++ret;
674 }
675 return ret;
676 }
677
678 /**
679 * Calculate the total number of WQEBB for Tx queue.
680 *
681 * Simplified version of calc_sq_size() in rdma-core.
682 *
683 * @param txq_ctrl
684 * Pointer to Tx queue control structure.
685 *
686 * @return
687 * The number of WQEBB.
688 */
689 static int
690 txq_calc_wqebb_cnt(struct mlx5_txq_ctrl *txq_ctrl)
691 {
692 unsigned int wqe_size;
693 const unsigned int desc = 1 << txq_ctrl->txq.elts_n;
694
695 wqe_size = MLX5_WQE_CSEG_SIZE +
696 MLX5_WQE_ESEG_SIZE +
697 MLX5_WSEG_SIZE -
698 MLX5_ESEG_MIN_INLINE_SIZE +
699 txq_ctrl->max_inline_data;
700 return rte_align32pow2(wqe_size * desc) / MLX5_WQE_SIZE;
701 }
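
/*
 * Worked example (assuming the usual PRM sizes: CSEG/ESEG/WSEG 16 B
 * each, MLX5_ESEG_MIN_INLINE_SIZE == 18 and MLX5_WQE_SIZE == 64):
 * with max_inline_data == 18 and 1024 descriptors,
 *
 *     wqe_size = 16 + 16 + 16 - 18 + 18 = 48 B
 *     rte_align32pow2(48 * 1024) / 64 = 65536 / 64 = 1024 WQEBBs
 */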
702
703 /**
704 * Calculate the maximal inline data size for Tx queue.
705 *
706 * @param txq_ctrl
707 * Pointer to Tx queue control structure.
708 *
709 * @return
710 * The maximal inline data size.
711 */
712 static unsigned int
713 txq_calc_inline_max(struct mlx5_txq_ctrl *txq_ctrl)
714 {
715 const unsigned int desc = 1 << txq_ctrl->txq.elts_n;
716 struct mlx5_priv *priv = txq_ctrl->priv;
717 unsigned int wqe_size;
718
719 wqe_size = priv->sh->dev_cap.max_qp_wr / desc;
720 if (!wqe_size)
721 return 0;
722 /*
723 * This calculation is derived from the source of
724 * mlx5_calc_send_wqe() in the rdma-core library.
725 */
726 wqe_size = wqe_size * MLX5_WQE_SIZE -
727 MLX5_WQE_CSEG_SIZE -
728 MLX5_WQE_ESEG_SIZE -
729 MLX5_WSEG_SIZE -
730 MLX5_WSEG_SIZE +
731 MLX5_DSEG_MIN_INLINE_SIZE;
732 return wqe_size;
733 }
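
/*
 * Worked example (the device limit is made up, WQE sizes as assumed
 * above, MLX5_DSEG_MIN_INLINE_SIZE assumed == 12): with
 * max_qp_wr == 32768 and 1024 descriptors, each Tx descriptor may own
 * up to 32768 / 1024 == 32 WQEBBs, hence at most
 *
 *     32 * 64 - 16 - 16 - 16 - 16 + 12 = 1996 B
 *
 * of inline data per packet.
 */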
734
735 /**
736 * Set Tx queue parameters from device configuration.
737 *
738 * @param txq_ctrl
739 * Pointer to Tx queue control structure.
740 */
741 static void
742 txq_set_params(struct mlx5_txq_ctrl *txq_ctrl)
743 {
744 struct mlx5_priv *priv = txq_ctrl->priv;
745 struct mlx5_port_config *config = &priv->config;
746 struct mlx5_dev_cap *dev_cap = &priv->sh->dev_cap;
747 unsigned int inlen_send; /* Inline data for ordinary SEND.*/
748 unsigned int inlen_empw; /* Inline data for enhanced MPW. */
749 unsigned int inlen_mode; /* Minimal required Inline data. */
750 unsigned int txqs_inline; /* Min Tx queues to enable inline. */
751 uint64_t dev_txoff = priv->dev_data->dev_conf.txmode.offloads;
752 bool tso = txq_ctrl->txq.offloads & (RTE_ETH_TX_OFFLOAD_TCP_TSO |
753 RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
754 RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
755 RTE_ETH_TX_OFFLOAD_IP_TNL_TSO |
756 RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO);
757 bool vlan_inline;
758 unsigned int temp;
759
760 txq_ctrl->txq.fast_free =
761 !!((txq_ctrl->txq.offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) &&
762 !(txq_ctrl->txq.offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS) &&
763 !config->mprq.enabled);
764 if (config->txqs_inline == MLX5_ARG_UNSET)
765 txqs_inline =
766 #if defined(RTE_ARCH_ARM64)
767 (priv->pci_dev && priv->pci_dev->id.device_id ==
768 PCI_DEVICE_ID_MELLANOX_CONNECTX5BF) ?
769 MLX5_INLINE_MAX_TXQS_BLUEFIELD :
770 #endif
771 MLX5_INLINE_MAX_TXQS;
772 else
773 txqs_inline = (unsigned int)config->txqs_inline;
774 inlen_send = (config->txq_inline_max == MLX5_ARG_UNSET) ?
775 MLX5_SEND_DEF_INLINE_LEN :
776 (unsigned int)config->txq_inline_max;
777 inlen_empw = (config->txq_inline_mpw == MLX5_ARG_UNSET) ?
778 MLX5_EMPW_DEF_INLINE_LEN :
779 (unsigned int)config->txq_inline_mpw;
780 inlen_mode = (config->txq_inline_min == MLX5_ARG_UNSET) ?
781 0 : (unsigned int)config->txq_inline_min;
782 if (config->mps != MLX5_MPW_ENHANCED && config->mps != MLX5_MPW)
783 inlen_empw = 0;
784 /*
785 * If a minimal amount of data to inline is requested,
786 * we MUST enable inlining. This is the case for ConnectX-4,
787 * which usually requires L2 headers to be inlined for correct
788 * operation, and for ConnectX-4 Lx, which requires L2-L4 headers
789 * to be inlined to support E-Switch Flows.
790 */
791 if (inlen_mode) {
792 if (inlen_mode <= MLX5_ESEG_MIN_INLINE_SIZE) {
793 /*
794 * Optimize minimal inlining for single
795 * segment packets to fill one WQEBB
796 * without gaps.
797 */
798 temp = MLX5_ESEG_MIN_INLINE_SIZE;
799 } else {
800 temp = inlen_mode - MLX5_ESEG_MIN_INLINE_SIZE;
801 temp = RTE_ALIGN(temp, MLX5_WSEG_SIZE) +
802 MLX5_ESEG_MIN_INLINE_SIZE;
803 temp = RTE_MIN(temp, MLX5_SEND_MAX_INLINE_LEN);
804 }
805 if (temp != inlen_mode) {
806 DRV_LOG(INFO,
807 "port %u minimal required inline setting"
808 " aligned from %u to %u",
809 PORT_ID(priv), inlen_mode, temp);
810 inlen_mode = temp;
811 }
812 }
813 /*
814 * If the port is configured to support VLAN insertion and the device
815 * does not support this feature in HW (for NICs before ConnectX-5,
816 * or if the wqe_vlan_insert flag is not set), we must enable
817 * data inlining on all queues because it is handled by the single
818 * tx_burst routine.
819 */
820 txq_ctrl->txq.vlan_en = config->hw_vlan_insert;
821 vlan_inline = (dev_txoff & RTE_ETH_TX_OFFLOAD_VLAN_INSERT) &&
822 !config->hw_vlan_insert;
823 /*
824 * If there are few Tx queues, saving CPU cycles is prioritized
825 * and data inlining is disabled entirely.
826 */
827 if (inlen_send && priv->txqs_n >= txqs_inline) {
828 /*
829 * The data sent with an ordinary MLX5_OPCODE_SEND
830 * may be inlined in the Ethernet Segment; align the
831 * length accordingly to fit entire WQEBBs.
832 */
833 temp = RTE_MAX(inlen_send,
834 MLX5_ESEG_MIN_INLINE_SIZE + MLX5_WQE_DSEG_SIZE);
835 temp -= MLX5_ESEG_MIN_INLINE_SIZE + MLX5_WQE_DSEG_SIZE;
836 temp = RTE_ALIGN(temp, MLX5_WQE_SIZE);
837 temp += MLX5_ESEG_MIN_INLINE_SIZE + MLX5_WQE_DSEG_SIZE;
838 temp = RTE_MIN(temp, MLX5_WQE_SIZE_MAX +
839 MLX5_ESEG_MIN_INLINE_SIZE -
840 MLX5_WQE_CSEG_SIZE -
841 MLX5_WQE_ESEG_SIZE -
842 MLX5_WQE_DSEG_SIZE * 2);
843 temp = RTE_MIN(temp, MLX5_SEND_MAX_INLINE_LEN);
844 temp = RTE_MAX(temp, inlen_mode);
845 if (temp != inlen_send) {
846 DRV_LOG(INFO,
847 "port %u ordinary send inline setting"
848 " aligned from %u to %u",
849 PORT_ID(priv), inlen_send, temp);
850 inlen_send = temp;
851 }
852 /*
853 * Not aligned to cache lines, but to WQEs.
854 * The first bytes of data (initial alignment)
855 * are going to be copied explicitly at the
856 * beginning of the inlining buffer in the
857 * Ethernet Segment.
858 */
859 MLX5_ASSERT(inlen_send >= MLX5_ESEG_MIN_INLINE_SIZE);
860 MLX5_ASSERT(inlen_send <= MLX5_WQE_SIZE_MAX +
861 MLX5_ESEG_MIN_INLINE_SIZE -
862 MLX5_WQE_CSEG_SIZE -
863 MLX5_WQE_ESEG_SIZE -
864 MLX5_WQE_DSEG_SIZE * 2);
865 } else if (inlen_mode) {
866 /*
867 * If minimal inlining is requested we must
868 * enable inlining in general, regardless of the
869 * number of configured queues. Ignore the
870 * txq_inline_max devarg, this is not
871 * full-featured inline.
872 */
873 inlen_send = inlen_mode;
874 inlen_empw = 0;
875 } else if (vlan_inline) {
876 /*
877 * Hardware does not report the offload for
878 * VLAN insertion, so we must enable data inlining
879 * to implement the feature in software.
880 */
881 inlen_send = MLX5_ESEG_MIN_INLINE_SIZE;
882 inlen_empw = 0;
883 } else {
884 inlen_send = 0;
885 inlen_empw = 0;
886 }
887 txq_ctrl->txq.inlen_send = inlen_send;
888 txq_ctrl->txq.inlen_mode = inlen_mode;
889 txq_ctrl->txq.inlen_empw = 0;
890 if (inlen_send && inlen_empw && priv->txqs_n >= txqs_inline) {
891 /*
892 * The data sent with MLX5_OPCODE_ENHANCED_MPSW
893 * may be inlined in the Data Segment; align the
894 * length accordingly to fit entire WQEBBs.
895 */
896 temp = RTE_MAX(inlen_empw,
897 MLX5_WQE_SIZE + MLX5_DSEG_MIN_INLINE_SIZE);
898 temp -= MLX5_DSEG_MIN_INLINE_SIZE;
899 temp = RTE_ALIGN(temp, MLX5_WQE_SIZE);
900 temp += MLX5_DSEG_MIN_INLINE_SIZE;
901 temp = RTE_MIN(temp, MLX5_WQE_SIZE_MAX +
902 MLX5_DSEG_MIN_INLINE_SIZE -
903 MLX5_WQE_CSEG_SIZE -
904 MLX5_WQE_ESEG_SIZE -
905 MLX5_WQE_DSEG_SIZE);
906 temp = RTE_MIN(temp, MLX5_EMPW_MAX_INLINE_LEN);
907 if (temp != inlen_empw) {
908 DRV_LOG(INFO,
909 "port %u enhanced empw inline setting"
910 " aligned from %u to %u",
911 PORT_ID(priv), inlen_empw, temp);
912 inlen_empw = temp;
913 }
914 MLX5_ASSERT(inlen_empw >= MLX5_ESEG_MIN_INLINE_SIZE);
915 MLX5_ASSERT(inlen_empw <= MLX5_WQE_SIZE_MAX +
916 MLX5_DSEG_MIN_INLINE_SIZE -
917 MLX5_WQE_CSEG_SIZE -
918 MLX5_WQE_ESEG_SIZE -
919 MLX5_WQE_DSEG_SIZE);
920 txq_ctrl->txq.inlen_empw = inlen_empw;
921 }
922 txq_ctrl->max_inline_data = RTE_MAX(inlen_send, inlen_empw);
923 if (tso) {
924 txq_ctrl->max_tso_header = MLX5_MAX_TSO_HEADER;
925 txq_ctrl->max_inline_data = RTE_MAX(txq_ctrl->max_inline_data,
926 MLX5_MAX_TSO_HEADER);
927 txq_ctrl->txq.tso_en = 1;
928 }
929 if (((RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO & txq_ctrl->txq.offloads) &&
930 (dev_cap->tunnel_en & MLX5_TUNNELED_OFFLOADS_VXLAN_CAP)) |
931 ((RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO & txq_ctrl->txq.offloads) &&
932 (dev_cap->tunnel_en & MLX5_TUNNELED_OFFLOADS_GRE_CAP)) |
933 ((RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO & txq_ctrl->txq.offloads) &&
934 (dev_cap->tunnel_en & MLX5_TUNNELED_OFFLOADS_GENEVE_CAP)) |
935 (dev_cap->swp & MLX5_SW_PARSING_TSO_CAP))
936 txq_ctrl->txq.tunnel_en = 1;
937 txq_ctrl->txq.swp_en = (((RTE_ETH_TX_OFFLOAD_IP_TNL_TSO |
938 RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO) &
939 txq_ctrl->txq.offloads) && (dev_cap->swp &
940 MLX5_SW_PARSING_TSO_CAP)) |
941 ((RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM &
942 txq_ctrl->txq.offloads) && (dev_cap->swp &
943 MLX5_SW_PARSING_CSUM_CAP));
944 }
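
/*
 * Worked example for the SEND inline alignment above (assuming
 * MLX5_ESEG_MIN_INLINE_SIZE == 18 and MLX5_WQE_DSEG_SIZE == 16, i.e.
 * a 34 B fixed part): a requested inlen_send of 200 B becomes
 *
 *     RTE_ALIGN(200 - 34, 64) + 34 = 192 + 34 = 226 B
 *
 * so that, per the driver's accounting above, a SEND WQE carrying that
 * much inline data ends exactly on a WQEBB boundary, provided the
 * result stays below the MLX5_SEND_MAX_INLINE_LEN and WQE size caps.
 */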
945
946 /**
947 * Adjust Tx queue data inline parameters for large queue sizes.
948 * The data inline feature requires multiple WQEs to fit the packets,
949 * and if a large number of Tx descriptors is requested by the application
950 * the total WQE amount may exceed the hardware capabilities. If the
951 * default inline settings are used we can try to adjust them to
952 * meet the hardware requirements without exceeding the queue size.
953 *
954 * @param txq_ctrl
955 * Pointer to Tx queue control structure.
956 *
957 * @return
958 * Zero on success, otherwise the parameters cannot be adjusted.
959 */
960 static int
961 txq_adjust_params(struct mlx5_txq_ctrl *txq_ctrl)
962 {
963 struct mlx5_priv *priv = txq_ctrl->priv;
964 struct mlx5_port_config *config = &priv->config;
965 unsigned int max_inline;
966
967 max_inline = txq_calc_inline_max(txq_ctrl);
968 if (!txq_ctrl->txq.inlen_send) {
969 /*
970 * Inline data feature is not engaged at all.
971 * There is nothing to adjust.
972 */
973 return 0;
974 }
975 if (txq_ctrl->max_inline_data <= max_inline) {
976 /*
977 * The requested inline data length does not
978 * exceed queue capabilities.
979 */
980 return 0;
981 }
982 if (txq_ctrl->txq.inlen_mode > max_inline) {
983 DRV_LOG(ERR,
984 "minimal data inline requirements (%u) are not"
985 " satisfied (%u) on port %u, try the smaller"
986 " Tx queue size (%d)",
987 txq_ctrl->txq.inlen_mode, max_inline,
988 priv->dev_data->port_id, priv->sh->dev_cap.max_qp_wr);
989 goto error;
990 }
991 if (txq_ctrl->txq.inlen_send > max_inline &&
992 config->txq_inline_max != MLX5_ARG_UNSET &&
993 config->txq_inline_max > (int)max_inline) {
994 DRV_LOG(ERR,
995 "txq_inline_max requirements (%u) are not"
996 " satisfied (%u) on port %u, try the smaller"
997 " Tx queue size (%d)",
998 txq_ctrl->txq.inlen_send, max_inline,
999 priv->dev_data->port_id, priv->sh->dev_cap.max_qp_wr);
1000 goto error;
1001 }
1002 if (txq_ctrl->txq.inlen_empw > max_inline &&
1003 config->txq_inline_mpw != MLX5_ARG_UNSET &&
1004 config->txq_inline_mpw > (int)max_inline) {
1005 DRV_LOG(ERR,
1006 "txq_inline_mpw requirements (%u) are not"
1007 " satisfied (%u) on port %u, try the smaller"
1008 " Tx queue size (%d)",
1009 txq_ctrl->txq.inlen_empw, max_inline,
1010 priv->dev_data->port_id, priv->sh->dev_cap.max_qp_wr);
1011 goto error;
1012 }
1013 if (txq_ctrl->txq.tso_en && max_inline < MLX5_MAX_TSO_HEADER) {
1014 DRV_LOG(ERR,
1015 "tso header inline requirements (%u) are not"
1016 " satisfied (%u) on port %u, try the smaller"
1017 " Tx queue size (%d)",
1018 MLX5_MAX_TSO_HEADER, max_inline,
1019 priv->dev_data->port_id, priv->sh->dev_cap.max_qp_wr);
1020 goto error;
1021 }
1022 if (txq_ctrl->txq.inlen_send > max_inline) {
1023 DRV_LOG(WARNING,
1024 "adjust txq_inline_max (%u->%u)"
1025 " due to large Tx queue on port %u",
1026 txq_ctrl->txq.inlen_send, max_inline,
1027 priv->dev_data->port_id);
1028 txq_ctrl->txq.inlen_send = max_inline;
1029 }
1030 if (txq_ctrl->txq.inlen_empw > max_inline) {
1031 DRV_LOG(WARNING,
1032 "adjust txq_inline_mpw (%u->%u)"
1033 " due to large Tx queue on port %u",
1034 txq_ctrl->txq.inlen_empw, max_inline,
1035 priv->dev_data->port_id);
1036 txq_ctrl->txq.inlen_empw = max_inline;
1037 }
1038 txq_ctrl->max_inline_data = RTE_MAX(txq_ctrl->txq.inlen_send,
1039 txq_ctrl->txq.inlen_empw);
1040 MLX5_ASSERT(txq_ctrl->max_inline_data <= max_inline);
1041 MLX5_ASSERT(txq_ctrl->txq.inlen_mode <= max_inline);
1042 MLX5_ASSERT(txq_ctrl->txq.inlen_mode <= txq_ctrl->txq.inlen_send);
1043 MLX5_ASSERT(txq_ctrl->txq.inlen_mode <= txq_ctrl->txq.inlen_empw ||
1044 !txq_ctrl->txq.inlen_empw);
1045 return 0;
1046 error:
1047 rte_errno = ENOMEM;
1048 return -ENOMEM;
1049 }
1050
1051 /**
1052 * Create a DPDK Tx queue.
1053 *
1054 * @param dev
1055 * Pointer to Ethernet device.
1056 * @param idx
1057 * TX queue index.
1058 * @param desc
1059 * Number of descriptors to configure in queue.
1060 * @param socket
1061 * NUMA socket on which memory must be allocated.
1062 * @param[in] conf
1063 * Thresholds parameters.
1064 *
1065 * @return
1066 * A DPDK queue object on success, NULL otherwise and rte_errno is set.
1067 */
1068 struct mlx5_txq_ctrl *
1069 mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
1070 unsigned int socket, const struct rte_eth_txconf *conf)
1071 {
1072 struct mlx5_priv *priv = dev->data->dev_private;
1073 struct mlx5_txq_ctrl *tmpl;
1074
1075 tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*tmpl) +
1076 desc * sizeof(struct rte_mbuf *), 0, socket);
1077 if (!tmpl) {
1078 rte_errno = ENOMEM;
1079 return NULL;
1080 }
1081 if (mlx5_mr_ctrl_init(&tmpl->txq.mr_ctrl,
1082 &priv->sh->cdev->mr_scache.dev_gen, socket)) {
1083 /* rte_errno is already set. */
1084 goto error;
1085 }
1086 MLX5_ASSERT(desc > MLX5_TX_COMP_THRESH);
1087 tmpl->txq.offloads = conf->offloads |
1088 dev->data->dev_conf.txmode.offloads;
1089 tmpl->priv = priv;
1090 tmpl->socket = socket;
1091 tmpl->txq.elts_n = log2above(desc);
1092 tmpl->txq.elts_s = desc;
1093 tmpl->txq.elts_m = desc - 1;
1094 tmpl->txq.port_id = dev->data->port_id;
1095 tmpl->txq.idx = idx;
1096 txq_set_params(tmpl);
1097 if (txq_adjust_params(tmpl))
1098 goto error;
1099 if (txq_calc_wqebb_cnt(tmpl) >
1100 priv->sh->dev_cap.max_qp_wr) {
1101 DRV_LOG(ERR,
1102 "port %u Tx WQEBB count (%d) exceeds the limit (%d),"
1103 " try smaller queue size",
1104 dev->data->port_id, txq_calc_wqebb_cnt(tmpl),
1105 priv->sh->dev_cap.max_qp_wr);
1106 rte_errno = ENOMEM;
1107 goto error;
1108 }
1109 __atomic_fetch_add(&tmpl->refcnt, 1, __ATOMIC_RELAXED);
1110 tmpl->is_hairpin = false;
1111 LIST_INSERT_HEAD(&priv->txqsctrl, tmpl, next);
1112 return tmpl;
1113 error:
1114 mlx5_mr_btree_free(&tmpl->txq.mr_ctrl.cache_bh);
1115 mlx5_free(tmpl);
1116 return NULL;
1117 }
1118
1119 /**
1120 * Create a DPDK Tx hairpin queue.
1121 *
1122 * @param dev
1123 * Pointer to Ethernet device.
1124 * @param idx
1125 * TX queue index.
1126 * @param desc
1127 * Number of descriptors to configure in queue.
1128 * @param hairpin_conf
1129 * The hairpin configuration.
1130 *
1131 * @return
1132 * A DPDK queue object on success, NULL otherwise and rte_errno is set.
1133 */
1134 struct mlx5_txq_ctrl *
1135 mlx5_txq_hairpin_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
1136 const struct rte_eth_hairpin_conf *hairpin_conf)
1137 {
1138 struct mlx5_priv *priv = dev->data->dev_private;
1139 struct mlx5_txq_ctrl *tmpl;
1140
1141 tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*tmpl), 0,
1142 SOCKET_ID_ANY);
1143 if (!tmpl) {
1144 rte_errno = ENOMEM;
1145 return NULL;
1146 }
1147 tmpl->priv = priv;
1148 tmpl->socket = SOCKET_ID_ANY;
1149 tmpl->txq.elts_n = log2above(desc);
1150 tmpl->txq.port_id = dev->data->port_id;
1151 tmpl->txq.idx = idx;
1152 tmpl->hairpin_conf = *hairpin_conf;
1153 tmpl->is_hairpin = true;
1154 __atomic_fetch_add(&tmpl->refcnt, 1, __ATOMIC_RELAXED);
1155 LIST_INSERT_HEAD(&priv->txqsctrl, tmpl, next);
1156 return tmpl;
1157 }
1158
1159 /**
1160 * Get a Tx queue.
1161 *
1162 * @param dev
1163 * Pointer to Ethernet device.
1164 * @param idx
1165 * TX queue index.
1166 *
1167 * @return
1168 * A pointer to the queue if it exists.
1169 */
1170 struct mlx5_txq_ctrl *
1171 mlx5_txq_get(struct rte_eth_dev *dev, uint16_t idx)
1172 {
1173 struct mlx5_priv *priv = dev->data->dev_private;
1174 struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
1175 struct mlx5_txq_ctrl *ctrl = NULL;
1176
1177 if (txq_data) {
1178 ctrl = container_of(txq_data, struct mlx5_txq_ctrl, txq);
1179 __atomic_fetch_add(&ctrl->refcnt, 1, __ATOMIC_RELAXED);
1180 }
1181 return ctrl;
1182 }
1183
1184 /**
1185 * Release a Tx queue.
1186 *
1187 * @param dev
1188 * Pointer to Ethernet device.
1189 * @param idx
1190 * TX queue index.
1191 *
1192 * @return
1193 * 1 while a reference on it exists, 0 when freed.
1194 */
1195 int
1196 mlx5_txq_release(struct rte_eth_dev *dev, uint16_t idx)
1197 {
1198 struct mlx5_priv *priv = dev->data->dev_private;
1199 struct mlx5_txq_ctrl *txq_ctrl;
1200
1201 if (priv->txqs == NULL || (*priv->txqs)[idx] == NULL)
1202 return 0;
1203 txq_ctrl = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
1204 if (__atomic_sub_fetch(&txq_ctrl->refcnt, 1, __ATOMIC_RELAXED) > 1)
1205 return 1;
1206 if (txq_ctrl->obj) {
1207 priv->obj_ops.txq_obj_release(txq_ctrl->obj);
1208 LIST_REMOVE(txq_ctrl->obj, next);
1209 mlx5_free(txq_ctrl->obj);
1210 txq_ctrl->obj = NULL;
1211 }
1212 if (!txq_ctrl->is_hairpin) {
1213 if (txq_ctrl->txq.fcqs) {
1214 mlx5_free(txq_ctrl->txq.fcqs);
1215 txq_ctrl->txq.fcqs = NULL;
1216 }
1217 txq_free_elts(txq_ctrl);
1218 dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STOPPED;
1219 }
1220 if (!__atomic_load_n(&txq_ctrl->refcnt, __ATOMIC_RELAXED)) {
1221 if (!txq_ctrl->is_hairpin)
1222 mlx5_mr_btree_free(&txq_ctrl->txq.mr_ctrl.cache_bh);
1223 LIST_REMOVE(txq_ctrl, next);
1224 mlx5_free(txq_ctrl);
1225 (*priv->txqs)[idx] = NULL;
1226 }
1227 return 0;
1228 }
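
/*
 * Illustration only: mlx5_txq_get() and mlx5_txq_release() work as a
 * reference-counted pair, e.g. inside the PMD:
 *
 *     struct mlx5_txq_ctrl *ctrl = mlx5_txq_get(dev, idx);
 *
 *     if (ctrl != NULL) {
 *         // ... use ctrl->txq safely, it cannot be freed here ...
 *         mlx5_txq_release(dev, idx);
 *     }
 *
 * The queue object and control structure are destroyed only once the
 * last reference is dropped, as implemented above.
 */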
1229
1230 /**
1231 * Verify if the queue can be released.
1232 *
1233 * @param dev
1234 * Pointer to Ethernet device.
1235 * @param idx
1236 * TX queue index.
1237 *
1238 * @return
1239 * 1 if the queue can be released.
1240 */
1241 int
1242 mlx5_txq_releasable(struct rte_eth_dev *dev, uint16_t idx)
1243 {
1244 struct mlx5_priv *priv = dev->data->dev_private;
1245 struct mlx5_txq_ctrl *txq;
1246
1247 if (!(*priv->txqs)[idx])
1248 return -1;
1249 txq = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
1250 return (__atomic_load_n(&txq->refcnt, __ATOMIC_RELAXED) == 1);
1251 }
1252
1253 /**
1254 * Verify the Tx queue list is empty.
1255 *
1256 * @param dev
1257 * Pointer to Ethernet device.
1258 *
1259 * @return
1260 * The number of object not released.
1261 * The number of objects not released.
1262 int
1263 mlx5_txq_verify(struct rte_eth_dev *dev)
1264 {
1265 struct mlx5_priv *priv = dev->data->dev_private;
1266 struct mlx5_txq_ctrl *txq_ctrl;
1267 int ret = 0;
1268
1269 LIST_FOREACH(txq_ctrl, &priv->txqsctrl, next) {
1270 DRV_LOG(DEBUG, "port %u Tx queue %u still referenced",
1271 dev->data->port_id, txq_ctrl->txq.idx);
1272 ++ret;
1273 }
1274 return ret;
1275 }
1276
1277 /**
1278 * Set the Tx queue dynamic timestamp (mask and offset)
1279 *
1280 * @param[in] dev
1281 * Pointer to the Ethernet device structure.
1282 */
1283 void
1284 mlx5_txq_dynf_timestamp_set(struct rte_eth_dev *dev)
1285 {
1286 struct mlx5_priv *priv = dev->data->dev_private;
1287 struct mlx5_dev_ctx_shared *sh = priv->sh;
1288 struct mlx5_txq_data *data;
1289 int off, nbit;
1290 unsigned int i;
1291 uint64_t mask = 0;
1292 uint64_t ts_mask;
1293
1294 if (sh->dev_cap.rt_timestamp ||
1295 !sh->cdev->config.hca_attr.dev_freq_khz)
1296 ts_mask = MLX5_TS_MASK_SECS << 32;
1297 else
1298 ts_mask = rte_align64pow2(MLX5_TS_MASK_SECS * 1000ull *
1299 sh->cdev->config.hca_attr.dev_freq_khz);
1300 ts_mask = rte_cpu_to_be_64(ts_mask - 1ull);
1301 nbit = rte_mbuf_dynflag_lookup
1302 (RTE_MBUF_DYNFLAG_TX_TIMESTAMP_NAME, NULL);
1303 off = rte_mbuf_dynfield_lookup
1304 (RTE_MBUF_DYNFIELD_TIMESTAMP_NAME, NULL);
1305 if (nbit >= 0 && off >= 0 &&
1306 (sh->txpp.refcnt || priv->sh->cdev->config.hca_attr.wait_on_time))
1307 mask = 1ULL << nbit;
1308 for (i = 0; i != priv->txqs_n; ++i) {
1309 data = (*priv->txqs)[i];
1310 if (!data)
1311 continue;
1312 data->sh = sh;
1313 data->ts_mask = mask;
1314 data->ts_offset = off;
1315 data->rt_timestamp = sh->dev_cap.rt_timestamp;
1316 data->rt_timemask = (data->offloads &
1317 RTE_ETH_TX_OFFLOAD_SEND_ON_TIMESTAMP) ?
1318 ts_mask : 0;
1319 }
1320 }
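
/*
 * Worked example for the ts_mask computation above (the constants are
 * assumptions for illustration): with MLX5_TS_MASK_SECS == 8 and a
 * free-running clock of dev_freq_khz == 156250 (156.25 MHz),
 *
 *     8 * 1000 * 156250 = 1250000000 ticks
 *     rte_align64pow2(1250000000) = 2^31
 *     ts_mask = rte_cpu_to_be_64(2^31 - 1)
 *
 * i.e. scheduled timestamps are masked to a power-of-two window
 * covering at least MLX5_TS_MASK_SECS seconds of clock ticks, stored
 * in network byte order for use in the WQE.
 */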
1321