1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2015 6WIND S.A.
3 * Copyright 2015 Mellanox Technologies, Ltd
4 */
5
6 #include <stddef.h>
7 #include <errno.h>
8 #include <string.h>
9 #include <stdint.h>
10 #include <unistd.h>
11 #include <inttypes.h>
12
13 #include <rte_mbuf.h>
14 #include <rte_malloc.h>
15 #include <rte_ethdev_driver.h>
16 #include <rte_common.h>
17 #include <rte_eal_paging.h>
18
19 #include <mlx5_common.h>
20 #include <mlx5_common_mr.h>
21 #include <mlx5_malloc.h>
22
23 #include "mlx5_defs.h"
24 #include "mlx5_utils.h"
25 #include "mlx5.h"
26 #include "mlx5_rxtx.h"
27 #include "mlx5_autoconf.h"
28
29 /**
30 * Allocate TX queue elements.
31 *
32 * @param txq_ctrl
33 * Pointer to TX queue structure.
34 */
35 void
36 txq_alloc_elts(struct mlx5_txq_ctrl *txq_ctrl)
37 {
38 const unsigned int elts_n = 1 << txq_ctrl->txq.elts_n;
39 unsigned int i;
40
41 for (i = 0; (i != elts_n); ++i)
42 txq_ctrl->txq.elts[i] = NULL;
43 DRV_LOG(DEBUG, "port %u Tx queue %u allocated and configured %u WRs",
44 PORT_ID(txq_ctrl->priv), txq_ctrl->txq.idx, elts_n);
45 txq_ctrl->txq.elts_head = 0;
46 txq_ctrl->txq.elts_tail = 0;
47 txq_ctrl->txq.elts_comp = 0;
48 }
49
50 /**
51 * Free TX queue elements.
52 *
53 * @param txq_ctrl
54 * Pointer to TX queue structure.
55 */
56 void
57 txq_free_elts(struct mlx5_txq_ctrl *txq_ctrl)
58 {
59 const uint16_t elts_n = 1 << txq_ctrl->txq.elts_n;
60 const uint16_t elts_m = elts_n - 1;
61 uint16_t elts_head = txq_ctrl->txq.elts_head;
62 uint16_t elts_tail = txq_ctrl->txq.elts_tail;
63 struct rte_mbuf *(*elts)[elts_n] = &txq_ctrl->txq.elts;
64
65 DRV_LOG(DEBUG, "port %u Tx queue %u freeing WRs",
66 PORT_ID(txq_ctrl->priv), txq_ctrl->txq.idx);
67 txq_ctrl->txq.elts_head = 0;
68 txq_ctrl->txq.elts_tail = 0;
69 txq_ctrl->txq.elts_comp = 0;
70
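/*
 * elts[] is a ring of elts_n (a power of two) mbuf pointers indexed by
 * free-running 16-bit counters; walk from tail to head, masking the
 * index with elts_m, and free every mbuf still held by the SQ.
 */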
71 while (elts_tail != elts_head) {
72 struct rte_mbuf *elt = (*elts)[elts_tail & elts_m];
73
74 MLX5_ASSERT(elt != NULL);
75 rte_pktmbuf_free_seg(elt);
76 #ifdef RTE_LIBRTE_MLX5_DEBUG
77 /* Poisoning. */
78 memset(&(*elts)[elts_tail & elts_m],
79 0x77,
80 sizeof((*elts)[elts_tail & elts_m]));
81 #endif
82 ++elts_tail;
83 }
84 }
85
86 /**
87 * Returns the per-port supported offloads.
88 *
89 * @param dev
90 * Pointer to Ethernet device.
91 *
92 * @return
93 * Supported Tx offloads.
94 */
95 uint64_t
96 mlx5_get_tx_port_offloads(struct rte_eth_dev *dev)
97 {
98 struct mlx5_priv *priv = dev->data->dev_private;
99 uint64_t offloads = (DEV_TX_OFFLOAD_MULTI_SEGS |
100 DEV_TX_OFFLOAD_VLAN_INSERT);
101 struct mlx5_dev_config *config = &priv->config;
102
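/*
 * Checksum, TSO, Tx scheduling and tunnel offloads are reported only
 * when the corresponding capability was probed into the device config.
 */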
103 if (config->hw_csum)
104 offloads |= (DEV_TX_OFFLOAD_IPV4_CKSUM |
105 DEV_TX_OFFLOAD_UDP_CKSUM |
106 DEV_TX_OFFLOAD_TCP_CKSUM);
107 if (config->tso)
108 offloads |= DEV_TX_OFFLOAD_TCP_TSO;
109 if (config->tx_pp)
110 offloads |= DEV_TX_OFFLOAD_SEND_ON_TIMESTAMP;
111 if (config->swp) {
112 if (config->hw_csum)
113 offloads |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
114 if (config->tso)
115 offloads |= (DEV_TX_OFFLOAD_IP_TNL_TSO |
116 DEV_TX_OFFLOAD_UDP_TNL_TSO);
117 }
118 if (config->tunnel_en) {
119 if (config->hw_csum)
120 offloads |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
121 if (config->tso)
122 offloads |= (DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
123 DEV_TX_OFFLOAD_GRE_TNL_TSO |
124 DEV_TX_OFFLOAD_GENEVE_TNL_TSO);
125 }
126 return offloads;
127 }
128
129 /* Fetches and drops all SW-owned and error CQEs to synchronize CQ. */
130 static void
131 txq_sync_cq(struct mlx5_txq_data *txq)
132 {
133 volatile struct mlx5_cqe *cqe;
134 int ret, i;
135
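/*
 * First pass: consume every CQE currently owned by SW (including error
 * CQEs) by advancing cq_ci; the scan is bounded by the CQ size.
 */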
136 i = txq->cqe_s;
137 do {
138 cqe = &txq->cqes[txq->cq_ci & txq->cqe_m];
139 ret = check_cqe(cqe, txq->cqe_s, txq->cq_ci);
140 if (unlikely(ret != MLX5_CQE_STATUS_SW_OWN)) {
141 if (likely(ret != MLX5_CQE_STATUS_ERR)) {
142 /* No new CQEs in completion queue. */
143 MLX5_ASSERT(ret == MLX5_CQE_STATUS_HW_OWN);
144 break;
145 }
146 }
147 ++txq->cq_ci;
148 } while (--i);
149 /* Move all CQEs to HW ownership. */
150 for (i = 0; i < txq->cqe_s; i++) {
151 cqe = &txq->cqes[i];
152 cqe->op_own = MLX5_CQE_INVALIDATE;
153 }
154 /* Resync CQE and WQE (WQ in reset state). */
155 rte_io_wmb();
156 *txq->cq_db = rte_cpu_to_be_32(txq->cq_ci);
157 txq->cq_pi = txq->cq_ci;
158 rte_io_wmb();
159 }
160
161 /**
162 * Tx queue stop. Device queue goes to the idle state,
163 * all involved mbufs are freed from elts/WQ.
164 *
165 * @param dev
166 * Pointer to Ethernet device structure.
167 * @param idx
168 * Tx queue index.
169 *
170 * @return
171 * 0 on success, a negative errno value otherwise and rte_errno is set.
172 */
173 int
174 mlx5_tx_queue_stop_primary(struct rte_eth_dev *dev, uint16_t idx)
175 {
176 struct mlx5_priv *priv = dev->data->dev_private;
177 struct mlx5_txq_data *txq = (*priv->txqs)[idx];
178 struct mlx5_txq_ctrl *txq_ctrl =
179 container_of(txq, struct mlx5_txq_ctrl, txq);
180 int ret;
181
182 MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
183 /* Move QP to RESET state. */
184 ret = priv->obj_ops.txq_obj_modify(txq_ctrl->obj, MLX5_TXQ_MOD_RDY2RST,
185 (uint8_t)priv->dev_port);
186 if (ret)
187 return ret;
188 /* Handle all send completions. */
189 txq_sync_cq(txq);
190 /* Free elts stored in the SQ. */
191 txq_free_elts(txq_ctrl);
192 /* Prevent writing new packets to the SQ by making it appear there are no free WQEs. */
193 txq->wqe_ci = txq->wqe_s;
194 txq->wqe_pi = 0;
195 txq->elts_comp = 0;
196 /* Set the actual queue state. */
197 dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STOPPED;
198 return 0;
199 }
200
201 /**
202 * Tx queue stop. Device queue goes to the idle state,
203 * all involved mbufs are freed from elts/WQ.
204 *
205 * @param dev
206 * Pointer to Ethernet device structure.
207 * @param idx
208 * Tx queue index.
209 *
210 * @return
211 * 0 on success, a negative errno value otherwise and rte_errno is set.
212 */
213 int
214 mlx5_tx_queue_stop(struct rte_eth_dev *dev, uint16_t idx)
215 {
216 int ret;
217
218 if (rte_eth_dev_is_tx_hairpin_queue(dev, idx)) {
219 DRV_LOG(ERR, "Hairpin queue can't be stopped");
220 rte_errno = EINVAL;
221 return -EINVAL;
222 }
223 if (dev->data->tx_queue_state[idx] == RTE_ETH_QUEUE_STATE_STOPPED)
224 return 0;
225 if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
226 ret = mlx5_mp_os_req_queue_control(dev, idx,
227 MLX5_MP_REQ_QUEUE_TX_STOP);
228 } else {
229 ret = mlx5_tx_queue_stop_primary(dev, idx);
230 }
231 return ret;
232 }
233
234 /**
235 * Tx queue start. Device queue goes to the ready state,
236 * the SQ indices are reset and the queue is ready to transmit.
237 *
238 * @param dev
239 * Pointer to Ethernet device structure.
240 * @param idx
241 * Tx queue index.
242 *
243 * @return
244 * 0 on success, a negative errno value otherwise and rte_errno is set.
245 */
246 int
247 mlx5_tx_queue_start_primary(struct rte_eth_dev *dev, uint16_t idx)
248 {
249 struct mlx5_priv *priv = dev->data->dev_private;
250 struct mlx5_txq_data *txq = (*priv->txqs)[idx];
251 struct mlx5_txq_ctrl *txq_ctrl =
252 container_of(txq, struct mlx5_txq_ctrl, txq);
253 int ret;
254
255 MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
256 ret = priv->obj_ops.txq_obj_modify(txq_ctrl->obj,
257 MLX5_TXQ_MOD_RST2RDY,
258 (uint8_t)priv->dev_port);
259 if (ret)
260 return ret;
261 txq_ctrl->txq.wqe_ci = 0;
262 txq_ctrl->txq.wqe_pi = 0;
263 txq_ctrl->txq.elts_comp = 0;
264 /* Set the actual queue state. */
265 dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STARTED;
266 return 0;
267 }
268
269 /**
270 * Tx queue start. Device queue goes to the ready state,
271 * the SQ indices are reset and the queue is ready to transmit.
272 *
273 * @param dev
274 * Pointer to Ethernet device structure.
275 * @param idx
276 * Tx queue index.
277 *
278 * @return
279 * 0 on success, a negative errno value otherwise and rte_errno is set.
280 */
281 int
282 mlx5_tx_queue_start(struct rte_eth_dev *dev, uint16_t idx)
283 {
284 int ret;
285
286 if (rte_eth_dev_is_tx_hairpin_queue(dev, idx)) {
287 DRV_LOG(ERR, "Hairpin queue can't be started");
288 rte_errno = EINVAL;
289 return -EINVAL;
290 }
291 if (dev->data->tx_queue_state[idx] == RTE_ETH_QUEUE_STATE_STARTED)
292 return 0;
293 if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
294 ret = mlx5_mp_os_req_queue_control(dev, idx,
295 MLX5_MP_REQ_QUEUE_TX_START);
296 } else {
297 ret = mlx5_tx_queue_start_primary(dev, idx);
298 }
299 return ret;
300 }
301
302 /**
303 * Tx queue presetup checks.
304 *
305 * @param dev
306 * Pointer to Ethernet device structure.
307 * @param idx
308 * Tx queue index.
309 * @param desc
310 * Number of descriptors to configure in queue.
311 *
312 * @return
313 * 0 on success, a negative errno value otherwise and rte_errno is set.
314 */
315 static int
316 mlx5_tx_queue_pre_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t *desc)
317 {
318 struct mlx5_priv *priv = dev->data->dev_private;
319
320 if (*desc <= MLX5_TX_COMP_THRESH) {
321 DRV_LOG(WARNING,
322 "port %u number of descriptors requested for Tx queue"
323 " %u must be higher than MLX5_TX_COMP_THRESH, using %u"
324 " instead of %u", dev->data->port_id, idx,
325 MLX5_TX_COMP_THRESH + 1, *desc);
326 *desc = MLX5_TX_COMP_THRESH + 1;
327 }
328 if (!rte_is_power_of_2(*desc)) {
329 *desc = 1 << log2above(*desc);
330 DRV_LOG(WARNING,
331 "port %u increased number of descriptors in Tx queue"
332 " %u to the next power of two (%d)",
333 dev->data->port_id, idx, *desc);
334 }
335 DRV_LOG(DEBUG, "port %u configuring queue %u for %u descriptors",
336 dev->data->port_id, idx, *desc);
337 if (idx >= priv->txqs_n) {
338 DRV_LOG(ERR, "port %u Tx queue index out of range (%u >= %u)",
339 dev->data->port_id, idx, priv->txqs_n);
340 rte_errno = EOVERFLOW;
341 return -rte_errno;
342 }
343 if (!mlx5_txq_releasable(dev, idx)) {
344 rte_errno = EBUSY;
345 DRV_LOG(ERR, "port %u unable to release queue index %u",
346 dev->data->port_id, idx);
347 return -rte_errno;
348 }
349 mlx5_txq_release(dev, idx);
350 return 0;
351 }
352
353 /**
354 * DPDK callback to configure a TX queue.
355 *
356 * @param dev
357 * Pointer to Ethernet device structure.
358 * @param idx
359 * TX queue index.
360 * @param desc
361 * Number of descriptors to configure in queue.
362 * @param socket
363 * NUMA socket on which memory must be allocated.
364 * @param[in] conf
365 * Thresholds parameters.
366 *
367 * @return
368 * 0 on success, a negative errno value otherwise and rte_errno is set.
369 */
370 int
371 mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
372 unsigned int socket, const struct rte_eth_txconf *conf)
373 {
374 struct mlx5_priv *priv = dev->data->dev_private;
375 struct mlx5_txq_data *txq = (*priv->txqs)[idx];
376 struct mlx5_txq_ctrl *txq_ctrl =
377 container_of(txq, struct mlx5_txq_ctrl, txq);
378 int res;
379
380 res = mlx5_tx_queue_pre_setup(dev, idx, &desc);
381 if (res)
382 return res;
383 txq_ctrl = mlx5_txq_new(dev, idx, desc, socket, conf);
384 if (!txq_ctrl) {
385 DRV_LOG(ERR, "port %u unable to allocate queue index %u",
386 dev->data->port_id, idx);
387 return -rte_errno;
388 }
389 DRV_LOG(DEBUG, "port %u adding Tx queue %u to list",
390 dev->data->port_id, idx);
391 (*priv->txqs)[idx] = &txq_ctrl->txq;
392 return 0;
393 }
394
395 /**
396 * DPDK callback to configure a TX hairpin queue.
397 *
398 * @param dev
399 * Pointer to Ethernet device structure.
400 * @param idx
401 * TX queue index.
402 * @param desc
403 * Number of descriptors to configure in queue.
404 * @param[in] hairpin_conf
405 * The hairpin binding configuration.
406 *
407 * @return
408 * 0 on success, a negative errno value otherwise and rte_errno is set.
409 */
410 int
411 mlx5_tx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
412 uint16_t desc,
413 const struct rte_eth_hairpin_conf *hairpin_conf)
414 {
415 struct mlx5_priv *priv = dev->data->dev_private;
416 struct mlx5_txq_data *txq = (*priv->txqs)[idx];
417 struct mlx5_txq_ctrl *txq_ctrl =
418 container_of(txq, struct mlx5_txq_ctrl, txq);
419 int res;
420
421 res = mlx5_tx_queue_pre_setup(dev, idx, &desc);
422 if (res)
423 return res;
424 if (hairpin_conf->peer_count != 1) {
425 rte_errno = EINVAL;
426 DRV_LOG(ERR, "port %u unable to setup Tx hairpin queue index %u"
427 " peer count is %u", dev->data->port_id,
428 idx, hairpin_conf->peer_count);
429 return -rte_errno;
430 }
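/*
 * A local peer must reference a valid Rx queue; a remote peer port is
 * only allowed with explicit Tx rules and manual binding.
 */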
431 if (hairpin_conf->peers[0].port == dev->data->port_id) {
432 if (hairpin_conf->peers[0].queue >= priv->rxqs_n) {
433 rte_errno = EINVAL;
434 DRV_LOG(ERR, "port %u unable to setup Tx hairpin queue"
435 " index %u, Rx queue index out of range (%u >= %u)",
436 dev->data->port_id, idx,
437 hairpin_conf->peers[0].queue, priv->rxqs_n);
438 return -rte_errno;
439 }
440 } else {
441 if (hairpin_conf->manual_bind == 0 ||
442 hairpin_conf->tx_explicit == 0) {
443 rte_errno = EINVAL;
444 DRV_LOG(ERR, "port %u unable to setup Tx hairpin queue"
445 " index %u peer port %u with attributes %u %u",
446 dev->data->port_id, idx,
447 hairpin_conf->peers[0].port,
448 hairpin_conf->manual_bind,
449 hairpin_conf->tx_explicit);
450 return -rte_errno;
451 }
452 }
453 txq_ctrl = mlx5_txq_hairpin_new(dev, idx, desc, hairpin_conf);
454 if (!txq_ctrl) {
455 DRV_LOG(ERR, "port %u unable to allocate queue index %u",
456 dev->data->port_id, idx);
457 return -rte_errno;
458 }
459 DRV_LOG(DEBUG, "port %u adding Tx queue %u to list",
460 dev->data->port_id, idx);
461 (*priv->txqs)[idx] = &txq_ctrl->txq;
462 dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_HAIRPIN;
463 return 0;
464 }
465
466 /**
467 * DPDK callback to release a TX queue.
468 *
469 * @param dpdk_txq
470 * Generic TX queue pointer.
471 */
472 void
473 mlx5_tx_queue_release(void *dpdk_txq)
474 {
475 struct mlx5_txq_data *txq = (struct mlx5_txq_data *)dpdk_txq;
476 struct mlx5_txq_ctrl *txq_ctrl;
477 struct mlx5_priv *priv;
478 unsigned int i;
479
480 if (txq == NULL)
481 return;
482 txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq);
483 priv = txq_ctrl->priv;
484 for (i = 0; (i != priv->txqs_n); ++i)
485 if ((*priv->txqs)[i] == txq) {
486 DRV_LOG(DEBUG, "port %u removing Tx queue %u from list",
487 PORT_ID(priv), txq->idx);
488 mlx5_txq_release(ETH_DEV(priv), i);
489 break;
490 }
491 }
492
493 /**
494 * Configure the doorbell register non-cached attribute.
495 *
496 * @param txq_ctrl
497 * Pointer to Tx queue control structure.
498 * @param page_size
499 * System page size.
500 */
501 static void
502 txq_uar_ncattr_init(struct mlx5_txq_ctrl *txq_ctrl, size_t page_size)
503 {
504 struct mlx5_priv *priv = txq_ctrl->priv;
505 off_t cmd;
506
507 txq_ctrl->txq.db_heu = priv->config.dbnc == MLX5_TXDB_HEURISTIC;
508 txq_ctrl->txq.db_nc = 0;
509 /* Check the doorbell register mapping type. */
510 cmd = txq_ctrl->uar_mmap_offset / page_size;
511 cmd >>= MLX5_UAR_MMAP_CMD_SHIFT;
512 cmd &= MLX5_UAR_MMAP_CMD_MASK;
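/*
 * The mmap offset encodes the mapping command; the NC command means the
 * doorbell register was mapped as non-cached rather than write-combining.
 */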
513 if (cmd == MLX5_MMAP_GET_NC_PAGES_CMD)
514 txq_ctrl->txq.db_nc = 1;
515 }
516
517 /**
518 * Initialize Tx UAR registers for primary process.
519 *
520 * @param txq_ctrl
521 * Pointer to Tx queue control structure.
522 */
523 void
524 txq_uar_init(struct mlx5_txq_ctrl *txq_ctrl)
525 {
526 struct mlx5_priv *priv = txq_ctrl->priv;
527 struct mlx5_proc_priv *ppriv = MLX5_PROC_PRIV(PORT_ID(priv));
528 #ifndef RTE_ARCH_64
529 unsigned int lock_idx;
530 #endif
531 const size_t page_size = rte_mem_page_size();
532 if (page_size == (size_t)-1) {
533 DRV_LOG(ERR, "Failed to get mem page size");
534 rte_errno = ENOMEM;
535 }
536
537 if (txq_ctrl->type != MLX5_TXQ_TYPE_STANDARD)
538 return;
539 MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
540 MLX5_ASSERT(ppriv);
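/* Store the BlueFlame register address in the per-process UAR table, indexed by queue. */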
541 ppriv->uar_table[txq_ctrl->txq.idx] = txq_ctrl->bf_reg;
542 txq_uar_ncattr_init(txq_ctrl, page_size);
543 #ifndef RTE_ARCH_64
544 /* Assign a UAR lock according to the UAR page number. */
545 lock_idx = (txq_ctrl->uar_mmap_offset / page_size) &
546 MLX5_UAR_PAGE_NUM_MASK;
547 txq_ctrl->txq.uar_lock = &priv->sh->uar_lock[lock_idx];
548 #endif
549 }
550
551 /**
552 * Remap UAR register of a Tx queue for secondary process.
553 *
554 * The remapped address is stored in the table of the process-private
555 * structure of the device, indexed by queue index.
556 *
557 * @param txq_ctrl
558 * Pointer to Tx queue control structure.
559 * @param fd
560 * Verbs file descriptor to map UAR pages.
561 *
562 * @return
563 * 0 on success, a negative errno value otherwise and rte_errno is set.
564 */
565 static int
566 txq_uar_init_secondary(struct mlx5_txq_ctrl *txq_ctrl, int fd)
567 {
568 struct mlx5_priv *priv = txq_ctrl->priv;
569 struct mlx5_proc_priv *ppriv = MLX5_PROC_PRIV(PORT_ID(priv));
570 struct mlx5_txq_data *txq = &txq_ctrl->txq;
571 void *addr;
572 uintptr_t uar_va;
573 uintptr_t offset;
574 const size_t page_size = rte_mem_page_size();
575 if (page_size == (size_t)-1) {
576 DRV_LOG(ERR, "Failed to get mem page size");
577 rte_errno = ENOMEM;
578 return -rte_errno;
579 }
580
581 if (txq_ctrl->type != MLX5_TXQ_TYPE_STANDARD)
582 return 0;
583 MLX5_ASSERT(ppriv);
584 /*
585 * As in rdma-core, UARs are mapped at OS page size granularity.
586 * Refer to the libmlx5 function mlx5_init_context().
587 */
588 uar_va = (uintptr_t)txq_ctrl->bf_reg;
589 offset = uar_va & (page_size - 1); /* Offset in page. */
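/* Map the whole UAR page and re-apply the in-page offset of the BF register. */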
590 addr = rte_mem_map(NULL, page_size, RTE_PROT_WRITE, RTE_MAP_SHARED,
591 fd, txq_ctrl->uar_mmap_offset);
592 if (!addr) {
593 DRV_LOG(ERR,
594 "port %u mmap failed for BF reg of txq %u",
595 txq->port_id, txq->idx);
596 rte_errno = ENXIO;
597 return -rte_errno;
598 }
599 addr = RTE_PTR_ADD(addr, offset);
600 ppriv->uar_table[txq->idx] = addr;
601 txq_uar_ncattr_init(txq_ctrl, page_size);
602 return 0;
603 }
604
605 /**
606 * Unmap UAR register of a Tx queue for secondary process.
607 *
608 * @param txq_ctrl
609 * Pointer to Tx queue control structure.
610 */
611 static void
612 txq_uar_uninit_secondary(struct mlx5_txq_ctrl *txq_ctrl)
613 {
614 struct mlx5_proc_priv *ppriv = MLX5_PROC_PRIV(PORT_ID(txq_ctrl->priv));
615 void *addr;
616 const size_t page_size = rte_mem_page_size();
617 if (page_size == (size_t)-1) {
618 DRV_LOG(ERR, "Failed to get mem page size");
619 rte_errno = ENOMEM;
620 }
621
622 if (txq_ctrl->type != MLX5_TXQ_TYPE_STANDARD)
623 return;
624 addr = ppriv->uar_table[txq_ctrl->txq.idx];
625 rte_mem_unmap(RTE_PTR_ALIGN_FLOOR(addr, page_size), page_size);
626 }
627
628 /**
629 * Deinitialize Tx UAR registers for secondary process.
630 *
631 * @param dev
632 * Pointer to Ethernet device.
633 */
634 void
635 mlx5_tx_uar_uninit_secondary(struct rte_eth_dev *dev)
636 {
637 struct mlx5_priv *priv = dev->data->dev_private;
638 struct mlx5_txq_data *txq;
639 struct mlx5_txq_ctrl *txq_ctrl;
640 unsigned int i;
641
642 MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_SECONDARY);
643 for (i = 0; i != priv->txqs_n; ++i) {
644 if (!(*priv->txqs)[i])
645 continue;
646 txq = (*priv->txqs)[i];
647 txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq);
648 txq_uar_uninit_secondary(txq_ctrl);
649 }
650 }
651
652 /**
653 * Initialize Tx UAR registers for secondary process.
654 *
655 * @param dev
656 * Pointer to Ethernet device.
657 * @param fd
658 * Verbs file descriptor to map UAR pages.
659 *
660 * @return
661 * 0 on success, a negative errno value otherwise and rte_errno is set.
662 */
663 int
664 mlx5_tx_uar_init_secondary(struct rte_eth_dev *dev, int fd)
665 {
666 struct mlx5_priv *priv = dev->data->dev_private;
667 struct mlx5_txq_data *txq;
668 struct mlx5_txq_ctrl *txq_ctrl;
669 unsigned int i;
670 int ret;
671
672 MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_SECONDARY);
673 for (i = 0; i != priv->txqs_n; ++i) {
674 if (!(*priv->txqs)[i])
675 continue;
676 txq = (*priv->txqs)[i];
677 txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq);
678 if (txq_ctrl->type != MLX5_TXQ_TYPE_STANDARD)
679 continue;
680 MLX5_ASSERT(txq->idx == (uint16_t)i);
681 ret = txq_uar_init_secondary(txq_ctrl, fd);
682 if (ret)
683 goto error;
684 }
685 return 0;
686 error:
687 /* Rollback. */
688 do {
689 if (!(*priv->txqs)[i])
690 continue;
691 txq = (*priv->txqs)[i];
692 txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq);
693 txq_uar_uninit_secondary(txq_ctrl);
694 } while (i--);
695 return -rte_errno;
696 }
697
698 /**
699 * Verify that the Verbs Tx queue object list is empty.
700 *
701 * @param dev
702 * Pointer to Ethernet device.
703 *
704 * @return
705 * The number of objects not released.
706 */
707 int
708 mlx5_txq_obj_verify(struct rte_eth_dev *dev)
709 {
710 struct mlx5_priv *priv = dev->data->dev_private;
711 int ret = 0;
712 struct mlx5_txq_obj *txq_obj;
713
714 LIST_FOREACH(txq_obj, &priv->txqsobj, next) {
715 DRV_LOG(DEBUG, "port %u Verbs Tx queue %u still referenced",
716 dev->data->port_id, txq_obj->txq_ctrl->txq.idx);
717 ++ret;
718 }
719 return ret;
720 }
721
722 /**
723 * Calculate the total number of WQEBBs for a Tx queue.
724 *
725 * Simplified version of calc_sq_size() in rdma-core.
726 *
727 * @param txq_ctrl
728 * Pointer to Tx queue control structure.
729 *
730 * @return
731 * The number of WQEBBs.
732 */
733 static int
734 txq_calc_wqebb_cnt(struct mlx5_txq_ctrl *txq_ctrl)
735 {
736 unsigned int wqe_size;
737 const unsigned int desc = 1 << txq_ctrl->txq.elts_n;
738
739 wqe_size = MLX5_WQE_CSEG_SIZE +
740 MLX5_WQE_ESEG_SIZE +
741 MLX5_WSEG_SIZE -
742 MLX5_ESEG_MIN_INLINE_SIZE +
743 txq_ctrl->max_inline_data;
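/*
 * Round the per-queue WQE space up to a power of two and convert it to
 * WQEBB (MLX5_WQE_SIZE byte) units.
 */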
744 return rte_align32pow2(wqe_size * desc) / MLX5_WQE_SIZE;
745 }
746
747 /**
748 * Calculate the maximal inline data size for Tx queue.
749 *
750 * @param txq_ctrl
751 * Pointer to Tx queue control structure.
752 *
753 * @return
754 * The maximal inline data size.
755 */
756 static unsigned int
757 txq_calc_inline_max(struct mlx5_txq_ctrl *txq_ctrl)
758 {
759 const unsigned int desc = 1 << txq_ctrl->txq.elts_n;
760 struct mlx5_priv *priv = txq_ctrl->priv;
761 unsigned int wqe_size;
762
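/* Maximum WQEBBs the device can provide per descriptor for this queue size. */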
763 wqe_size = priv->sh->device_attr.max_qp_wr / desc;
764 if (!wqe_size)
765 return 0;
766 /*
767 * This calculation is derived from the source of
768 * mlx5_calc_send_wqe() in the rdma-core library.
769 */
770 wqe_size = wqe_size * MLX5_WQE_SIZE -
771 MLX5_WQE_CSEG_SIZE -
772 MLX5_WQE_ESEG_SIZE -
773 MLX5_WSEG_SIZE -
774 MLX5_WSEG_SIZE +
775 MLX5_DSEG_MIN_INLINE_SIZE;
776 return wqe_size;
777 }
778
779 /**
780 * Set Tx queue parameters from device configuration.
781 *
782 * @param txq_ctrl
783 * Pointer to Tx queue control structure.
784 */
785 static void
786 txq_set_params(struct mlx5_txq_ctrl *txq_ctrl)
787 {
788 struct mlx5_priv *priv = txq_ctrl->priv;
789 struct mlx5_dev_config *config = &priv->config;
790 unsigned int inlen_send; /* Inline data for ordinary SEND.*/
791 unsigned int inlen_empw; /* Inline data for enhanced MPW. */
792 unsigned int inlen_mode; /* Minimal required Inline data. */
793 unsigned int txqs_inline; /* Min Tx queues to enable inline. */
794 uint64_t dev_txoff = priv->dev_data->dev_conf.txmode.offloads;
795 bool tso = txq_ctrl->txq.offloads & (DEV_TX_OFFLOAD_TCP_TSO |
796 DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
797 DEV_TX_OFFLOAD_GRE_TNL_TSO |
798 DEV_TX_OFFLOAD_IP_TNL_TSO |
799 DEV_TX_OFFLOAD_UDP_TNL_TSO);
800 bool vlan_inline;
801 unsigned int temp;
802
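/*
 * Resolve inline lengths: explicit txq_inline_* devargs take precedence
 * over the built-in defaults; enhanced MPW inlining is only meaningful
 * when MPW or enhanced MPW is enabled.
 */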
803 if (config->txqs_inline == MLX5_ARG_UNSET)
804 txqs_inline =
805 #if defined(RTE_ARCH_ARM64)
806 (priv->pci_dev->id.device_id ==
807 PCI_DEVICE_ID_MELLANOX_CONNECTX5BF) ?
808 MLX5_INLINE_MAX_TXQS_BLUEFIELD :
809 #endif
810 MLX5_INLINE_MAX_TXQS;
811 else
812 txqs_inline = (unsigned int)config->txqs_inline;
813 inlen_send = (config->txq_inline_max == MLX5_ARG_UNSET) ?
814 MLX5_SEND_DEF_INLINE_LEN :
815 (unsigned int)config->txq_inline_max;
816 inlen_empw = (config->txq_inline_mpw == MLX5_ARG_UNSET) ?
817 MLX5_EMPW_DEF_INLINE_LEN :
818 (unsigned int)config->txq_inline_mpw;
819 inlen_mode = (config->txq_inline_min == MLX5_ARG_UNSET) ?
820 0 : (unsigned int)config->txq_inline_min;
821 if (config->mps != MLX5_MPW_ENHANCED && config->mps != MLX5_MPW)
822 inlen_empw = 0;
823 /*
824 * If a minimal amount of data to inline is requested
825 * we MUST enable inlining. This is the case for ConnectX-4,
826 * which usually requires L2 headers to be inlined for correct
827 * operation, and for ConnectX-4 Lx, which requires L2-L4 headers
828 * to be inlined to support E-Switch Flows.
829 */
830 if (inlen_mode) {
831 if (inlen_mode <= MLX5_ESEG_MIN_INLINE_SIZE) {
832 /*
833 * Optimize minimal inlining for single
834 * segment packets to fill one WQEBB
835 * without gaps.
836 */
837 temp = MLX5_ESEG_MIN_INLINE_SIZE;
838 } else {
839 temp = inlen_mode - MLX5_ESEG_MIN_INLINE_SIZE;
840 temp = RTE_ALIGN(temp, MLX5_WSEG_SIZE) +
841 MLX5_ESEG_MIN_INLINE_SIZE;
842 temp = RTE_MIN(temp, MLX5_SEND_MAX_INLINE_LEN);
843 }
844 if (temp != inlen_mode) {
845 DRV_LOG(INFO,
846 "port %u minimal required inline setting"
847 " aligned from %u to %u",
848 PORT_ID(priv), inlen_mode, temp);
849 inlen_mode = temp;
850 }
851 }
852 /*
853 * If the port is configured to support VLAN insertion and the
854 * device does not support this feature in HW (NICs prior to
855 * ConnectX-5, or the wqe_vlan_insert flag is not set), data
856 * inline must be enabled on all queues because the feature is
857 * implemented in software by the single tx_burst routine.
858 */
859 txq_ctrl->txq.vlan_en = config->hw_vlan_insert;
860 vlan_inline = (dev_txoff & DEV_TX_OFFLOAD_VLAN_INSERT) &&
861 !config->hw_vlan_insert;
862 /*
863 * If there are only a few Tx queues, saving CPU cycles is
864 * prioritized and data inlining is disabled entirely.
865 */
866 if (inlen_send && priv->txqs_n >= txqs_inline) {
867 /*
868 * The data sent with an ordinary MLX5_OPCODE_SEND
869 * may be inlined in the Ethernet Segment; align the
870 * length accordingly to fit entire WQEBBs.
871 */
872 temp = RTE_MAX(inlen_send,
873 MLX5_ESEG_MIN_INLINE_SIZE + MLX5_WQE_DSEG_SIZE);
874 temp -= MLX5_ESEG_MIN_INLINE_SIZE + MLX5_WQE_DSEG_SIZE;
875 temp = RTE_ALIGN(temp, MLX5_WQE_SIZE);
876 temp += MLX5_ESEG_MIN_INLINE_SIZE + MLX5_WQE_DSEG_SIZE;
877 temp = RTE_MIN(temp, MLX5_WQE_SIZE_MAX +
878 MLX5_ESEG_MIN_INLINE_SIZE -
879 MLX5_WQE_CSEG_SIZE -
880 MLX5_WQE_ESEG_SIZE -
881 MLX5_WQE_DSEG_SIZE * 2);
882 temp = RTE_MIN(temp, MLX5_SEND_MAX_INLINE_LEN);
883 temp = RTE_MAX(temp, inlen_mode);
884 if (temp != inlen_send) {
885 DRV_LOG(INFO,
886 "port %u ordinary send inline setting"
887 " aligned from %u to %u",
888 PORT_ID(priv), inlen_send, temp);
889 inlen_send = temp;
890 }
891 /*
892 * Not aligned to cache lines, but to WQEs.
893 * The first bytes of data (initial alignment)
894 * are copied explicitly at the beginning of the
895 * inlining buffer in the Ethernet Segment.
897 */
898 MLX5_ASSERT(inlen_send >= MLX5_ESEG_MIN_INLINE_SIZE);
899 MLX5_ASSERT(inlen_send <= MLX5_WQE_SIZE_MAX +
900 MLX5_ESEG_MIN_INLINE_SIZE -
901 MLX5_WQE_CSEG_SIZE -
902 MLX5_WQE_ESEG_SIZE -
903 MLX5_WQE_DSEG_SIZE * 2);
904 } else if (inlen_mode) {
905 /*
906 * If minimal inlining is requested we must
907 * enable inlining in general, regardless of the
908 * number of configured queues. Ignore the
909 * txq_inline_max devarg, this is not
910 * full-featured inline.
911 */
912 inlen_send = inlen_mode;
913 inlen_empw = 0;
914 } else if (vlan_inline) {
915 /*
916 * Hardware does not report the VLAN insertion
917 * offload, so data inline must be enabled to
918 * implement the feature in software.
919 */
920 inlen_send = MLX5_ESEG_MIN_INLINE_SIZE;
921 inlen_empw = 0;
922 } else {
923 inlen_send = 0;
924 inlen_empw = 0;
925 }
926 txq_ctrl->txq.inlen_send = inlen_send;
927 txq_ctrl->txq.inlen_mode = inlen_mode;
928 txq_ctrl->txq.inlen_empw = 0;
929 if (inlen_send && inlen_empw && priv->txqs_n >= txqs_inline) {
930 /*
931 * The data sent with MLX5_OPCODE_ENHANCED_MPSW
932 * may be inlined in the Data Segment; align the
933 * length accordingly to fit entire WQEBBs.
934 */
935 temp = RTE_MAX(inlen_empw,
936 MLX5_WQE_SIZE + MLX5_DSEG_MIN_INLINE_SIZE);
937 temp -= MLX5_DSEG_MIN_INLINE_SIZE;
938 temp = RTE_ALIGN(temp, MLX5_WQE_SIZE);
939 temp += MLX5_DSEG_MIN_INLINE_SIZE;
940 temp = RTE_MIN(temp, MLX5_WQE_SIZE_MAX +
941 MLX5_DSEG_MIN_INLINE_SIZE -
942 MLX5_WQE_CSEG_SIZE -
943 MLX5_WQE_ESEG_SIZE -
944 MLX5_WQE_DSEG_SIZE);
945 temp = RTE_MIN(temp, MLX5_EMPW_MAX_INLINE_LEN);
946 if (temp != inlen_empw) {
947 DRV_LOG(INFO,
948 "port %u enhanced empw inline setting"
949 " aligned from %u to %u",
950 PORT_ID(priv), inlen_empw, temp);
951 inlen_empw = temp;
952 }
953 MLX5_ASSERT(inlen_empw >= MLX5_ESEG_MIN_INLINE_SIZE);
954 MLX5_ASSERT(inlen_empw <= MLX5_WQE_SIZE_MAX +
955 MLX5_DSEG_MIN_INLINE_SIZE -
956 MLX5_WQE_CSEG_SIZE -
957 MLX5_WQE_ESEG_SIZE -
958 MLX5_WQE_DSEG_SIZE);
959 txq_ctrl->txq.inlen_empw = inlen_empw;
960 }
961 txq_ctrl->max_inline_data = RTE_MAX(inlen_send, inlen_empw);
962 if (tso) {
963 txq_ctrl->max_tso_header = MLX5_MAX_TSO_HEADER;
964 txq_ctrl->max_inline_data = RTE_MAX(txq_ctrl->max_inline_data,
965 MLX5_MAX_TSO_HEADER);
966 txq_ctrl->txq.tso_en = 1;
967 }
968 txq_ctrl->txq.tunnel_en = config->tunnel_en | config->swp;
969 txq_ctrl->txq.swp_en = ((DEV_TX_OFFLOAD_IP_TNL_TSO |
970 DEV_TX_OFFLOAD_UDP_TNL_TSO |
971 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) &
972 txq_ctrl->txq.offloads) && config->swp;
973 }
974
975 /**
976 * Adjust Tx queue data inline parameters for large queue sizes.
977 * The data inline feature requires multiple WQEs to fit the packets,
978 * and if a large number of Tx descriptors is requested by the application
979 * the total WQE amount may exceed the hardware capabilities. If the
980 * default inline settings are used we can try to adjust them to meet
981 * the hardware requirements without exceeding the queue size.
982 *
983 * @param txq_ctrl
984 * Pointer to Tx queue control structure.
985 *
986 * @return
987 * Zero on success, otherwise the parameters cannot be adjusted.
988 */
989 static int
990 txq_adjust_params(struct mlx5_txq_ctrl *txq_ctrl)
991 {
992 struct mlx5_priv *priv = txq_ctrl->priv;
993 struct mlx5_dev_config *config = &priv->config;
994 unsigned int max_inline;
995
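/* Upper bound on inline data the device can accept for this queue size. */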
996 max_inline = txq_calc_inline_max(txq_ctrl);
997 if (!txq_ctrl->txq.inlen_send) {
998 /*
999 * Inline data feature is not engaged at all.
1000 * There is nothing to adjust.
1001 */
1002 return 0;
1003 }
1004 if (txq_ctrl->max_inline_data <= max_inline) {
1005 /*
1006 * The requested inline data length does not
1007 * exceed queue capabilities.
1008 */
1009 return 0;
1010 }
1011 if (txq_ctrl->txq.inlen_mode > max_inline) {
1012 DRV_LOG(ERR,
1013 "minimal data inline requirements (%u) are not"
1014 " satisfied (%u) on port %u, try the smaller"
1015 " Tx queue size (%d)",
1016 txq_ctrl->txq.inlen_mode, max_inline,
1017 priv->dev_data->port_id,
1018 priv->sh->device_attr.max_qp_wr);
1019 goto error;
1020 }
1021 if (txq_ctrl->txq.inlen_send > max_inline &&
1022 config->txq_inline_max != MLX5_ARG_UNSET &&
1023 config->txq_inline_max > (int)max_inline) {
1024 DRV_LOG(ERR,
1025 "txq_inline_max requirements (%u) are not"
1026 " satisfied (%u) on port %u, try the smaller"
1027 " Tx queue size (%d)",
1028 txq_ctrl->txq.inlen_send, max_inline,
1029 priv->dev_data->port_id,
1030 priv->sh->device_attr.max_qp_wr);
1031 goto error;
1032 }
1033 if (txq_ctrl->txq.inlen_empw > max_inline &&
1034 config->txq_inline_mpw != MLX5_ARG_UNSET &&
1035 config->txq_inline_mpw > (int)max_inline) {
1036 DRV_LOG(ERR,
1037 "txq_inline_mpw requirements (%u) are not"
1038 " satisfied (%u) on port %u, try the smaller"
1039 " Tx queue size (%d)",
1040 txq_ctrl->txq.inlen_empw, max_inline,
1041 priv->dev_data->port_id,
1042 priv->sh->device_attr.max_qp_wr);
1043 goto error;
1044 }
1045 if (txq_ctrl->txq.tso_en && max_inline < MLX5_MAX_TSO_HEADER) {
1046 DRV_LOG(ERR,
1047 "tso header inline requirements (%u) are not"
1048 " satisfied (%u) on port %u, try the smaller"
1049 " Tx queue size (%d)",
1050 MLX5_MAX_TSO_HEADER, max_inline,
1051 priv->dev_data->port_id,
1052 priv->sh->device_attr.max_qp_wr);
1053 goto error;
1054 }
1055 if (txq_ctrl->txq.inlen_send > max_inline) {
1056 DRV_LOG(WARNING,
1057 "adjust txq_inline_max (%u->%u)"
1058 " due to large Tx queue on port %u",
1059 txq_ctrl->txq.inlen_send, max_inline,
1060 priv->dev_data->port_id);
1061 txq_ctrl->txq.inlen_send = max_inline;
1062 }
1063 if (txq_ctrl->txq.inlen_empw > max_inline) {
1064 DRV_LOG(WARNING,
1065 "adjust txq_inline_mpw (%u->%u)"
1066 "due to large Tx queue on port %u",
1067 txq_ctrl->txq.inlen_empw, max_inline,
1068 priv->dev_data->port_id);
1069 txq_ctrl->txq.inlen_empw = max_inline;
1070 }
1071 txq_ctrl->max_inline_data = RTE_MAX(txq_ctrl->txq.inlen_send,
1072 txq_ctrl->txq.inlen_empw);
1073 MLX5_ASSERT(txq_ctrl->max_inline_data <= max_inline);
1074 MLX5_ASSERT(txq_ctrl->txq.inlen_mode <= max_inline);
1075 MLX5_ASSERT(txq_ctrl->txq.inlen_mode <= txq_ctrl->txq.inlen_send);
1076 MLX5_ASSERT(txq_ctrl->txq.inlen_mode <= txq_ctrl->txq.inlen_empw ||
1077 !txq_ctrl->txq.inlen_empw);
1078 return 0;
1079 error:
1080 rte_errno = ENOMEM;
1081 return -ENOMEM;
1082 }
1083
1084 /**
1085 * Create a DPDK Tx queue.
1086 *
1087 * @param dev
1088 * Pointer to Ethernet device.
1089 * @param idx
1090 * TX queue index.
1091 * @param desc
1092 * Number of descriptors to configure in queue.
1093 * @param socket
1094 * NUMA socket on which memory must be allocated.
1095 * @param[in] conf
1096 * Thresholds parameters.
1097 *
1098 * @return
1099 * A DPDK queue object on success, NULL otherwise and rte_errno is set.
1100 */
1101 struct mlx5_txq_ctrl *
1102 mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
1103 unsigned int socket, const struct rte_eth_txconf *conf)
1104 {
1105 struct mlx5_priv *priv = dev->data->dev_private;
1106 struct mlx5_txq_ctrl *tmpl;
1107
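/*
 * Allocate the control structure together with the elts[] array of
 * desc mbuf pointers in a single chunk on the requested NUMA socket.
 */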
1108 tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*tmpl) +
1109 desc * sizeof(struct rte_mbuf *), 0, socket);
1110 if (!tmpl) {
1111 rte_errno = ENOMEM;
1112 return NULL;
1113 }
1114 if (mlx5_mr_btree_init(&tmpl->txq.mr_ctrl.cache_bh,
1115 MLX5_MR_BTREE_CACHE_N, socket)) {
1116 /* rte_errno is already set. */
1117 goto error;
1118 }
1119 /* Save pointer of global generation number to check memory event. */
1120 tmpl->txq.mr_ctrl.dev_gen_ptr = &priv->sh->share_cache.dev_gen;
1121 MLX5_ASSERT(desc > MLX5_TX_COMP_THRESH);
1122 tmpl->txq.offloads = conf->offloads |
1123 dev->data->dev_conf.txmode.offloads;
1124 tmpl->priv = priv;
1125 tmpl->socket = socket;
1126 tmpl->txq.elts_n = log2above(desc);
1127 tmpl->txq.elts_s = desc;
1128 tmpl->txq.elts_m = desc - 1;
1129 tmpl->txq.port_id = dev->data->port_id;
1130 tmpl->txq.idx = idx;
1131 txq_set_params(tmpl);
1132 if (txq_adjust_params(tmpl))
1133 goto error;
1134 if (txq_calc_wqebb_cnt(tmpl) >
1135 priv->sh->device_attr.max_qp_wr) {
1136 DRV_LOG(ERR,
1137 "port %u Tx WQEBB count (%d) exceeds the limit (%d),"
1138 " try smaller queue size",
1139 dev->data->port_id, txq_calc_wqebb_cnt(tmpl),
1140 priv->sh->device_attr.max_qp_wr);
1141 rte_errno = ENOMEM;
1142 goto error;
1143 }
1144 __atomic_fetch_add(&tmpl->refcnt, 1, __ATOMIC_RELAXED);
1145 tmpl->type = MLX5_TXQ_TYPE_STANDARD;
1146 LIST_INSERT_HEAD(&priv->txqsctrl, tmpl, next);
1147 return tmpl;
1148 error:
1149 mlx5_free(tmpl);
1150 return NULL;
1151 }
1152
1153 /**
1154 * Create a DPDK Tx hairpin queue.
1155 *
1156 * @param dev
1157 * Pointer to Ethernet device.
1158 * @param idx
1159 * TX queue index.
1160 * @param desc
1161 * Number of descriptors to configure in queue.
1162 * @param hairpin_conf
1163 * The hairpin configuration.
1164 *
1165 * @return
1166 * A DPDK queue object on success, NULL otherwise and rte_errno is set.
1167 */
1168 struct mlx5_txq_ctrl *
1169 mlx5_txq_hairpin_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
1170 const struct rte_eth_hairpin_conf *hairpin_conf)
1171 {
1172 struct mlx5_priv *priv = dev->data->dev_private;
1173 struct mlx5_txq_ctrl *tmpl;
1174
1175 tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*tmpl), 0,
1176 SOCKET_ID_ANY);
1177 if (!tmpl) {
1178 rte_errno = ENOMEM;
1179 return NULL;
1180 }
1181 tmpl->priv = priv;
1182 tmpl->socket = SOCKET_ID_ANY;
1183 tmpl->txq.elts_n = log2above(desc);
1184 tmpl->txq.port_id = dev->data->port_id;
1185 tmpl->txq.idx = idx;
1186 tmpl->hairpin_conf = *hairpin_conf;
1187 tmpl->type = MLX5_TXQ_TYPE_HAIRPIN;
1188 __atomic_fetch_add(&tmpl->refcnt, 1, __ATOMIC_RELAXED);
1189 LIST_INSERT_HEAD(&priv->txqsctrl, tmpl, next);
1190 return tmpl;
1191 }
1192
1193 /**
1194 * Get a Tx queue.
1195 *
1196 * @param dev
1197 * Pointer to Ethernet device.
1198 * @param idx
1199 * TX queue index.
1200 *
1201 * @return
1202 * A pointer to the queue if it exists, NULL otherwise.
1203 */
1204 struct mlx5_txq_ctrl *
1205 mlx5_txq_get(struct rte_eth_dev *dev, uint16_t idx)
1206 {
1207 struct mlx5_priv *priv = dev->data->dev_private;
1208 struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
1209 struct mlx5_txq_ctrl *ctrl = NULL;
1210
1211 if (txq_data) {
1212 ctrl = container_of(txq_data, struct mlx5_txq_ctrl, txq);
1213 __atomic_fetch_add(&ctrl->refcnt, 1, __ATOMIC_RELAXED);
1214 }
1215 return ctrl;
1216 }
1217
1218 /**
1219 * Release a Tx queue.
1220 *
1221 * @param dev
1222 * Pointer to Ethernet device.
1223 * @param idx
1224 * TX queue index.
1225 *
1226 * @return
1227 * 1 while a reference on it exists, 0 when freed.
1228 */
1229 int
1230 mlx5_txq_release(struct rte_eth_dev *dev, uint16_t idx)
1231 {
1232 struct mlx5_priv *priv = dev->data->dev_private;
1233 struct mlx5_txq_ctrl *txq_ctrl;
1234
1235 if (!(*priv->txqs)[idx])
1236 return 0;
1237 txq_ctrl = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
1238 if (__atomic_sub_fetch(&txq_ctrl->refcnt, 1, __ATOMIC_RELAXED) > 1)
1239 return 1;
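/*
 * Only the last reference may remain: release the device object and the
 * SQ resources now; free the control structure itself only when the
 * reference counter drops to zero.
 */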
1240 if (txq_ctrl->obj) {
1241 priv->obj_ops.txq_obj_release(txq_ctrl->obj);
1242 LIST_REMOVE(txq_ctrl->obj, next);
1243 mlx5_free(txq_ctrl->obj);
1244 txq_ctrl->obj = NULL;
1245 }
1246 if (txq_ctrl->type == MLX5_TXQ_TYPE_STANDARD) {
1247 if (txq_ctrl->txq.fcqs) {
1248 mlx5_free(txq_ctrl->txq.fcqs);
1249 txq_ctrl->txq.fcqs = NULL;
1250 }
1251 txq_free_elts(txq_ctrl);
1252 dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STOPPED;
1253 }
1254 if (!__atomic_load_n(&txq_ctrl->refcnt, __ATOMIC_RELAXED)) {
1255 if (txq_ctrl->type == MLX5_TXQ_TYPE_STANDARD)
1256 mlx5_mr_btree_free(&txq_ctrl->txq.mr_ctrl.cache_bh);
1257 LIST_REMOVE(txq_ctrl, next);
1258 mlx5_free(txq_ctrl);
1259 (*priv->txqs)[idx] = NULL;
1260 }
1261 return 0;
1262 }
1263
1264 /**
1265 * Verify if the queue can be released.
1266 *
1267 * @param dev
1268 * Pointer to Ethernet device.
1269 * @param idx
1270 * TX queue index.
1271 *
1272 * @return
1273 * 1 if the queue can be released, 0 if it is still referenced, -1 if it does not exist.
1274 */
1275 int
1276 mlx5_txq_releasable(struct rte_eth_dev *dev, uint16_t idx)
1277 {
1278 struct mlx5_priv *priv = dev->data->dev_private;
1279 struct mlx5_txq_ctrl *txq;
1280
1281 if (!(*priv->txqs)[idx])
1282 return -1;
1283 txq = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
1284 return (__atomic_load_n(&txq->refcnt, __ATOMIC_RELAXED) == 1);
1285 }
1286
1287 /**
1288 * Verify that the Tx queue list is empty.
1289 *
1290 * @param dev
1291 * Pointer to Ethernet device.
1292 *
1293 * @return
1294 * The number of objects not released.
1295 */
1296 int
1297 mlx5_txq_verify(struct rte_eth_dev *dev)
1298 {
1299 struct mlx5_priv *priv = dev->data->dev_private;
1300 struct mlx5_txq_ctrl *txq_ctrl;
1301 int ret = 0;
1302
1303 LIST_FOREACH(txq_ctrl, &priv->txqsctrl, next) {
1304 DRV_LOG(DEBUG, "port %u Tx queue %u still referenced",
1305 dev->data->port_id, txq_ctrl->txq.idx);
1306 ++ret;
1307 }
1308 return ret;
1309 }
1310
1311 /**
1312 * Set the Tx queue dynamic timestamp (mask and offset).
1313 *
1314 * @param[in] dev
1315 * Pointer to the Ethernet device structure.
1316 */
1317 void
1318 mlx5_txq_dynf_timestamp_set(struct rte_eth_dev *dev)
1319 {
1320 struct mlx5_priv *priv = dev->data->dev_private;
1321 struct mlx5_dev_ctx_shared *sh = priv->sh;
1322 struct mlx5_txq_data *data;
1323 int off, nbit;
1324 unsigned int i;
1325 uint64_t mask = 0;
1326
1327 nbit = rte_mbuf_dynflag_lookup
1328 (RTE_MBUF_DYNFLAG_TX_TIMESTAMP_NAME, NULL);
1329 off = rte_mbuf_dynfield_lookup
1330 (RTE_MBUF_DYNFIELD_TIMESTAMP_NAME, NULL);
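/*
 * Arm the timestamp mask only when both the dynamic flag and field are
 * registered and Tx packet pacing is active on the shared context.
 */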
1331 if (nbit >= 0 && off >= 0 && sh->txpp.refcnt)
1332 mask = 1ULL << nbit;
1333 for (i = 0; i != priv->txqs_n; ++i) {
1334 data = (*priv->txqs)[i];
1335 if (!data)
1336 continue;
1337 data->sh = sh;
1338 data->ts_mask = mask;
1339 data->ts_offset = off;
1340 }
1341 }
1342