Lines matching refs: dev — each entry gives the source line number, the matching line, and the containing function.

30 mlx5_txq_stop(struct rte_eth_dev *dev)  in mlx5_txq_stop()  argument
32 struct mlx5_priv *priv = dev->data->dev_private; in mlx5_txq_stop()
36 mlx5_txq_release(dev, i); in mlx5_txq_stop()
49 mlx5_txq_start(struct rte_eth_dev *dev) in mlx5_txq_start() argument
51 struct mlx5_priv *priv = dev->data->dev_private; in mlx5_txq_start()
56 struct mlx5_txq_ctrl *txq_ctrl = mlx5_txq_get(dev, i); in mlx5_txq_start()
69 "memory resources.", dev->data->port_id, in mlx5_txq_start()
74 ret = priv->obj_ops.txq_obj_new(dev, i); in mlx5_txq_start()
89 dev->data->port_id, i); in mlx5_txq_start()
95 dev->data->port_id, i, (void *)&txq_ctrl->obj); in mlx5_txq_start()
102 mlx5_txq_release(dev, i); in mlx5_txq_start()
155 mlx5_rxq_stop(struct rte_eth_dev *dev) in mlx5_rxq_stop() argument
157 struct mlx5_priv *priv = dev->data->dev_private; in mlx5_rxq_stop()
161 mlx5_rxq_release(dev, i); in mlx5_rxq_stop()
165 mlx5_rxq_ctrl_prepare(struct rte_eth_dev *dev, struct mlx5_rxq_ctrl *rxq_ctrl, in mlx5_rxq_ctrl_prepare() argument
188 dev->data->port_id, idx); in mlx5_rxq_ctrl_prepare()
192 DRV_LOG(DEBUG, "Port %u rxq %u updated with %p.", dev->data->port_id, in mlx5_rxq_ctrl_prepare()
207 mlx5_rxq_start(struct rte_eth_dev *dev) in mlx5_rxq_start() argument
209 struct mlx5_priv *priv = dev->data->dev_private; in mlx5_rxq_start()
214 if (mlx5_mprq_alloc_mp(dev)) { in mlx5_rxq_start()
219 dev->data->port_id, priv->sh->dev_cap.max_qp_wr); in mlx5_rxq_start()
221 dev->data->port_id, priv->sh->dev_cap.max_sge); in mlx5_rxq_start()
223 struct mlx5_rxq_priv *rxq = mlx5_rxq_ref(dev, i); in mlx5_rxq_start()
230 if (mlx5_rxq_ctrl_prepare(dev, rxq_ctrl, i) < 0) in mlx5_rxq_start()
246 mlx5_rxq_release(dev, i); in mlx5_rxq_start()
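
The mlx5_txq_start()/mlx5_rxq_start() helpers above run from inside the PMD's dev_start callback. A minimal, hedged sketch of the application-side ethdev calls that reach them (port_bringup, the single Rx/Tx queue and the 512-descriptor rings are illustrative assumptions; error handling is trimmed):

    #include <rte_ethdev.h>
    #include <rte_lcore.h>
    #include <rte_mbuf.h>

    static int
    port_bringup(uint16_t port_id, struct rte_mempool *mp)
    {
        struct rte_eth_conf conf = {0};   /* defaults are enough for a sketch */
        int ret;

        /* One Rx and one Tx queue; their hardware objects are created later. */
        ret = rte_eth_dev_configure(port_id, 1, 1, &conf);
        if (ret < 0)
            return ret;
        ret = rte_eth_rx_queue_setup(port_id, 0, 512, rte_socket_id(), NULL, mp);
        if (ret < 0)
            return ret;
        ret = rte_eth_tx_queue_setup(port_id, 0, 512, rte_socket_id(), NULL);
        if (ret < 0)
            return ret;
        /* rte_eth_dev_start() is what invokes the PMD's dev_start callback,
         * which in turn calls mlx5_txq_start() and mlx5_rxq_start(). */
        return rte_eth_dev_start(port_id);
    }
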
264 mlx5_hairpin_auto_bind(struct rte_eth_dev *dev) in mlx5_hairpin_auto_bind() argument
266 struct mlx5_priv *priv = dev->data->dev_private; in mlx5_hairpin_auto_bind()
277 uint16_t self_port = dev->data->port_id; in mlx5_hairpin_auto_bind()
280 txq_ctrl = mlx5_txq_get(dev, i); in mlx5_hairpin_auto_bind()
285 mlx5_txq_release(dev, i); in mlx5_hairpin_auto_bind()
289 mlx5_txq_release(dev, i); in mlx5_hairpin_auto_bind()
293 mlx5_txq_release(dev, i); in mlx5_hairpin_auto_bind()
298 txq_ctrl = mlx5_txq_get(dev, i); in mlx5_hairpin_auto_bind()
304 mlx5_txq_release(dev, i); in mlx5_hairpin_auto_bind()
310 dev->data->port_id, i); in mlx5_hairpin_auto_bind()
311 mlx5_txq_release(dev, i); in mlx5_hairpin_auto_bind()
315 rxq = mlx5_rxq_get(dev, txq_ctrl->hairpin_conf.peers[0].queue); in mlx5_hairpin_auto_bind()
317 mlx5_txq_release(dev, i); in mlx5_hairpin_auto_bind()
320 dev->data->port_id, in mlx5_hairpin_auto_bind()
329 "Rx queue %d", dev->data->port_id, in mlx5_hairpin_auto_bind()
337 dev->data->port_id, in mlx5_hairpin_auto_bind()
360 mlx5_txq_release(dev, i); in mlx5_hairpin_auto_bind()
364 mlx5_txq_release(dev, i); in mlx5_hairpin_auto_bind()
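
mlx5_hairpin_auto_bind() covers the implicit-bind case, where hairpin queues created without manual_bind are wired up during device start. A hedged sketch of setting up such a loopback hairpin pair on one port (setup_hairpin_pair, the queue ids and the 128-descriptor rings are illustrative assumptions):

    #include <rte_ethdev.h>

    static int
    setup_hairpin_pair(uint16_t port_id, uint16_t rxq, uint16_t txq)
    {
        struct rte_eth_hairpin_conf conf = {
            .peer_count = 1,
            .manual_bind = 0,  /* 0: PMD binds at start, the path listed above */
            .tx_explicit = 0,  /* 0: PMD installs the internal hairpin flows */
        };
        int ret;

        conf.peers[0].port = port_id;   /* single-port loopback: peer is itself */
        conf.peers[0].queue = txq;
        ret = rte_eth_rx_hairpin_queue_setup(port_id, rxq, 128, &conf);
        if (ret < 0)
            return ret;
        conf.peers[0].queue = rxq;      /* Tx side points back at the Rx queue */
        return rte_eth_tx_hairpin_queue_setup(port_id, txq, 128, &conf);
    }
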
386 mlx5_hairpin_queue_peer_update(struct rte_eth_dev *dev, uint16_t peer_queue, in mlx5_hairpin_queue_peer_update() argument
391 struct mlx5_priv *priv = dev->data->dev_private; in mlx5_hairpin_queue_peer_update()
394 if (dev->data->dev_started == 0) { in mlx5_hairpin_queue_peer_update()
397 dev->data->port_id); in mlx5_hairpin_queue_peer_update()
408 txq_ctrl = mlx5_txq_get(dev, peer_queue); in mlx5_hairpin_queue_peer_update()
412 dev->data->port_id, peer_queue); in mlx5_hairpin_queue_peer_update()
418 dev->data->port_id, peer_queue); in mlx5_hairpin_queue_peer_update()
419 mlx5_txq_release(dev, peer_queue); in mlx5_hairpin_queue_peer_update()
425 dev->data->port_id, peer_queue); in mlx5_hairpin_queue_peer_update()
426 mlx5_txq_release(dev, peer_queue); in mlx5_hairpin_queue_peer_update()
435 mlx5_txq_release(dev, peer_queue); in mlx5_hairpin_queue_peer_update()
437 struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, peer_queue); in mlx5_hairpin_queue_peer_update()
443 dev->data->port_id, peer_queue); in mlx5_hairpin_queue_peer_update()
450 dev->data->port_id, peer_queue); in mlx5_hairpin_queue_peer_update()
456 dev->data->port_id, peer_queue); in mlx5_hairpin_queue_peer_update()
486 mlx5_hairpin_queue_peer_bind(struct rte_eth_dev *dev, uint16_t cur_queue, in mlx5_hairpin_queue_peer_bind() argument
499 dev->data->port_id, cur_queue, peer_info->peer_q); in mlx5_hairpin_queue_peer_bind()
506 txq_ctrl = mlx5_txq_get(dev, cur_queue); in mlx5_hairpin_queue_peer_bind()
510 dev->data->port_id, cur_queue); in mlx5_hairpin_queue_peer_bind()
516 dev->data->port_id, cur_queue); in mlx5_hairpin_queue_peer_bind()
517 mlx5_txq_release(dev, cur_queue); in mlx5_hairpin_queue_peer_bind()
523 dev->data->port_id, cur_queue); in mlx5_hairpin_queue_peer_bind()
524 mlx5_txq_release(dev, cur_queue); in mlx5_hairpin_queue_peer_bind()
529 dev->data->port_id, cur_queue); in mlx5_hairpin_queue_peer_bind()
530 mlx5_txq_release(dev, cur_queue); in mlx5_hairpin_queue_peer_bind()
541 " mismatch", dev->data->port_id, cur_queue); in mlx5_hairpin_queue_peer_bind()
542 mlx5_txq_release(dev, cur_queue); in mlx5_hairpin_queue_peer_bind()
549 " mismatch", dev->data->port_id, cur_queue); in mlx5_hairpin_queue_peer_bind()
550 mlx5_txq_release(dev, cur_queue); in mlx5_hairpin_queue_peer_bind()
560 mlx5_txq_release(dev, cur_queue); in mlx5_hairpin_queue_peer_bind()
562 struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, cur_queue); in mlx5_hairpin_queue_peer_bind()
569 dev->data->port_id, cur_queue); in mlx5_hairpin_queue_peer_bind()
576 dev->data->port_id, cur_queue); in mlx5_hairpin_queue_peer_bind()
582 dev->data->port_id, cur_queue); in mlx5_hairpin_queue_peer_bind()
587 dev->data->port_id, cur_queue); in mlx5_hairpin_queue_peer_bind()
594 " mismatch", dev->data->port_id, cur_queue); in mlx5_hairpin_queue_peer_bind()
601 " mismatch", dev->data->port_id, cur_queue); in mlx5_hairpin_queue_peer_bind()
631 mlx5_hairpin_queue_peer_unbind(struct rte_eth_dev *dev, uint16_t cur_queue, in mlx5_hairpin_queue_peer_unbind() argument
640 txq_ctrl = mlx5_txq_get(dev, cur_queue); in mlx5_hairpin_queue_peer_unbind()
644 dev->data->port_id, cur_queue); in mlx5_hairpin_queue_peer_unbind()
650 dev->data->port_id, cur_queue); in mlx5_hairpin_queue_peer_unbind()
651 mlx5_txq_release(dev, cur_queue); in mlx5_hairpin_queue_peer_unbind()
657 dev->data->port_id, cur_queue); in mlx5_hairpin_queue_peer_unbind()
658 mlx5_txq_release(dev, cur_queue); in mlx5_hairpin_queue_peer_unbind()
664 dev->data->port_id, cur_queue); in mlx5_hairpin_queue_peer_unbind()
665 mlx5_txq_release(dev, cur_queue); in mlx5_hairpin_queue_peer_unbind()
673 mlx5_txq_release(dev, cur_queue); in mlx5_hairpin_queue_peer_unbind()
675 struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, cur_queue); in mlx5_hairpin_queue_peer_unbind()
682 dev->data->port_id, cur_queue); in mlx5_hairpin_queue_peer_unbind()
689 dev->data->port_id, cur_queue); in mlx5_hairpin_queue_peer_unbind()
694 dev->data->port_id, cur_queue); in mlx5_hairpin_queue_peer_unbind()
700 dev->data->port_id, cur_queue); in mlx5_hairpin_queue_peer_unbind()
725 mlx5_hairpin_bind_single_port(struct rte_eth_dev *dev, uint16_t rx_port) in mlx5_hairpin_bind_single_port() argument
727 struct mlx5_priv *priv = dev->data->dev_private; in mlx5_hairpin_bind_single_port()
740 if (mlx5_eth_find_next(rx_port, dev->device) != rx_port) { in mlx5_hairpin_bind_single_port()
751 txq_ctrl = mlx5_txq_get(dev, i); in mlx5_hairpin_bind_single_port()
755 mlx5_txq_release(dev, i); in mlx5_hairpin_bind_single_port()
778 mlx5_txq_release(dev, i); in mlx5_hairpin_bind_single_port()
784 mlx5_txq_release(dev, i); in mlx5_hairpin_bind_single_port()
791 txq_ctrl = mlx5_txq_get(dev, i); in mlx5_hairpin_bind_single_port()
795 mlx5_txq_release(dev, i); in mlx5_hairpin_bind_single_port()
799 mlx5_txq_release(dev, i); in mlx5_hairpin_bind_single_port()
810 mlx5_txq_release(dev, i); in mlx5_hairpin_bind_single_port()
814 ret = mlx5_hairpin_queue_peer_bind(dev, i, &peer, 1); in mlx5_hairpin_bind_single_port()
816 mlx5_txq_release(dev, i); in mlx5_hairpin_bind_single_port()
832 mlx5_txq_release(dev, i); in mlx5_hairpin_bind_single_port()
835 mlx5_txq_release(dev, i); in mlx5_hairpin_bind_single_port()
845 txq_ctrl = mlx5_txq_get(dev, i); in mlx5_hairpin_bind_single_port()
850 mlx5_hairpin_queue_peer_unbind(dev, i, 1); in mlx5_hairpin_bind_single_port()
851 mlx5_txq_release(dev, i); in mlx5_hairpin_bind_single_port()
870 mlx5_hairpin_unbind_single_port(struct rte_eth_dev *dev, uint16_t rx_port) in mlx5_hairpin_unbind_single_port() argument
872 struct mlx5_priv *priv = dev->data->dev_private; in mlx5_hairpin_unbind_single_port()
878 if (mlx5_eth_find_next(rx_port, dev->device) != rx_port) { in mlx5_hairpin_unbind_single_port()
886 txq_ctrl = mlx5_txq_get(dev, i); in mlx5_hairpin_unbind_single_port()
890 mlx5_txq_release(dev, i); in mlx5_hairpin_unbind_single_port()
894 mlx5_txq_release(dev, i); in mlx5_hairpin_unbind_single_port()
903 mlx5_txq_release(dev, i); in mlx5_hairpin_unbind_single_port()
910 mlx5_txq_release(dev, i); in mlx5_hairpin_unbind_single_port()
917 ret = mlx5_hairpin_queue_peer_unbind(dev, i, 1); in mlx5_hairpin_unbind_single_port()
932 mlx5_hairpin_bind(struct rte_eth_dev *dev, uint16_t rx_port) in mlx5_hairpin_bind() argument
944 MLX5_ETH_FOREACH_DEV(p, dev->device) { in mlx5_hairpin_bind()
945 ret = mlx5_hairpin_bind_single_port(dev, p); in mlx5_hairpin_bind()
951 return mlx5_hairpin_bind_single_port(dev, rx_port); in mlx5_hairpin_bind()
954 MLX5_ETH_FOREACH_DEV(pp, dev->device) in mlx5_hairpin_bind()
956 mlx5_hairpin_unbind_single_port(dev, pp); in mlx5_hairpin_bind()
965 mlx5_hairpin_unbind(struct rte_eth_dev *dev, uint16_t rx_port) in mlx5_hairpin_unbind() argument
971 MLX5_ETH_FOREACH_DEV(p, dev->device) { in mlx5_hairpin_unbind()
972 ret = mlx5_hairpin_unbind_single_port(dev, p); in mlx5_hairpin_unbind()
977 ret = mlx5_hairpin_unbind_single_port(dev, rx_port); in mlx5_hairpin_unbind()
1004 mlx5_hairpin_get_peer_ports(struct rte_eth_dev *dev, uint16_t *peer_ports, in mlx5_hairpin_get_peer_ports() argument
1007 struct mlx5_priv *priv = dev->data->dev_private; in mlx5_hairpin_get_peer_ports()
1016 txq_ctrl = mlx5_txq_get(dev, i); in mlx5_hairpin_get_peer_ports()
1020 mlx5_txq_release(dev, i); in mlx5_hairpin_get_peer_ports()
1026 mlx5_txq_release(dev, i); in mlx5_hairpin_get_peer_ports()
1033 mlx5_txq_release(dev, i); in mlx5_hairpin_get_peer_ports()
1037 struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, i); in mlx5_hairpin_get_peer_ports()
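
The mlx5_hairpin_bind()/mlx5_hairpin_unbind() and mlx5_hairpin_get_peer_ports() entries back the corresponding ethdev calls used for explicit two-port hairpin. A hedged usage sketch (bind_hairpin_ports and the port ids are assumptions; per the ethdev convention, a non-zero direction queries the peers of a Tx port):

    #include <rte_ethdev.h>

    static int
    bind_hairpin_ports(uint16_t tx_port, uint16_t rx_port)
    {
        uint16_t peers[RTE_MAX_ETHPORTS];
        int n, ret;

        /* Queues were created with manual_bind = 1 and both ports started. */
        ret = rte_eth_hairpin_bind(tx_port, rx_port);
        if (ret < 0)
            return ret;
        /* List the Rx peer ports as seen from the Tx port. */
        n = rte_eth_hairpin_get_peer_ports(tx_port, peers, RTE_MAX_ETHPORTS, 1);
        return n < 0 ? n : 0;
    }

    /* Before stopping either port: rte_eth_hairpin_unbind(tx_port, rx_port); */
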
1080 mlx5_dev_start(struct rte_eth_dev *dev) in mlx5_dev_start() argument
1082 struct mlx5_priv *priv = dev->data->dev_private; in mlx5_dev_start()
1086 DRV_LOG(DEBUG, "port %u starting device", dev->data->port_id); in mlx5_dev_start()
1093 if (dev->data->nb_rx_queues > 0) { in mlx5_dev_start()
1094 ret = mlx5_dev_configure_rss_reta(dev); in mlx5_dev_start()
1097 dev->data->port_id, strerror(rte_errno)); in mlx5_dev_start()
1101 ret = mlx5_txpp_start(dev); in mlx5_dev_start()
1104 dev->data->port_id, strerror(rte_errno)); in mlx5_dev_start()
1109 ret = priv->obj_ops.lb_dummy_queue_create(dev); in mlx5_dev_start()
1113 ret = mlx5_txq_start(dev); in mlx5_dev_start()
1116 dev->data->port_id, strerror(rte_errno)); in mlx5_dev_start()
1122 ret = mlx5_get_flag_dropless_rq(dev); in mlx5_dev_start()
1126 dev->data->port_id); in mlx5_dev_start()
1130 dev->data->port_id); in mlx5_dev_start()
1134 dev->data->port_id); in mlx5_dev_start()
1137 ret = mlx5_rxq_start(dev); in mlx5_dev_start()
1140 dev->data->port_id, strerror(rte_errno)); in mlx5_dev_start()
1147 ret = mlx5_hairpin_auto_bind(dev); in mlx5_dev_start()
1150 dev->data->port_id, strerror(rte_errno)); in mlx5_dev_start()
1154 dev->data->dev_started = 1; in mlx5_dev_start()
1155 ret = mlx5_rx_intr_vec_enable(dev); in mlx5_dev_start()
1158 dev->data->port_id); in mlx5_dev_start()
1161 mlx5_os_stats_init(dev); in mlx5_dev_start()
1166 ret = mlx5_action_handle_attach(dev); in mlx5_dev_start()
1170 dev->data->port_id, rte_strerror(rte_errno)); in mlx5_dev_start()
1173 ret = mlx5_traffic_enable(dev); in mlx5_dev_start()
1176 dev->data->port_id); in mlx5_dev_start()
1180 mlx5_flow_rxq_dynf_metadata_set(dev); in mlx5_dev_start()
1182 mlx5_rxq_timestamp_set(dev); in mlx5_dev_start()
1184 mlx5_txq_dynf_timestamp_set(dev); in mlx5_dev_start()
1190 ret = mlx5_flow_start_default(dev); in mlx5_dev_start()
1193 dev->data->port_id, strerror(rte_errno)); in mlx5_dev_start()
1196 if (mlx5_dev_ctx_shared_mempool_subscribe(dev) != 0) { in mlx5_dev_start()
1198 dev->data->port_id, rte_strerror(rte_errno)); in mlx5_dev_start()
1202 dev->tx_pkt_burst = mlx5_select_tx_function(dev); in mlx5_dev_start()
1203 dev->rx_pkt_burst = mlx5_select_rx_function(dev); in mlx5_dev_start()
1205 mlx5_mp_os_req_start_rxtx(dev); in mlx5_dev_start()
1208 (uint32_t)dev->data->port_id; in mlx5_dev_start()
1211 dev->data->port_id); in mlx5_dev_start()
1212 dev->data->dev_conf.intr_conf.rmv = 0; in mlx5_dev_start()
1216 (uint32_t)dev->data->port_id; in mlx5_dev_start()
1219 dev->data->port_id); in mlx5_dev_start()
1220 dev->data->dev_conf.intr_conf.lsc = 0; in mlx5_dev_start()
1224 (uint32_t)dev->data->port_id; in mlx5_dev_start()
1229 dev->data->dev_started = 0; in mlx5_dev_start()
1230 mlx5_flow_stop_default(dev); in mlx5_dev_start()
1231 mlx5_traffic_disable(dev); in mlx5_dev_start()
1232 mlx5_txq_stop(dev); in mlx5_dev_start()
1233 mlx5_rxq_stop(dev); in mlx5_dev_start()
1235 priv->obj_ops.lb_dummy_queue_release(dev); in mlx5_dev_start()
1236 mlx5_txpp_stop(dev); /* Stop last. */ in mlx5_dev_start()
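
The intr_conf.rmv/intr_conf.lsc handling near the end of mlx5_dev_start() relates to removal and link-state interrupts, which the application opts into via dev_conf and an event callback. A hedged sketch of that application side (on_port_event and its printouts are assumptions):

    #include <stdio.h>
    #include <rte_ethdev.h>

    static int
    on_port_event(uint16_t port_id, enum rte_eth_event_type event,
                  void *cb_arg, void *ret_param)
    {
        (void)cb_arg;
        (void)ret_param;
        if (event == RTE_ETH_EVENT_INTR_LSC)
            printf("port %u: link state changed\n", port_id);
        else if (event == RTE_ETH_EVENT_INTR_RMV)
            printf("port %u: device removed\n", port_id);
        return 0;
    }

    /* Before rte_eth_dev_configure(): conf.intr_conf.lsc = 1; conf.intr_conf.rmv = 1;
     * then register the callback for both events:
     * rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_LSC, on_port_event, NULL);
     * rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_RMV, on_port_event, NULL); */
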
1250 mlx5_dev_stop(struct rte_eth_dev *dev) in mlx5_dev_stop() argument
1252 struct mlx5_priv *priv = dev->data->dev_private; in mlx5_dev_stop()
1254 dev->data->dev_started = 0; in mlx5_dev_stop()
1256 dev->rx_pkt_burst = rte_eth_pkt_burst_dummy; in mlx5_dev_stop()
1257 dev->tx_pkt_burst = rte_eth_pkt_burst_dummy; in mlx5_dev_stop()
1260 mlx5_mp_os_req_stop_rxtx(dev); in mlx5_dev_stop()
1262 DRV_LOG(DEBUG, "port %u stopping device", dev->data->port_id); in mlx5_dev_stop()
1263 mlx5_flow_stop_default(dev); in mlx5_dev_stop()
1265 mlx5_traffic_disable(dev); in mlx5_dev_stop()
1267 mlx5_flow_list_flush(dev, MLX5_FLOW_TYPE_GEN, true); in mlx5_dev_stop()
1268 mlx5_flow_meter_rxq_flush(dev); in mlx5_dev_stop()
1269 mlx5_action_handle_detach(dev); in mlx5_dev_stop()
1270 mlx5_rx_intr_vec_disable(dev); in mlx5_dev_stop()
1274 mlx5_txq_stop(dev); in mlx5_dev_stop()
1275 mlx5_rxq_stop(dev); in mlx5_dev_stop()
1277 priv->obj_ops.lb_dummy_queue_release(dev); in mlx5_dev_stop()
1278 mlx5_txpp_stop(dev); in mlx5_dev_stop()
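
In mlx5_dev_stop() the burst pointers are swapped to rte_eth_pkt_burst_dummy before the queues are torn down, so burst calls still in flight on other lcores return zero packets instead of touching released queues. A hedged sketch of a plain stop/restart cycle from the application (restart_port and the port id are assumptions):

    #include <rte_ethdev.h>

    static int
    restart_port(uint16_t port_id)
    {
        /* rte_eth_dev_stop() returns a status (int) since DPDK 20.11. */
        int ret = rte_eth_dev_stop(port_id);

        if (ret < 0)
            return ret;
        /* Queue configuration is preserved, so a plain start is enough. */
        return rte_eth_dev_start(port_id);
    }
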
1293 mlx5_traffic_enable(struct rte_eth_dev *dev) in mlx5_traffic_enable() argument
1295 struct mlx5_priv *priv = dev->data->dev_private; in mlx5_traffic_enable()
1325 struct mlx5_txq_ctrl *txq_ctrl = mlx5_txq_get(dev, i); in mlx5_traffic_enable()
1333 ret = mlx5_ctrl_flow_source_queue(dev, i); in mlx5_traffic_enable()
1335 mlx5_txq_release(dev, i); in mlx5_traffic_enable()
1340 if (mlx5_flow_create_devx_sq_miss_flow(dev, i) == 0) { in mlx5_traffic_enable()
1343 dev->data->port_id, i); in mlx5_traffic_enable()
1347 mlx5_txq_release(dev, i); in mlx5_traffic_enable()
1350 if (mlx5_flow_create_esw_table_zero_flow(dev)) in mlx5_traffic_enable()
1355 " supported.", dev->data->port_id); in mlx5_traffic_enable()
1358 ret = mlx5_flow_lacp_miss(dev); in mlx5_traffic_enable()
1361 "forward LACP to kernel.", dev->data->port_id); in mlx5_traffic_enable()
1364 , dev->data->port_id); in mlx5_traffic_enable()
1368 if (dev->data->promiscuous) { in mlx5_traffic_enable()
1375 ret = mlx5_ctrl_flow(dev, &promisc, &promisc); in mlx5_traffic_enable()
1379 if (dev->data->all_multicast) { in mlx5_traffic_enable()
1386 ret = mlx5_ctrl_flow(dev, &multicast, &multicast); in mlx5_traffic_enable()
1400 ret = mlx5_ctrl_flow_vlan(dev, &bcast, &bcast, in mlx5_traffic_enable()
1404 ret = mlx5_ctrl_flow_vlan(dev, &ipv6_multi_spec, in mlx5_traffic_enable()
1411 ret = mlx5_ctrl_flow(dev, &bcast, &bcast); in mlx5_traffic_enable()
1414 ret = mlx5_ctrl_flow(dev, &ipv6_multi_spec, in mlx5_traffic_enable()
1426 struct rte_ether_addr *mac = &dev->data->mac_addrs[i]; in mlx5_traffic_enable()
1442 ret = mlx5_ctrl_flow_vlan(dev, &unicast, in mlx5_traffic_enable()
1450 ret = mlx5_ctrl_flow(dev, &unicast, &unicast_mask); in mlx5_traffic_enable()
1458 mlx5_flow_list_flush(dev, MLX5_FLOW_TYPE_CTL, false); in mlx5_traffic_enable()
1471 mlx5_traffic_disable(struct rte_eth_dev *dev) in mlx5_traffic_disable() argument
1473 mlx5_flow_list_flush(dev, MLX5_FLOW_TYPE_CTL, false); in mlx5_traffic_disable()
1486 mlx5_traffic_restart(struct rte_eth_dev *dev) in mlx5_traffic_restart() argument
1488 if (dev->data->dev_started) { in mlx5_traffic_restart()
1489 mlx5_traffic_disable(dev); in mlx5_traffic_restart()
1490 return mlx5_traffic_enable(dev); in mlx5_traffic_restart()
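
mlx5_traffic_restart() tears down and rebuilds the control flows installed by mlx5_traffic_enable(); it runs when the set of default rules has to change on a started port. A hedged application-side sketch of operations that typically trigger that path (retarget_traffic and the particular calls chosen are assumptions):

    #include <rte_ethdev.h>
    #include <rte_ether.h>

    static int
    retarget_traffic(uint16_t port_id, struct rte_ether_addr *new_mac)
    {
        int ret;

        ret = rte_eth_promiscuous_enable(port_id);   /* broaden Rx matching */
        if (ret != 0)
            return ret;
        ret = rte_eth_allmulticast_enable(port_id);  /* accept all multicast */
        if (ret != 0)
            return ret;
        /* Add a unicast MAC; the PMD refreshes its control flows for it. */
        return rte_eth_dev_mac_addr_add(port_id, new_mac, 0);
    }
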