/* * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 * Copyright 2016 NXP
 *
 */

#include <time.h>
#include <net/if.h>

#include <rte_mbuf.h>
#include <rte_ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_cycles.h>
#include <rte_kvargs.h>
#include <rte_dev.h>
#include <rte_fslmc.h>

#include "dpaa2_pmd_logs.h"
#include <fslmc_vfio.h>
#include <dpaa2_hw_pvt.h>
#include <dpaa2_hw_mempool.h>
#include <dpaa2_hw_dpio.h>
#include <mc/fsl_dpmng.h>
#include "dpaa2_ethdev.h"
#include <fsl_qbman_debug.h>

/* Supported Rx offloads */
static uint64_t dev_rx_offloads_sup =
		DEV_RX_OFFLOAD_VLAN_STRIP |
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM |
		DEV_RX_OFFLOAD_TCP_CKSUM |
		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
		DEV_RX_OFFLOAD_VLAN_FILTER |
		DEV_RX_OFFLOAD_JUMBO_FRAME;

/* Rx offloads which cannot be disabled */
static uint64_t dev_rx_offloads_nodis =
		DEV_RX_OFFLOAD_SCATTER;

/* Supported Tx offloads */
static uint64_t dev_tx_offloads_sup =
		DEV_TX_OFFLOAD_VLAN_INSERT |
		DEV_TX_OFFLOAD_IPV4_CKSUM |
		DEV_TX_OFFLOAD_UDP_CKSUM |
		DEV_TX_OFFLOAD_TCP_CKSUM |
		DEV_TX_OFFLOAD_SCTP_CKSUM |
		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;

/* Tx offloads which cannot be disabled */
static uint64_t dev_tx_offloads_nodis =
		DEV_TX_OFFLOAD_MULTI_SEGS |
		DEV_TX_OFFLOAD_MT_LOCKFREE |
		DEV_TX_OFFLOAD_MBUF_FAST_FREE;

struct rte_dpaa2_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	uint8_t page_id; /* dpni statistics page id */
	uint8_t stats_id; /* stats id in the given page */
};

static const struct rte_dpaa2_xstats_name_off dpaa2_xstats_strings[] = {
	{"ingress_multicast_frames", 0, 2},
	{"ingress_multicast_bytes", 0, 3},
	{"ingress_broadcast_frames", 0, 4},
	{"ingress_broadcast_bytes", 0, 5},
	{"egress_multicast_frames", 1, 2},
	{"egress_multicast_bytes", 1, 3},
	{"egress_broadcast_frames", 1, 4},
	{"egress_broadcast_bytes", 1, 5},
	{"ingress_filtered_frames", 2, 0},
	{"ingress_discarded_frames", 2, 1},
	{"ingress_nobuffer_discards", 2, 2},
	{"egress_discarded_frames", 2, 3},
	{"egress_confirmed_frames", 2, 4},
};

static struct rte_dpaa2_driver rte_dpaa2_pmd;
static int dpaa2_dev_uninit(struct rte_eth_dev *eth_dev);
static int dpaa2_dev_link_update(struct rte_eth_dev *dev,
				 int wait_to_complete);
static int dpaa2_dev_set_link_up(struct rte_eth_dev *dev);
static int dpaa2_dev_set_link_down(struct rte_eth_dev *dev);
static int dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);

int dpaa2_logtype_pmd;

static int
dpaa2_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = priv->hw;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		DPAA2_PMD_ERR("dpni is NULL");
		return -1;
	}

	if (on)
		ret = dpni_add_vlan_id(dpni, CMD_PRI_LOW,
				       priv->token, vlan_id);
	else
		ret = dpni_remove_vlan_id(dpni, CMD_PRI_LOW,
					  priv->token, vlan_id);

	if (ret < 0)
		DPAA2_PMD_ERR("ret = %d Unable to add/rem vlan %d hwid =%d",
			      ret, vlan_id, priv->hw_id);

	return ret;
}

static int
dpaa2_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = priv->hw;
	int ret;

	PMD_INIT_FUNC_TRACE();

	if (mask & ETH_VLAN_FILTER_MASK) {
		/* VLAN Filter not available */
		if (!priv->max_vlan_filters) {
			DPAA2_PMD_INFO("VLAN filter not available");
			goto next_mask;
		}

		if (dev->data->dev_conf.rxmode.offloads &
			DEV_RX_OFFLOAD_VLAN_FILTER)
			ret = dpni_enable_vlan_filter(dpni, CMD_PRI_LOW,
						      priv->token, true);
		else
			ret = dpni_enable_vlan_filter(dpni, CMD_PRI_LOW,
						      priv->token, false);
		if (ret < 0)
			DPAA2_PMD_INFO("Unable to set vlan filter = %d", ret);
	}
next_mask:
	if (mask & ETH_VLAN_EXTEND_MASK) {
		if (dev->data->dev_conf.rxmode.offloads &
			DEV_RX_OFFLOAD_VLAN_EXTEND)
			DPAA2_PMD_INFO("VLAN extend offload not supported");
	}

	return 0;
}

static int
dpaa2_fw_version_get(struct rte_eth_dev *dev,
		     char *fw_version,
		     size_t fw_size)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = priv->hw;
	struct mc_soc_version mc_plat_info = {0};
	struct mc_version mc_ver_info = {0};

	PMD_INIT_FUNC_TRACE();

	if (mc_get_soc_version(dpni, CMD_PRI_LOW, &mc_plat_info))
		DPAA2_PMD_WARN("\tmc_get_soc_version failed");

	if (mc_get_version(dpni, CMD_PRI_LOW, &mc_ver_info))
		DPAA2_PMD_WARN("\tmc_get_version failed");

	ret = snprintf(fw_version, fw_size,
		       "%x-%d.%d.%d",
		       mc_plat_info.svr,
		       mc_ver_info.major,
		       mc_ver_info.minor,
		       mc_ver_info.revision);

	ret += 1; /* add the size of '\0' */
	if (fw_size < (uint32_t)ret)
		return ret;
	else
		return 0;
}

static void
dpaa2_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	dev_info->if_index = priv->hw_id;

	dev_info->max_mac_addrs = priv->max_mac_filters;
	dev_info->max_rx_pktlen = DPAA2_MAX_RX_PKT_LEN;
	dev_info->min_rx_bufsize = DPAA2_MIN_RX_BUF_SIZE;
	dev_info->max_rx_queues = (uint16_t)priv->nb_rx_queues;
	dev_info->max_tx_queues = (uint16_t)priv->nb_tx_queues;
	dev_info->rx_offload_capa = dev_rx_offloads_sup |
					dev_rx_offloads_nodis;
	dev_info->tx_offload_capa = dev_tx_offloads_sup |
					dev_tx_offloads_nodis;
	dev_info->speed_capa = ETH_LINK_SPEED_1G |
			ETH_LINK_SPEED_2_5G |
			ETH_LINK_SPEED_10G;

	dev_info->max_hash_mac_addrs = 0;
	dev_info->max_vfs = 0;
	dev_info->max_vmdq_pools = ETH_16_POOLS;
	dev_info->flow_type_rss_offloads = DPAA2_RSS_OFFLOAD_ALL;
}

static int
dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	uint16_t dist_idx;
	uint32_t vq_id;
	struct dpaa2_queue *mc_q, *mcq;
	uint32_t tot_queues;
	int i;
	struct dpaa2_queue *dpaa2_q;

	PMD_INIT_FUNC_TRACE();

	tot_queues = priv->nb_rx_queues + priv->nb_tx_queues;
	mc_q = rte_malloc(NULL, sizeof(struct dpaa2_queue) * tot_queues,
			  RTE_CACHE_LINE_SIZE);
	if (!mc_q) {
		DPAA2_PMD_ERR("Memory allocation failed for rx/tx queues");
		return -1;
	}

	for (i = 0; i < priv->nb_rx_queues; i++) {
		mc_q->dev = dev;
		priv->rx_vq[i] = mc_q++;
		dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
		dpaa2_q->q_storage = rte_malloc("dq_storage",
					sizeof(struct queue_storage_info_t),
					RTE_CACHE_LINE_SIZE);
		if (!dpaa2_q->q_storage)
			goto fail;

		memset(dpaa2_q->q_storage, 0,
		       sizeof(struct queue_storage_info_t));
		if (dpaa2_alloc_dq_storage(dpaa2_q->q_storage))
			goto fail;
	}

	for (i = 0; i < priv->nb_tx_queues; i++) {
		mc_q->dev = dev;
		mc_q->flow_id = 0xffff;
		priv->tx_vq[i] = mc_q++;
		dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i];
		dpaa2_q->cscn = rte_malloc(NULL,
					   sizeof(struct qbman_result), 16);
		if (!dpaa2_q->cscn)
			goto fail_tx;
	}

	vq_id = 0;
	for (dist_idx = 0; dist_idx < priv->nb_rx_queues; dist_idx++) {
		mcq = (struct dpaa2_queue *)priv->rx_vq[vq_id];
		mcq->tc_index = DPAA2_DEF_TC;
		mcq->flow_id = dist_idx;
		vq_id++;
	}

	return 0;
fail_tx:
	i -= 1;
	while (i >= 0) {
		dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i];
		rte_free(dpaa2_q->cscn);
		priv->tx_vq[i--] = NULL;
	}
	i = priv->nb_rx_queues;
fail:
	i -= 1;
	mc_q = priv->rx_vq[0];
	while (i >= 0) {
		dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
		dpaa2_free_dq_storage(dpaa2_q->q_storage);
		rte_free(dpaa2_q->q_storage);
		priv->rx_vq[i--] = NULL;
	}
	rte_free(mc_q);
	return -1;
}

static void
dpaa2_free_rx_tx_queues(struct rte_eth_dev *dev)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct dpaa2_queue *dpaa2_q;
	int i;

	PMD_INIT_FUNC_TRACE();

	/* Queue allocation base */
	if (priv->rx_vq[0]) {
		/* cleaning up queue storage */
		for (i = 0; i < priv->nb_rx_queues; i++) {
			dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
			if (dpaa2_q->q_storage)
				rte_free(dpaa2_q->q_storage);
		}
		/* cleanup tx queue cscn */
		for (i = 0; i < priv->nb_tx_queues; i++) {
			dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i];
			rte_free(dpaa2_q->cscn);
		}
		/* free memory for all queues (RX+TX) */
		rte_free(priv->rx_vq[0]);
		priv->rx_vq[0] = NULL;
	}
}

static int
dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = priv->hw;
	struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
	uint64_t rx_offloads = eth_conf->rxmode.offloads;
	uint64_t tx_offloads = eth_conf->txmode.offloads;
	int rx_l3_csum_offload = false;
	int rx_l4_csum_offload = false;
	int tx_l3_csum_offload = false;
	int tx_l4_csum_offload = false;
	int ret;

	PMD_INIT_FUNC_TRACE();

	/* Rx offloads validation */
	if (dev_rx_offloads_nodis & ~rx_offloads) {
		DPAA2_PMD_WARN(
		"Rx offloads non configurable - requested 0x%" PRIx64
		" ignored 0x%" PRIx64,
			rx_offloads, dev_rx_offloads_nodis);
	}

	/* Tx offloads validation */
	if (dev_tx_offloads_nodis & ~tx_offloads) {
		DPAA2_PMD_WARN(
		"Tx offloads non configurable - requested 0x%" PRIx64
		" ignored 0x%" PRIx64,
			tx_offloads, dev_tx_offloads_nodis);
	}

	if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
		if (eth_conf->rxmode.max_rx_pkt_len <= DPAA2_MAX_RX_PKT_LEN) {
			ret = dpni_set_max_frame_length(dpni, CMD_PRI_LOW,
				priv->token, eth_conf->rxmode.max_rx_pkt_len);
			if (ret) {
				DPAA2_PMD_ERR(
					"Unable to set mtu. check config");
				return ret;
			}
		} else {
			return -1;
		}
	}

	if (eth_conf->rxmode.mq_mode == ETH_MQ_RX_RSS) {
		ret = dpaa2_setup_flow_dist(dev,
				eth_conf->rx_adv_conf.rss_conf.rss_hf);
		if (ret) {
			DPAA2_PMD_ERR("Unable to set flow distribution. "
				      "Check queue config");
			return ret;
		}
	}

	if (rx_offloads & DEV_RX_OFFLOAD_IPV4_CKSUM)
		rx_l3_csum_offload = true;

	if ((rx_offloads & DEV_RX_OFFLOAD_UDP_CKSUM) ||
	    (rx_offloads & DEV_RX_OFFLOAD_TCP_CKSUM))
		rx_l4_csum_offload = true;

	ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
			       DPNI_OFF_RX_L3_CSUM, rx_l3_csum_offload);
	if (ret) {
		DPAA2_PMD_ERR("Error to set RX l3 csum:Error = %d", ret);
		return ret;
	}

	ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
			       DPNI_OFF_RX_L4_CSUM, rx_l4_csum_offload);
	if (ret) {
		DPAA2_PMD_ERR("Error to set RX l4 csum:Error = %d", ret);
		return ret;
	}

	if (tx_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM)
		tx_l3_csum_offload = true;

	if ((tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM) ||
	    (tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM) ||
	    (tx_offloads & DEV_TX_OFFLOAD_SCTP_CKSUM))
		tx_l4_csum_offload = true;

	ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
			       DPNI_OFF_TX_L3_CSUM, tx_l3_csum_offload);
	if (ret) {
		DPAA2_PMD_ERR("Error to set TX l3 csum:Error = %d", ret);
		return ret;
	}

	ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
			       DPNI_OFF_TX_L4_CSUM, tx_l4_csum_offload);
	if (ret) {
		DPAA2_PMD_ERR("Error to set TX l4 csum:Error = %d", ret);
		return ret;
	}

	/* Enabling hash results in FD requires setting DPNI_FLCTYPE_HASH in
	 * dpni_set_offload API. Setting this FLCTYPE for DPNI sets the FD[SC]
	 * to 0 for LS2 in the hardware thus disabling data/annotation
	 * stashing. For LX2 this is fixed in hardware and thus hash result and
	 * parse results can be received in FD using this option.
	 */
	if (dpaa2_svr_family == SVR_LX2160A) {
		ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
				       DPNI_FLCTYPE_HASH, true);
		if (ret) {
			DPAA2_PMD_ERR("Error setting FLCTYPE: Err = %d", ret);
			return ret;
		}
	}

	if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
		dpaa2_vlan_offload_set(dev, ETH_VLAN_FILTER_MASK);

	/* update the current status */
	dpaa2_dev_link_update(dev, 0);

	return 0;
}

/* Function to setup RX flow information. It contains traffic class ID,
 * flow ID, destination configuration etc.
 */
static int
dpaa2_dev_rx_queue_setup(struct rte_eth_dev *dev,
			 uint16_t rx_queue_id,
			 uint16_t nb_rx_desc __rte_unused,
			 unsigned int socket_id __rte_unused,
			 const struct rte_eth_rxconf *rx_conf __rte_unused,
			 struct rte_mempool *mb_pool)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	struct dpaa2_queue *dpaa2_q;
	struct dpni_queue cfg;
	uint8_t options = 0;
	uint8_t flow_id;
	uint32_t bpid;
	int ret;

	PMD_INIT_FUNC_TRACE();

	DPAA2_PMD_DEBUG("dev =%p, queue =%d, pool = %p, conf =%p",
			dev, rx_queue_id, mb_pool, rx_conf);

	if (!priv->bp_list || priv->bp_list->mp != mb_pool) {
		bpid = mempool_to_bpid(mb_pool);
		ret = dpaa2_attach_bp_list(priv,
					   rte_dpaa2_bpid_info[bpid].bp_list);
		if (ret)
			return ret;
	}
	dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[rx_queue_id];
	dpaa2_q->mb_pool = mb_pool; /**< mbuf pool to populate RX ring. */

	/* Get the flow id from given VQ id */
	flow_id = rx_queue_id % priv->nb_rx_queues;
	memset(&cfg, 0, sizeof(struct dpni_queue));

	options = options | DPNI_QUEUE_OPT_USER_CTX;
	cfg.user_context = (size_t)(dpaa2_q);

	/* if ls2088 or rev2 device, enable the stashing */

	if ((dpaa2_svr_family & 0xffff0000) != SVR_LS2080A) {
		options |= DPNI_QUEUE_OPT_FLC;
		cfg.flc.stash_control = true;
		cfg.flc.value &= 0xFFFFFFFFFFFFFFC0;
		/* 00 00 00 - last 6 bit represent annotation, context stashing,
		 * data stashing setting 01 01 00 (0x14)
		 * (in following order ->DS AS CS)
		 * to enable 1 line data, 1 line annotation.
		 * For LX2, this setting should be 01 00 00 (0x10)
		 */
		if ((dpaa2_svr_family & 0xffff0000) == SVR_LX2160A)
			cfg.flc.value |= 0x10;
		else
			cfg.flc.value |= 0x14;
	}
	ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_RX,
			     dpaa2_q->tc_index, flow_id, options, &cfg);
	if (ret) {
		DPAA2_PMD_ERR("Error in setting the rx flow: = %d", ret);
		return -1;
	}

	if (!(priv->flags & DPAA2_RX_TAILDROP_OFF)) {
		struct dpni_taildrop taildrop;

		taildrop.enable = 1;
		/* enabling per rx queue congestion control */
		taildrop.threshold = CONG_THRESHOLD_RX_Q;
		taildrop.units = DPNI_CONGESTION_UNIT_BYTES;
		taildrop.oal = CONG_RX_OAL;
		DPAA2_PMD_DEBUG("Enabling Early Drop on queue = %d",
				rx_queue_id);
		ret = dpni_set_taildrop(dpni, CMD_PRI_LOW, priv->token,
					DPNI_CP_QUEUE, DPNI_QUEUE_RX,
					dpaa2_q->tc_index, flow_id, &taildrop);
		if (ret) {
			DPAA2_PMD_ERR("Error in setting taildrop. err=(%d)",
				      ret);
			return -1;
		}
	}

	dev->data->rx_queues[rx_queue_id] = dpaa2_q;
	return 0;
}

static int
dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev,
			 uint16_t tx_queue_id,
			 uint16_t nb_tx_desc __rte_unused,
			 unsigned int socket_id __rte_unused,
			 const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)
		priv->tx_vq[tx_queue_id];
	struct fsl_mc_io *dpni = priv->hw;
	struct dpni_queue tx_conf_cfg;
	struct dpni_queue tx_flow_cfg;
	uint8_t options = 0, flow_id;
	uint32_t tc_id;
	int ret;

	PMD_INIT_FUNC_TRACE();

	/* Return if queue already configured */
	if (dpaa2_q->flow_id != 0xffff) {
		dev->data->tx_queues[tx_queue_id] = dpaa2_q;
		return 0;
	}

	memset(&tx_conf_cfg, 0, sizeof(struct dpni_queue));
	memset(&tx_flow_cfg, 0, sizeof(struct dpni_queue));

	tc_id = tx_queue_id;
	flow_id = 0;

	ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_TX,
			     tc_id, flow_id, options, &tx_flow_cfg);
	if (ret) {
		DPAA2_PMD_ERR("Error in setting the tx flow: "
			      "tc_id=%d, flow=%d err=%d",
			      tc_id, flow_id, ret);
		return -1;
	}

	dpaa2_q->flow_id = flow_id;

	if (tx_queue_id == 0) {
		/* Set tx-conf and error configuration */
		ret = dpni_set_tx_confirmation_mode(dpni, CMD_PRI_LOW,
						    priv->token,
						    DPNI_CONF_DISABLE);
		if (ret) {
			DPAA2_PMD_ERR("Error in set tx conf mode settings: "
				      "err=%d", ret);
			return -1;
		}
	}
	dpaa2_q->tc_index = tc_id;

	if (!(priv->flags & DPAA2_TX_CGR_OFF)) {
		struct dpni_congestion_notification_cfg cong_notif_cfg;

		cong_notif_cfg.units = DPNI_CONGESTION_UNIT_FRAMES;
		cong_notif_cfg.threshold_entry = CONG_ENTER_TX_THRESHOLD;
		/* Notify that the queue is not congested when the data in
		 * the queue is below this threshold.
		 */
		cong_notif_cfg.threshold_exit = CONG_EXIT_TX_THRESHOLD;
		cong_notif_cfg.message_ctx = 0;
		cong_notif_cfg.message_iova =
			(size_t)DPAA2_VADDR_TO_IOVA(dpaa2_q->cscn);
		cong_notif_cfg.dest_cfg.dest_type = DPNI_DEST_NONE;
		cong_notif_cfg.notification_mode =
					 DPNI_CONG_OPT_WRITE_MEM_ON_ENTER |
					 DPNI_CONG_OPT_WRITE_MEM_ON_EXIT |
					 DPNI_CONG_OPT_COHERENT_WRITE;

		ret = dpni_set_congestion_notification(dpni, CMD_PRI_LOW,
						       priv->token,
						       DPNI_QUEUE_TX,
						       tc_id,
						       &cong_notif_cfg);
		if (ret) {
			DPAA2_PMD_ERR(
			   "Error in setting tx congestion notification: "
			   "err=%d", ret);
			return -ret;
		}
	}
	dev->data->tx_queues[tx_queue_id] = dpaa2_q;
	return 0;
}

static void
dpaa2_dev_rx_queue_release(void *q __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
}

static void
dpaa2_dev_tx_queue_release(void *q __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
}

static uint32_t
dpaa2_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	int32_t ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct dpaa2_queue *dpaa2_q;
	struct qbman_swp *swp;
	struct qbman_fq_query_np_rslt state;
	uint32_t frame_cnt = 0;

	PMD_INIT_FUNC_TRACE();

	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			DPAA2_PMD_ERR("Failure in affining portal");
			return -EINVAL;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;

	dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[rx_queue_id];

	if (qbman_fq_query_state(swp, dpaa2_q->fqid, &state) == 0) {
		frame_cnt = qbman_fq_state_frame_count(&state);
		DPAA2_PMD_DEBUG("RX frame count for q(%d) is %u",
				rx_queue_id, frame_cnt);
	}
	return frame_cnt;
}

static const uint32_t *
dpaa2_supported_ptypes_get(struct rte_eth_dev *dev)
{
	static const uint32_t ptypes[] = {
		/* todo - add more types */
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L3_IPV4,
		RTE_PTYPE_L3_IPV4_EXT,
		RTE_PTYPE_L3_IPV6,
		RTE_PTYPE_L3_IPV6_EXT,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_L4_SCTP,
		RTE_PTYPE_L4_ICMP,
		RTE_PTYPE_UNKNOWN
	};

	if (dev->rx_pkt_burst == dpaa2_dev_prefetch_rx)
		return ptypes;
	return NULL;
}

/**
 * Dpaa2 link Interrupt handler
 *
 * @param param
 *  The address of parameter (struct rte_eth_dev *) registered before.
 *
 * @return
 *  void
 */
static void
dpaa2_interrupt_handler(void *param)
{
	struct rte_eth_dev *dev = param;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	int ret;
	int irq_index = DPNI_IRQ_INDEX;
	unsigned int status = 0, clear = 0;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		DPAA2_PMD_ERR("dpni is NULL");
		return;
	}

	ret = dpni_get_irq_status(dpni, CMD_PRI_LOW, priv->token,
				  irq_index, &status);
	if (unlikely(ret)) {
		DPAA2_PMD_ERR("Can't get irq status (err %d)", ret);
		clear = 0xffffffff;
		goto out;
	}

	if (status & DPNI_IRQ_EVENT_LINK_CHANGED) {
		clear = DPNI_IRQ_EVENT_LINK_CHANGED;
		dpaa2_dev_link_update(dev, 0);
		/* calling all the apps registered for link status event */
		_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
					      NULL);
	}
out:
	ret = dpni_clear_irq_status(dpni, CMD_PRI_LOW, priv->token,
				    irq_index, clear);
	if (unlikely(ret))
		DPAA2_PMD_ERR("Can't clear irq status (err %d)", ret);
}

static int
dpaa2_eth_setup_irqs(struct rte_eth_dev *dev, int enable)
{
	int err = 0;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	int irq_index = DPNI_IRQ_INDEX;
	unsigned int mask = DPNI_IRQ_EVENT_LINK_CHANGED;

	PMD_INIT_FUNC_TRACE();

	err = dpni_set_irq_mask(dpni, CMD_PRI_LOW, priv->token,
				irq_index, mask);
	if (err < 0) {
		DPAA2_PMD_ERR("Error: dpni_set_irq_mask():%d (%s)", err,
			      strerror(-err));
		return err;
	}

	err = dpni_set_irq_enable(dpni, CMD_PRI_LOW, priv->token,
				  irq_index, enable);
	if (err < 0)
		DPAA2_PMD_ERR("Error: dpni_set_irq_enable():%d (%s)", err,
			      strerror(-err));

	return err;
}

static int
dpaa2_dev_start(struct rte_eth_dev *dev)
{
	struct rte_device *rdev = dev->device;
	struct rte_dpaa2_device *dpaa2_dev;
	struct rte_eth_dev_data *data = dev->data;
	struct dpaa2_dev_priv *priv = data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	struct dpni_queue cfg;
	struct dpni_error_cfg err_cfg;
	uint16_t qdid;
	struct dpni_queue_id qid;
	struct dpaa2_queue *dpaa2_q;
	int ret, i;
	struct rte_intr_handle *intr_handle;

	dpaa2_dev = container_of(rdev, struct rte_dpaa2_device, device);
	intr_handle = &dpaa2_dev->intr_handle;

	PMD_INIT_FUNC_TRACE();

	ret = dpni_enable(dpni, CMD_PRI_LOW, priv->token);
	if (ret) {
		DPAA2_PMD_ERR("Failure in enabling dpni %d device: err=%d",
			      priv->hw_id, ret);
		return ret;
	}

	/* Power up the phy. Needed to make the link go UP */
	dpaa2_dev_set_link_up(dev);

	ret = dpni_get_qdid(dpni, CMD_PRI_LOW, priv->token,
			    DPNI_QUEUE_TX, &qdid);
	if (ret) {
		DPAA2_PMD_ERR("Error in getting qdid: err=%d", ret);
		return ret;
	}
	priv->qdid = qdid;

	for (i = 0; i < data->nb_rx_queues; i++) {
		dpaa2_q = (struct dpaa2_queue *)data->rx_queues[i];
		ret = dpni_get_queue(dpni, CMD_PRI_LOW, priv->token,
				     DPNI_QUEUE_RX, dpaa2_q->tc_index,
				     dpaa2_q->flow_id, &cfg, &qid);
		if (ret) {
			DPAA2_PMD_ERR("Error in getting flow information: "
				      "err=%d", ret);
			return ret;
		}
		dpaa2_q->fqid = qid.fqid;
	}

	/* checksum errors, send them to normal path and set it in annotation */
	err_cfg.errors = DPNI_ERROR_L3CE | DPNI_ERROR_L4CE;

	err_cfg.error_action = DPNI_ERROR_ACTION_CONTINUE;
	err_cfg.set_frame_annotation = true;

	ret = dpni_set_errors_behavior(dpni, CMD_PRI_LOW,
				       priv->token, &err_cfg);
	if (ret) {
		DPAA2_PMD_ERR("Error to dpni_set_errors_behavior: code = %d",
			      ret);
		return ret;
	}

	/* if the interrupts were configured on this devices */
	if (intr_handle && (intr_handle->fd) &&
	    (dev->data->dev_conf.intr_conf.lsc != 0)) {
		/* Registering LSC interrupt handler */
		rte_intr_callback_register(intr_handle,
					   dpaa2_interrupt_handler,
					   (void *)dev);

		/* enable vfio intr/eventfd mapping
		 * Interrupt index 0 is required, so we can not use
		 * rte_intr_enable.
		 */
		rte_dpaa2_intr_enable(intr_handle, DPNI_IRQ_INDEX);

		/* enable dpni_irqs */
		dpaa2_eth_setup_irqs(dev, 1);
	}

	return 0;
}

/**
 * This routine disables all traffic on the adapter by issuing a
 * global reset on the MAC.
 */
static void
dpaa2_dev_stop(struct rte_eth_dev *dev)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	int ret;
	struct rte_eth_link link;
	struct rte_intr_handle *intr_handle = dev->intr_handle;

	PMD_INIT_FUNC_TRACE();

	/* reset interrupt callback */
	if (intr_handle && (intr_handle->fd) &&
	    (dev->data->dev_conf.intr_conf.lsc != 0)) {
		/* disable dpni irqs */
		dpaa2_eth_setup_irqs(dev, 0);

		/* disable vfio intr before callback unregister */
		rte_dpaa2_intr_disable(intr_handle, DPNI_IRQ_INDEX);

		/* Unregistering LSC interrupt handler */
		rte_intr_callback_unregister(intr_handle,
					     dpaa2_interrupt_handler,
					     (void *)dev);
	}

	dpaa2_dev_set_link_down(dev);

	ret = dpni_disable(dpni, CMD_PRI_LOW, priv->token);
	if (ret) {
		DPAA2_PMD_ERR("Failure (ret %d) in disabling dpni %d dev",
			      ret, priv->hw_id);
		return;
	}

	/* clear the recorded link status */
	memset(&link, 0, sizeof(link));
	rte_eth_linkstatus_set(dev, &link);
}

static void
dpaa2_dev_close(struct rte_eth_dev *dev)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	int ret;
	struct rte_eth_link link;

	PMD_INIT_FUNC_TRACE();

	/* Clean the device first */
	ret = dpni_reset(dpni, CMD_PRI_LOW, priv->token);
	if (ret) {
		DPAA2_PMD_ERR("Failure cleaning dpni device: err=%d", ret);
		return;
	}

	memset(&link, 0, sizeof(link));
	rte_eth_linkstatus_set(dev, &link);
}

static void
dpaa2_dev_promiscuous_enable(
		struct rte_eth_dev *dev)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		DPAA2_PMD_ERR("dpni is NULL");
		return;
	}

	ret = dpni_set_unicast_promisc(dpni, CMD_PRI_LOW, priv->token, true);
	if (ret < 0)
		DPAA2_PMD_ERR("Unable to enable U promisc mode %d", ret);

	ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, true);
	if (ret < 0)
		DPAA2_PMD_ERR("Unable to enable M promisc mode %d", ret);
}

static void
dpaa2_dev_promiscuous_disable(
		struct rte_eth_dev *dev)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		DPAA2_PMD_ERR("dpni is NULL");
		return;
	}

	ret = dpni_set_unicast_promisc(dpni, CMD_PRI_LOW, priv->token, false);
	if (ret < 0)
		DPAA2_PMD_ERR("Unable to disable U promisc mode %d", ret);

	if (dev->data->all_multicast == 0) {
		ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW,
						 priv->token, false);
		if (ret < 0)
			DPAA2_PMD_ERR("Unable to disable M promisc mode %d",
				      ret);
	}
}

static void
dpaa2_dev_allmulticast_enable(
		struct rte_eth_dev *dev)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		DPAA2_PMD_ERR("dpni is NULL");
		return;
	}

	ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, true);
	if (ret < 0)
		DPAA2_PMD_ERR("Unable to enable multicast mode %d", ret);
}

static void
dpaa2_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		DPAA2_PMD_ERR("dpni is NULL");
		return;
	}

	/* must remain on for all promiscuous */
	if (dev->data->promiscuous == 1)
		return;

	ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, false);
	if (ret < 0)
		DPAA2_PMD_ERR("Unable to disable multicast mode %d", ret);
}

static int
dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
				+ VLAN_TAG_SIZE;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		DPAA2_PMD_ERR("dpni is NULL");
		return -EINVAL;
	}

	/* check that mtu is within the allowed range */
	if ((mtu < ETHER_MIN_MTU) || (frame_size > DPAA2_MAX_RX_PKT_LEN))
		return -EINVAL;

	if (frame_size > ETHER_MAX_LEN)
		dev->data->dev_conf.rxmode.offloads |=
						DEV_RX_OFFLOAD_JUMBO_FRAME;
	else
		dev->data->dev_conf.rxmode.offloads &=
						~DEV_RX_OFFLOAD_JUMBO_FRAME;

	dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;

	/* Set the Max Rx frame length as 'mtu' +
	 * Maximum Ethernet header length
	 */
	ret = dpni_set_max_frame_length(dpni, CMD_PRI_LOW, priv->token,
					frame_size);
	if (ret) {
		DPAA2_PMD_ERR("Setting the max frame length failed");
		return -1;
	}
	DPAA2_PMD_INFO("MTU configured for the device: %d", mtu);
	return 0;
}

static int
dpaa2_dev_add_mac_addr(struct rte_eth_dev *dev,
		       struct ether_addr *addr,
		       __rte_unused uint32_t index,
		       __rte_unused uint32_t pool)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		DPAA2_PMD_ERR("dpni is NULL");
		return -1;
	}

	ret = dpni_add_mac_addr(dpni, CMD_PRI_LOW,
				priv->token, addr->addr_bytes);
	if (ret)
		DPAA2_PMD_ERR(
			"error: Adding the MAC ADDR failed: err = %d", ret);
	return 0;
}

static void
dpaa2_dev_remove_mac_addr(struct rte_eth_dev *dev,
			  uint32_t index)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	struct rte_eth_dev_data *data = dev->data;
	struct ether_addr *macaddr;

	PMD_INIT_FUNC_TRACE();

	macaddr = &data->mac_addrs[index];

	if (dpni == NULL) {
		DPAA2_PMD_ERR("dpni is NULL");
		return;
	}

	ret = dpni_remove_mac_addr(dpni, CMD_PRI_LOW,
				   priv->token, macaddr->addr_bytes);
	if (ret)
		DPAA2_PMD_ERR(
			"error: Removing the MAC ADDR failed: err = %d", ret);
}

static int
dpaa2_dev_set_mac_addr(struct rte_eth_dev *dev,
		       struct ether_addr *addr)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		DPAA2_PMD_ERR("dpni is NULL");
		return -EINVAL;
	}

	ret = dpni_set_primary_mac_addr(dpni, CMD_PRI_LOW,
					priv->token, addr->addr_bytes);

	if (ret)
		DPAA2_PMD_ERR(
			"error: Setting the MAC ADDR failed %d", ret);

	return ret;
}

static
int dpaa2_dev_stats_get(struct rte_eth_dev *dev,
			struct rte_eth_stats *stats)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	int32_t retcode;
	uint8_t page0 = 0, page1 = 1, page2 = 2;
	union dpni_statistics value;
	int i;
	struct dpaa2_queue *dpaa2_rxq, *dpaa2_txq;

	memset(&value, 0, sizeof(union dpni_statistics));

	PMD_INIT_FUNC_TRACE();

	if (!dpni) {
		DPAA2_PMD_ERR("dpni is NULL");
		return -EINVAL;
	}

	if (!stats) {
		DPAA2_PMD_ERR("stats is NULL");
		return -EINVAL;
	}

	/* Get Counters from page_0 */
	retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
				      page0, 0, &value);
	if (retcode)
		goto err;

	stats->ipackets = value.page_0.ingress_all_frames;
	stats->ibytes = value.page_0.ingress_all_bytes;

	/* Get Counters from page_1 */
	retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
				      page1, 0, &value);
	if (retcode)
		goto err;

	stats->opackets = value.page_1.egress_all_frames;
	stats->obytes = value.page_1.egress_all_bytes;

	/* Get Counters from page_2 */
	retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
				      page2, 0, &value);
	if (retcode)
		goto err;

	/* Ingress drop frame count due to configured rules */
	stats->ierrors = value.page_2.ingress_filtered_frames;
	/* Ingress drop frame count due to error */
	stats->ierrors += value.page_2.ingress_discarded_frames;

	stats->oerrors = value.page_2.egress_discarded_frames;
	stats->imissed = value.page_2.ingress_nobuffer_discards;

	/* Fill in per queue stats */
	for (i = 0; (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) &&
		(i < priv->nb_rx_queues || i < priv->nb_tx_queues); ++i) {
		dpaa2_rxq = (struct dpaa2_queue *)priv->rx_vq[i];
		dpaa2_txq = (struct dpaa2_queue *)priv->tx_vq[i];
		if (dpaa2_rxq)
			stats->q_ipackets[i] = dpaa2_rxq->rx_pkts;
		if (dpaa2_txq)
			stats->q_opackets[i] = dpaa2_txq->tx_pkts;

		/* Byte counting is not implemented */
		stats->q_ibytes[i]   = 0;
		stats->q_obytes[i]   = 0;
	}

	return 0;

err:
	DPAA2_PMD_ERR("Operation not completed:Error Code = %d", retcode);
	return retcode;
};

static int
dpaa2_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
		     unsigned int n)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	int32_t retcode;
	union dpni_statistics value[3] = {};
	unsigned int i = 0, num = RTE_DIM(dpaa2_xstats_strings);

	if (n < num)
		return num;

	if (xstats == NULL)
		return 0;

	/* Get Counters from page_0 */
	retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
				      0, 0, &value[0]);
	if (retcode)
		goto err;

	/* Get Counters from page_1 */
	retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
				      1, 0, &value[1]);
	if (retcode)
		goto err;

	/* Get Counters from page_2 */
	retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
				      2, 0, &value[2]);
	if (retcode)
		goto err;

	for (i = 0; i < num; i++) {
		xstats[i].id = i;
		xstats[i].value = value[dpaa2_xstats_strings[i].page_id].
			raw.counter[dpaa2_xstats_strings[i].stats_id];
	}
	return i;
err:
	DPAA2_PMD_ERR("Error in obtaining extended stats (%d)", retcode);
	return retcode;
}

static int
dpaa2_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
		       struct rte_eth_xstat_name *xstats_names,
		       unsigned int limit)
{
	unsigned int i, stat_cnt = RTE_DIM(dpaa2_xstats_strings);

	if (limit < stat_cnt)
		return stat_cnt;

	if (xstats_names != NULL)
		for (i = 0; i < stat_cnt; i++)
			snprintf(xstats_names[i].name,
				 sizeof(xstats_names[i].name),
				 "%s",
				 dpaa2_xstats_strings[i].name);

	return stat_cnt;
}

static int
dpaa2_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
		       uint64_t *values, unsigned int n)
{
	unsigned int i, stat_cnt = RTE_DIM(dpaa2_xstats_strings);
	uint64_t values_copy[stat_cnt];

	if (!ids) {
		struct dpaa2_dev_priv *priv = dev->data->dev_private;
		struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
		int32_t retcode;
		union dpni_statistics value[3] = {};

		if (n < stat_cnt)
			return stat_cnt;

		if (!values)
			return 0;

		/* Get Counters from page_0 */
		retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
					      0, 0, &value[0]);
		if (retcode)
			return 0;

		/* Get Counters from page_1 */
		retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
					      1, 0, &value[1]);
		if (retcode)
			return 0;

		/* Get Counters from page_2 */
		retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
					      2, 0, &value[2]);
		if (retcode)
			return 0;

		for (i = 0; i < stat_cnt; i++) {
			values[i] = value[dpaa2_xstats_strings[i].page_id].
				raw.counter[dpaa2_xstats_strings[i].stats_id];
		}
		return stat_cnt;
	}

	dpaa2_xstats_get_by_id(dev, NULL, values_copy, stat_cnt);

	for (i = 0; i < n; i++) {
		if (ids[i] >= stat_cnt) {
			DPAA2_PMD_ERR("xstats id value isn't valid");
			return -1;
		}
		values[i] = values_copy[ids[i]];
	}
	return n;
}

static int
dpaa2_xstats_get_names_by_id(
	struct rte_eth_dev *dev,
	struct rte_eth_xstat_name *xstats_names,
	const uint64_t *ids,
	unsigned int limit)
{
	unsigned int i, stat_cnt = RTE_DIM(dpaa2_xstats_strings);
	struct rte_eth_xstat_name xstats_names_copy[stat_cnt];

	if (!ids)
		return dpaa2_xstats_get_names(dev, xstats_names, limit);

	dpaa2_xstats_get_names(dev, xstats_names_copy, limit);

	for (i = 0; i < limit; i++) {
		if (ids[i] >= stat_cnt) {
			DPAA2_PMD_ERR("xstats id value isn't valid");
			return -1;
		}
		strcpy(xstats_names[i].name, xstats_names_copy[ids[i]].name);
	}
	return limit;
}

static void
dpaa2_dev_stats_reset(struct rte_eth_dev *dev)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	int32_t retcode;
	int i;
	struct dpaa2_queue *dpaa2_q;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		DPAA2_PMD_ERR("dpni is NULL");
		return;
	}

	retcode = dpni_reset_statistics(dpni, CMD_PRI_LOW, priv->token);
	if (retcode)
		goto error;

	/* Reset the per queue stats in dpaa2_queue structure */
	for (i = 0; i < priv->nb_rx_queues; i++) {
		dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
		if (dpaa2_q)
			dpaa2_q->rx_pkts = 0;
	}

	for (i = 0; i < priv->nb_tx_queues; i++) {
		dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i];
		if (dpaa2_q)
			dpaa2_q->tx_pkts = 0;
	}

	return;

error:
	DPAA2_PMD_ERR("Operation not completed:Error Code = %d", retcode);
	return;
};

/* return 0 means link status changed, -1 means not changed */
static int
dpaa2_dev_link_update(struct rte_eth_dev *dev,
		      int wait_to_complete __rte_unused)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	struct rte_eth_link link;
	struct dpni_link_state state = {0};

	if (dpni == NULL) {
		DPAA2_PMD_ERR("dpni is NULL");
		return 0;
	}

	ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state);
	if (ret < 0) {
		DPAA2_PMD_DEBUG("error: dpni_get_link_state %d", ret);
		return -1;
	}

	memset(&link, 0, sizeof(struct rte_eth_link));
	link.link_status = state.up;
	link.link_speed = state.rate;

	if (state.options & DPNI_LINK_OPT_HALF_DUPLEX)
		link.link_duplex = ETH_LINK_HALF_DUPLEX;
	else
		link.link_duplex = ETH_LINK_FULL_DUPLEX;

	ret = rte_eth_linkstatus_set(dev, &link);
	if (ret == -1)
		DPAA2_PMD_DEBUG("No change in status");
	else
		DPAA2_PMD_INFO("Port %d Link is %s\n", dev->data->port_id,
			       link.link_status ? "Up" : "Down");

	return ret;
}

/**
 * Toggle the DPNI to enable, if not already enabled.
 * This is not strictly PHY up/down - it is more of logical toggling.
 */
static int
dpaa2_dev_set_link_up(struct rte_eth_dev *dev)
{
	int ret = -EINVAL;
	struct dpaa2_dev_priv *priv;
	struct fsl_mc_io *dpni;
	int en = 0;
	struct dpni_link_state state = {0};

	priv = dev->data->dev_private;
	dpni = (struct fsl_mc_io *)priv->hw;

	if (dpni == NULL) {
		DPAA2_PMD_ERR("dpni is NULL");
		return ret;
	}

	/* Check if DPNI is currently enabled */
	ret = dpni_is_enabled(dpni, CMD_PRI_LOW, priv->token, &en);
	if (ret) {
		/* Unable to obtain dpni status; Not continuing */
		DPAA2_PMD_ERR("Interface Link UP failed (%d)", ret);
		return -EINVAL;
	}

	/* Enable link if not already enabled */
	if (!en) {
		ret = dpni_enable(dpni, CMD_PRI_LOW, priv->token);
		if (ret) {
			DPAA2_PMD_ERR("Interface Link UP failed (%d)", ret);
			return -EINVAL;
		}
	}
	ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state);
	if (ret < 0) {
		DPAA2_PMD_DEBUG("Unable to get link state (%d)", ret);
		return -1;
	}

	/* changing tx burst function to start enqueues */
	dev->tx_pkt_burst = dpaa2_dev_tx;
	dev->data->dev_link.link_status = state.up;

	if (state.up)
		DPAA2_PMD_INFO("Port %d Link is Up", dev->data->port_id);
	else
		DPAA2_PMD_INFO("Port %d Link is Down", dev->data->port_id);
	return ret;
}

/**
 * Toggle the DPNI to disable, if not already disabled.
 * This is not strictly PHY up/down - it is more of logical toggling.
 */
static int
dpaa2_dev_set_link_down(struct rte_eth_dev *dev)
{
	int ret = -EINVAL;
	struct dpaa2_dev_priv *priv;
	struct fsl_mc_io *dpni;
	int dpni_enabled = 0;
	int retries = 10;

	PMD_INIT_FUNC_TRACE();

	priv = dev->data->dev_private;
	dpni = (struct fsl_mc_io *)priv->hw;

	if (dpni == NULL) {
		DPAA2_PMD_ERR("Device has not yet been configured");
		return ret;
	}

	/* changing tx burst function to avoid any more enqueues */
	dev->tx_pkt_burst = dummy_dev_tx;

	/* Loop while dpni_disable() attempts to drain the egress FQs
	 * and confirm them back to us.
	 */
	do {
		ret = dpni_disable(dpni, 0, priv->token);
		if (ret) {
			DPAA2_PMD_ERR("dpni disable failed (%d)", ret);
			return ret;
		}
		ret = dpni_is_enabled(dpni, 0, priv->token, &dpni_enabled);
		if (ret) {
			DPAA2_PMD_ERR("dpni enable check failed (%d)", ret);
			return ret;
		}
		if (dpni_enabled)
			/* Allow the MC some slack */
			rte_delay_us(100 * 1000);
	} while (dpni_enabled && --retries);

	if (!retries) {
		DPAA2_PMD_WARN("Retry count exceeded disabling dpni");
		/* todo - we may have to manually cleanup queues.
		 */
	} else {
		DPAA2_PMD_INFO("Port %d Link DOWN successful",
			       dev->data->port_id);
	}

	dev->data->dev_link.link_status = 0;

	return ret;
}

static int
dpaa2_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	int ret = -EINVAL;
	struct dpaa2_dev_priv *priv;
	struct fsl_mc_io *dpni;
	struct dpni_link_state state = {0};

	PMD_INIT_FUNC_TRACE();

	priv = dev->data->dev_private;
	dpni = (struct fsl_mc_io *)priv->hw;

	if (dpni == NULL || fc_conf == NULL) {
		DPAA2_PMD_ERR("device not configured");
		return ret;
	}

	ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state);
	if (ret) {
		DPAA2_PMD_ERR("error: dpni_get_link_state %d", ret);
		return ret;
	}

	memset(fc_conf, 0, sizeof(struct rte_eth_fc_conf));
	if (state.options & DPNI_LINK_OPT_PAUSE) {
		/* DPNI_LINK_OPT_PAUSE set
		 *  if ASYM_PAUSE not set,
		 *	RX Side flow control (handle received Pause frame)
		 *	TX side flow control (send Pause frame)
		 *  if ASYM_PAUSE set,
		 *	RX Side flow control (handle received Pause frame)
		 *	No TX side flow control (send Pause frame disabled)
		 */
		if (!(state.options & DPNI_LINK_OPT_ASYM_PAUSE))
			fc_conf->mode = RTE_FC_FULL;
		else
			fc_conf->mode = RTE_FC_RX_PAUSE;
	} else {
		/* DPNI_LINK_OPT_PAUSE not set
		 *  if ASYM_PAUSE set,
		 *	TX side flow control (send Pause frame)
		 *	No RX side flow control (No action on pause frame rx)
		 *  if ASYM_PAUSE not set,
		 *	Flow control disabled
		 */
		if (state.options & DPNI_LINK_OPT_ASYM_PAUSE)
			fc_conf->mode = RTE_FC_TX_PAUSE;
		else
			fc_conf->mode = RTE_FC_NONE;
	}

	return ret;
}

static int
dpaa2_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	int ret = -EINVAL;
	struct dpaa2_dev_priv *priv;
	struct fsl_mc_io *dpni;
	struct dpni_link_state state = {0};
	struct dpni_link_cfg cfg = {0};

	PMD_INIT_FUNC_TRACE();

	priv = dev->data->dev_private;
	dpni = (struct fsl_mc_io *)priv->hw;

	if (dpni == NULL) {
		DPAA2_PMD_ERR("dpni is NULL");
		return ret;
	}

	/* It is necessary to obtain the current state before setting fc_conf
	 * as MC would return error in case rate, autoneg or duplex values are
	 * different.
	 */
	ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state);
	if (ret) {
		DPAA2_PMD_ERR("Unable to get link state (err=%d)", ret);
		return -1;
	}

	/* Disable link before setting configuration */
	dpaa2_dev_set_link_down(dev);

	/* Based on fc_conf, update cfg */
	cfg.rate = state.rate;
	cfg.options = state.options;

	/* update cfg with fc_conf */
	switch (fc_conf->mode) {
	case RTE_FC_FULL:
		/* Full flow control;
		 * OPT_PAUSE set, ASYM_PAUSE not set
		 */
		cfg.options |= DPNI_LINK_OPT_PAUSE;
		cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE;
		break;
	case RTE_FC_TX_PAUSE:
		/* Enable RX flow control
		 * OPT_PAUSE not set;
		 * ASYM_PAUSE set;
		 */
		cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE;
		cfg.options &= ~DPNI_LINK_OPT_PAUSE;
		break;
	case RTE_FC_RX_PAUSE:
		/* Enable TX Flow control
		 * OPT_PAUSE set
		 * ASYM_PAUSE set
		 */
		cfg.options |= DPNI_LINK_OPT_PAUSE;
		cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE;
		break;
	case RTE_FC_NONE:
		/* Disable Flow control
		 * OPT_PAUSE not set
		 * ASYM_PAUSE not set
		 */
		cfg.options &= ~DPNI_LINK_OPT_PAUSE;
		cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE;
		break;
	default:
		DPAA2_PMD_ERR("Incorrect Flow control flag (%d)",
			      fc_conf->mode);
		return -1;
	}

	ret = dpni_set_link_cfg(dpni, CMD_PRI_LOW, priv->token, &cfg);
	if (ret)
		DPAA2_PMD_ERR("Unable to set Link configuration (err=%d)",
			      ret);

	/* Enable link */
	dpaa2_dev_set_link_up(dev);

	return ret;
}

static int
dpaa2_dev_rss_hash_update(struct rte_eth_dev *dev,
			  struct rte_eth_rss_conf *rss_conf)
{
	struct rte_eth_dev_data *data = dev->data;
	struct rte_eth_conf *eth_conf = &data->dev_conf;
	int ret;

	PMD_INIT_FUNC_TRACE();

	if (rss_conf->rss_hf) {
		ret = dpaa2_setup_flow_dist(dev, rss_conf->rss_hf);
		if (ret) {
			DPAA2_PMD_ERR("Unable to set flow dist");
			return ret;
		}
	} else {
		ret = dpaa2_remove_flow_dist(dev, 0);
		if (ret) {
			DPAA2_PMD_ERR("Unable to remove flow dist");
			return ret;
		}
	}
	eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_conf->rss_hf;
	return 0;
}

static int
dpaa2_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
			    struct rte_eth_rss_conf *rss_conf)
{
	struct rte_eth_dev_data *data = dev->data;
	struct rte_eth_conf *eth_conf = &data->dev_conf;

	/* dpaa2 does not support rss_key, so length should be 0 */
	rss_conf->rss_key_len = 0;
	rss_conf->rss_hf = eth_conf->rx_adv_conf.rss_conf.rss_hf;
	return 0;
}

int dpaa2_eth_eventq_attach(const struct rte_eth_dev *dev,
		int eth_rx_queue_id,
		uint16_t dpcon_id,
		const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
{
	struct dpaa2_dev_priv *eth_priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)eth_priv->hw;
	struct dpaa2_queue *dpaa2_ethq = eth_priv->rx_vq[eth_rx_queue_id];
	uint8_t flow_id = dpaa2_ethq->flow_id;
	struct dpni_queue cfg;
	uint8_t options;
	int ret;

	if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_PARALLEL)
		dpaa2_ethq->cb = dpaa2_dev_process_parallel_event;
	else if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_ATOMIC)
		dpaa2_ethq->cb = dpaa2_dev_process_atomic_event;
	else
		return -EINVAL;

	memset(&cfg, 0, sizeof(struct dpni_queue));
	options = DPNI_QUEUE_OPT_DEST;
	cfg.destination.type = DPNI_DEST_DPCON;
	cfg.destination.id = dpcon_id;
	cfg.destination.priority = queue_conf->ev.priority;

	if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_ATOMIC) {
		options |= DPNI_QUEUE_OPT_HOLD_ACTIVE;
		cfg.destination.hold_active = 1;
	}

	options |= DPNI_QUEUE_OPT_USER_CTX;
	cfg.user_context = (size_t)(dpaa2_ethq);

	ret = dpni_set_queue(dpni, CMD_PRI_LOW, eth_priv->token, DPNI_QUEUE_RX,
			     dpaa2_ethq->tc_index, flow_id, options, &cfg);
	if (ret) {
		DPAA2_PMD_ERR("Error in dpni_set_queue: ret: %d", ret);
		return ret;
	}

	memcpy(&dpaa2_ethq->ev, &queue_conf->ev, sizeof(struct rte_event));

	return 0;
}

int dpaa2_eth_eventq_detach(const struct rte_eth_dev *dev,
		int eth_rx_queue_id)
{
	struct dpaa2_dev_priv *eth_priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)eth_priv->hw;
	struct dpaa2_queue *dpaa2_ethq = eth_priv->rx_vq[eth_rx_queue_id];
	uint8_t flow_id = dpaa2_ethq->flow_id;
	struct dpni_queue cfg;
	uint8_t options;
	int ret;

	memset(&cfg, 0, sizeof(struct dpni_queue));
	options = DPNI_QUEUE_OPT_DEST;
	cfg.destination.type = DPNI_DEST_NONE;

	ret = dpni_set_queue(dpni, CMD_PRI_LOW, eth_priv->token, DPNI_QUEUE_RX,
			     dpaa2_ethq->tc_index, flow_id, options, &cfg);
	if (ret)
		DPAA2_PMD_ERR("Error in dpni_set_queue: ret: %d", ret);

	return ret;
}

static struct eth_dev_ops dpaa2_ethdev_ops = {
	.dev_configure = dpaa2_eth_dev_configure,
	.dev_start = dpaa2_dev_start,
	.dev_stop = dpaa2_dev_stop,
	.dev_close = dpaa2_dev_close,
	.promiscuous_enable = dpaa2_dev_promiscuous_enable,
	.promiscuous_disable = dpaa2_dev_promiscuous_disable,
	.allmulticast_enable = dpaa2_dev_allmulticast_enable,
	.allmulticast_disable = dpaa2_dev_allmulticast_disable,
	.dev_set_link_up = dpaa2_dev_set_link_up,
	.dev_set_link_down = dpaa2_dev_set_link_down,
	.link_update = dpaa2_dev_link_update,
	.stats_get = dpaa2_dev_stats_get,
	.xstats_get = dpaa2_dev_xstats_get,
	.xstats_get_by_id = dpaa2_xstats_get_by_id,
	.xstats_get_names_by_id = dpaa2_xstats_get_names_by_id,
	.xstats_get_names = dpaa2_xstats_get_names,
	.stats_reset = dpaa2_dev_stats_reset,
	.xstats_reset = dpaa2_dev_stats_reset,
	.fw_version_get = dpaa2_fw_version_get,
	.dev_infos_get = dpaa2_dev_info_get,
	.dev_supported_ptypes_get = dpaa2_supported_ptypes_get,
	.mtu_set = dpaa2_dev_mtu_set,
	.vlan_filter_set = dpaa2_vlan_filter_set,
	.vlan_offload_set = dpaa2_vlan_offload_set,
	.rx_queue_setup = dpaa2_dev_rx_queue_setup,
	.rx_queue_release = dpaa2_dev_rx_queue_release,
	.tx_queue_setup = dpaa2_dev_tx_queue_setup,
	.tx_queue_release = dpaa2_dev_tx_queue_release,
	.rx_queue_count = dpaa2_dev_rx_queue_count,
	.flow_ctrl_get = dpaa2_flow_ctrl_get,
	.flow_ctrl_set = dpaa2_flow_ctrl_set,
	.mac_addr_add = dpaa2_dev_add_mac_addr,
	.mac_addr_remove = dpaa2_dev_remove_mac_addr,
	.mac_addr_set = dpaa2_dev_set_mac_addr,
	.rss_hash_update = dpaa2_dev_rss_hash_update,
	.rss_hash_conf_get = dpaa2_dev_rss_hash_conf_get,
};

/* Populate the mac address from physically available (u-boot/firmware) and/or
 * one set by higher layers like MC (restool) etc.
 * Returns the table of MAC entries (multiple entries)
 */
static int
populate_mac_addr(struct fsl_mc_io *dpni_dev, struct dpaa2_dev_priv *priv,
		  struct ether_addr *mac_entry)
{
	int ret;
	struct ether_addr phy_mac, prime_mac;

	memset(&phy_mac, 0, sizeof(struct ether_addr));
	memset(&prime_mac, 0, sizeof(struct ether_addr));

	/* Get the physical device MAC address */
	ret = dpni_get_port_mac_addr(dpni_dev, CMD_PRI_LOW, priv->token,
				     phy_mac.addr_bytes);
	if (ret) {
		DPAA2_PMD_ERR("DPNI get physical port MAC failed: %d", ret);
		goto cleanup;
	}

	ret = dpni_get_primary_mac_addr(dpni_dev, CMD_PRI_LOW, priv->token,
					prime_mac.addr_bytes);
	if (ret) {
		DPAA2_PMD_ERR("DPNI get Prime port MAC failed: %d", ret);
		goto cleanup;
	}

	/* Now that both MAC have been obtained, do:
	 * if not_empty_mac(phy) && phy != Prime, overwrite prime with Phy
	 *     and return phy
	 * If empty_mac(phy), return prime.
	 * if both are empty, create random MAC, set as prime and return
	 */
	if (!is_zero_ether_addr(&phy_mac)) {
		/* If the addresses are not same, overwrite prime */
		if (!is_same_ether_addr(&phy_mac, &prime_mac)) {
			ret = dpni_set_primary_mac_addr(dpni_dev, CMD_PRI_LOW,
							priv->token,
							phy_mac.addr_bytes);
			if (ret) {
				DPAA2_PMD_ERR("Unable to set MAC Address: %d",
					      ret);
				goto cleanup;
			}
			memcpy(&prime_mac, &phy_mac,
				sizeof(struct ether_addr));
		}
	} else if (is_zero_ether_addr(&prime_mac)) {
		/* In case phys and prime, both are zero, create random MAC */
		eth_random_addr(prime_mac.addr_bytes);
		ret = dpni_set_primary_mac_addr(dpni_dev, CMD_PRI_LOW,
						priv->token,
						prime_mac.addr_bytes);
		if (ret) {
			DPAA2_PMD_ERR("Unable to set MAC Address: %d", ret);
			goto cleanup;
		}
	}

	/* prime_mac the final MAC address */
	memcpy(mac_entry, &prime_mac, sizeof(struct ether_addr));
	return 0;

cleanup:
	return -1;
}

static int
dpaa2_dev_init(struct rte_eth_dev *eth_dev)
{
	struct rte_device *dev = eth_dev->device;
	struct rte_dpaa2_device *dpaa2_dev;
	struct fsl_mc_io *dpni_dev;
	struct dpni_attr attr;
	struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
	struct dpni_buffer_layout layout;
	int ret, hw_id;

	PMD_INIT_FUNC_TRACE();

	/* For secondary processes, the primary has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		/* In case of secondary, only burst and ops API need to be
		 * plugged.
		 */
		eth_dev->dev_ops = &dpaa2_ethdev_ops;
		eth_dev->rx_pkt_burst = dpaa2_dev_prefetch_rx;
		eth_dev->tx_pkt_burst = dpaa2_dev_tx;
		return 0;
	}

	dpaa2_dev = container_of(dev, struct rte_dpaa2_device, device);

	hw_id = dpaa2_dev->object_id;

	dpni_dev = rte_malloc(NULL, sizeof(struct fsl_mc_io), 0);
	if (!dpni_dev) {
		DPAA2_PMD_ERR("Memory allocation failed for dpni device");
		return -1;
	}

	dpni_dev->regs = rte_mcp_ptr_list[0];
	ret = dpni_open(dpni_dev, CMD_PRI_LOW, hw_id, &priv->token);
	if (ret) {
		DPAA2_PMD_ERR(
			     "Failure in opening dpni@%d with err code %d",
			     hw_id, ret);
		rte_free(dpni_dev);
		return -1;
	}

	/* Clean the device first */
	ret = dpni_reset(dpni_dev, CMD_PRI_LOW, priv->token);
	if (ret) {
		DPAA2_PMD_ERR("Failure cleaning dpni@%d with err code %d",
			      hw_id, ret);
		goto init_err;
	}

	ret = dpni_get_attributes(dpni_dev, CMD_PRI_LOW, priv->token, &attr);
	if (ret) {
		DPAA2_PMD_ERR(
			     "Failure in get dpni@%d attribute, err code %d",
			     hw_id, ret);
		goto init_err;
	}

	priv->num_rx_tc = attr.num_rx_tcs;

	/* Resetting the "num_rx_queues" to equal number of queues in first TC
	 * as only one TC is supported on Rx Side. Once Multiple TCs will be
	 * in use for Rx processing then this will be changed or removed.
	 */
	priv->nb_rx_queues = attr.num_queues;

	/* Using number of TX queues as number of TX TCs */
	priv->nb_tx_queues = attr.num_tx_tcs;

	DPAA2_PMD_DEBUG("RX-TC= %d, nb_rx_queues= %d, nb_tx_queues=%d",
			priv->num_rx_tc, priv->nb_rx_queues,
			priv->nb_tx_queues);

	priv->hw = dpni_dev;
	priv->hw_id = hw_id;
	priv->options = attr.options;
	priv->max_mac_filters = attr.mac_filter_entries;
	priv->max_vlan_filters = attr.vlan_filter_entries;
	priv->flags = 0;

	/* Allocate memory for hardware structure for queues */
	ret = dpaa2_alloc_rx_tx_queues(eth_dev);
	if (ret) {
		DPAA2_PMD_ERR("Queue allocation Failed");
		goto init_err;
	}

	/* Allocate memory for storing MAC addresses.
	 * Table of mac_filter_entries size is allocated so that RTE ether lib
	 * can add MAC entries when rte_eth_dev_mac_addr_add is called.
	 */
	eth_dev->data->mac_addrs = rte_zmalloc("dpni",
		ETHER_ADDR_LEN * attr.mac_filter_entries, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		DPAA2_PMD_ERR(
		   "Failed to allocate %d bytes needed to store MAC addresses",
		   ETHER_ADDR_LEN * attr.mac_filter_entries);
		ret = -ENOMEM;
		goto init_err;
	}

	ret = populate_mac_addr(dpni_dev, priv, &eth_dev->data->mac_addrs[0]);
	if (ret) {
		DPAA2_PMD_ERR("Unable to fetch MAC Address for device");
		rte_free(eth_dev->data->mac_addrs);
		eth_dev->data->mac_addrs = NULL;
		goto init_err;
	}

	/* ... tx buffer layout ... */
	memset(&layout, 0, sizeof(struct dpni_buffer_layout));
	layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
	layout.pass_frame_status = 1;
	ret = dpni_set_buffer_layout(dpni_dev, CMD_PRI_LOW, priv->token,
				     DPNI_QUEUE_TX, &layout);
	if (ret) {
		DPAA2_PMD_ERR("Error (%d) in setting tx buffer layout", ret);
		goto init_err;
	}

	/* ... tx-conf and error buffer layout ... */
	memset(&layout, 0, sizeof(struct dpni_buffer_layout));
	layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
	layout.pass_frame_status = 1;
	ret = dpni_set_buffer_layout(dpni_dev, CMD_PRI_LOW, priv->token,
				     DPNI_QUEUE_TX_CONFIRM, &layout);
	if (ret) {
		DPAA2_PMD_ERR("Error (%d) in setting tx-conf buffer layout",
			      ret);
		goto init_err;
	}

	eth_dev->dev_ops = &dpaa2_ethdev_ops;

	eth_dev->rx_pkt_burst = dpaa2_dev_prefetch_rx;
	eth_dev->tx_pkt_burst = dpaa2_dev_tx;

	RTE_LOG(INFO, PMD, "%s: netdev created\n", eth_dev->data->name);
	return 0;
init_err:
	dpaa2_dev_uninit(eth_dev);
	return ret;
}

static int
dpaa2_dev_uninit(struct rte_eth_dev *eth_dev)
{
	struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	int ret;

	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	if (!dpni) {
		DPAA2_PMD_WARN("Already closed or not started");
		return -1;
	}

	dpaa2_dev_close(eth_dev);

	dpaa2_free_rx_tx_queues(eth_dev);

	/* Close the device at underlying layer */
	ret = dpni_close(dpni, CMD_PRI_LOW, priv->token);
	if (ret) {
		DPAA2_PMD_ERR(
			     "Failure closing dpni device with err code %d",
			     ret);
	}

	/* Free the allocated memory for ethernet private data and dpni */
	priv->hw = NULL;
	rte_free(dpni);

	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;

	DPAA2_PMD_INFO("%s: netdev deleted", eth_dev->data->name);
	return 0;
}

static int
rte_dpaa2_probe(struct rte_dpaa2_driver *dpaa2_drv,
		struct rte_dpaa2_device *dpaa2_dev)
{
	struct rte_eth_dev *eth_dev;
	int diag;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		eth_dev = rte_eth_dev_allocate(dpaa2_dev->device.name);
		if (!eth_dev)
			return -ENODEV;
		eth_dev->data->dev_private = rte_zmalloc(
						"ethdev private structure",
						sizeof(struct dpaa2_dev_priv),
						RTE_CACHE_LINE_SIZE);
		if (eth_dev->data->dev_private == NULL) {
			DPAA2_PMD_CRIT(
				"Unable to allocate memory for private data");
			rte_eth_dev_release_port(eth_dev);
			return -ENOMEM;
		}
	} else {
		eth_dev = rte_eth_dev_attach_secondary(dpaa2_dev->device.name);
		if (!eth_dev)
			return -ENODEV;
	}

	eth_dev->device = &dpaa2_dev->device;

	dpaa2_dev->eth_dev = eth_dev;
	eth_dev->data->rx_mbuf_alloc_failed = 0;

	if (dpaa2_drv->drv_flags & RTE_DPAA2_DRV_INTR_LSC)
		eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;

	/* Invoke PMD device initialization function */
	diag = dpaa2_dev_init(eth_dev);
	if (diag == 0) {
		rte_eth_dev_probing_finish(eth_dev);
		return 0;
	}

	rte_eth_dev_release_port(eth_dev);
	return diag;
}

static int
rte_dpaa2_remove(struct rte_dpaa2_device *dpaa2_dev)
{
	struct rte_eth_dev *eth_dev;

	eth_dev = dpaa2_dev->eth_dev;
	dpaa2_dev_uninit(eth_dev);

	rte_eth_dev_release_port(eth_dev);

	return 0;
}

static struct rte_dpaa2_driver rte_dpaa2_pmd = {
	.drv_flags = RTE_DPAA2_DRV_INTR_LSC | RTE_DPAA2_DRV_IOVA_AS_VA,
	.drv_type = DPAA2_ETH,
	.probe = rte_dpaa2_probe,
	.remove = rte_dpaa2_remove,
};

RTE_PMD_REGISTER_DPAA2(net_dpaa2, rte_dpaa2_pmd);

RTE_INIT(dpaa2_pmd_init_log)
{
	dpaa2_logtype_pmd = rte_log_register("pmd.net.dpaa2");
	if (dpaa2_logtype_pmd >= 0)
		rte_log_set_level(dpaa2_logtype_pmd, RTE_LOG_NOTICE);
}