1 /* * SPDX-License-Identifier: BSD-3-Clause 2 * 3 * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved. 4 * Copyright 2016 NXP 5 * 6 */ 7 8 #include <time.h> 9 #include <net/if.h> 10 11 #include <rte_mbuf.h> 12 #include <rte_ethdev_driver.h> 13 #include <rte_malloc.h> 14 #include <rte_memcpy.h> 15 #include <rte_string_fns.h> 16 #include <rte_cycles.h> 17 #include <rte_kvargs.h> 18 #include <rte_dev.h> 19 #include <rte_fslmc.h> 20 #include <rte_flow_driver.h> 21 22 #include "dpaa2_pmd_logs.h" 23 #include <fslmc_vfio.h> 24 #include <dpaa2_hw_pvt.h> 25 #include <dpaa2_hw_mempool.h> 26 #include <dpaa2_hw_dpio.h> 27 #include <mc/fsl_dpmng.h> 28 #include "dpaa2_ethdev.h" 29 #include "dpaa2_sparser.h" 30 #include <fsl_qbman_debug.h> 31 32 #define DRIVER_LOOPBACK_MODE "drv_loopback" 33 #define DRIVER_NO_PREFETCH_MODE "drv_no_prefetch" 34 35 /* Supported Rx offloads */ 36 static uint64_t dev_rx_offloads_sup = 37 DEV_RX_OFFLOAD_CHECKSUM | 38 DEV_RX_OFFLOAD_SCTP_CKSUM | 39 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | 40 DEV_RX_OFFLOAD_OUTER_UDP_CKSUM | 41 DEV_RX_OFFLOAD_VLAN_STRIP | 42 DEV_RX_OFFLOAD_VLAN_FILTER | 43 DEV_RX_OFFLOAD_JUMBO_FRAME | 44 DEV_RX_OFFLOAD_TIMESTAMP; 45 46 /* Rx offloads which cannot be disabled */ 47 static uint64_t dev_rx_offloads_nodis = 48 DEV_RX_OFFLOAD_RSS_HASH | 49 DEV_RX_OFFLOAD_SCATTER; 50 51 /* Supported Tx offloads */ 52 static uint64_t dev_tx_offloads_sup = 53 DEV_TX_OFFLOAD_VLAN_INSERT | 54 DEV_TX_OFFLOAD_IPV4_CKSUM | 55 DEV_TX_OFFLOAD_UDP_CKSUM | 56 DEV_TX_OFFLOAD_TCP_CKSUM | 57 DEV_TX_OFFLOAD_SCTP_CKSUM | 58 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | 59 DEV_TX_OFFLOAD_MT_LOCKFREE | 60 DEV_TX_OFFLOAD_MBUF_FAST_FREE; 61 62 /* Tx offloads which cannot be disabled */ 63 static uint64_t dev_tx_offloads_nodis = 64 DEV_TX_OFFLOAD_MULTI_SEGS; 65 66 /* enable timestamp in mbuf */ 67 enum pmd_dpaa2_ts dpaa2_enable_ts; 68 69 struct rte_dpaa2_xstats_name_off { 70 char name[RTE_ETH_XSTATS_NAME_SIZE]; 71 uint8_t page_id; /* dpni statistics page id */ 72 uint8_t stats_id; /* stats id in the given page */ 73 }; 74 75 static const struct rte_dpaa2_xstats_name_off dpaa2_xstats_strings[] = { 76 {"ingress_multicast_frames", 0, 2}, 77 {"ingress_multicast_bytes", 0, 3}, 78 {"ingress_broadcast_frames", 0, 4}, 79 {"ingress_broadcast_bytes", 0, 5}, 80 {"egress_multicast_frames", 1, 2}, 81 {"egress_multicast_bytes", 1, 3}, 82 {"egress_broadcast_frames", 1, 4}, 83 {"egress_broadcast_bytes", 1, 5}, 84 {"ingress_filtered_frames", 2, 0}, 85 {"ingress_discarded_frames", 2, 1}, 86 {"ingress_nobuffer_discards", 2, 2}, 87 {"egress_discarded_frames", 2, 3}, 88 {"egress_confirmed_frames", 2, 4}, 89 {"cgr_reject_frames", 4, 0}, 90 {"cgr_reject_bytes", 4, 1}, 91 }; 92 93 static const enum rte_filter_op dpaa2_supported_filter_ops[] = { 94 RTE_ETH_FILTER_ADD, 95 RTE_ETH_FILTER_DELETE, 96 RTE_ETH_FILTER_UPDATE, 97 RTE_ETH_FILTER_FLUSH, 98 RTE_ETH_FILTER_GET 99 }; 100 101 static struct rte_dpaa2_driver rte_dpaa2_pmd; 102 static int dpaa2_dev_uninit(struct rte_eth_dev *eth_dev); 103 static int dpaa2_dev_link_update(struct rte_eth_dev *dev, 104 int wait_to_complete); 105 static int dpaa2_dev_set_link_up(struct rte_eth_dev *dev); 106 static int dpaa2_dev_set_link_down(struct rte_eth_dev *dev); 107 static int dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu); 108 109 int dpaa2_logtype_pmd; 110 111 void 112 rte_pmd_dpaa2_set_timestamp(enum pmd_dpaa2_ts enable) 113 { 114 dpaa2_enable_ts = enable; 115 } 116 117 static int 118 dpaa2_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) 119 { 120 int ret; 121 struct dpaa2_dev_priv *priv = dev->data->dev_private; 122 struct fsl_mc_io *dpni = dev->process_private; 123 124 PMD_INIT_FUNC_TRACE(); 125 126 if (dpni == NULL) { 127 DPAA2_PMD_ERR("dpni is NULL"); 128 return -1; 129 } 130 131 if (on) 132 ret = dpni_add_vlan_id(dpni, CMD_PRI_LOW, priv->token, 133 vlan_id, 0, 0, 0); 134 else 135 ret = dpni_remove_vlan_id(dpni, CMD_PRI_LOW, 136 priv->token, vlan_id); 137 138 if (ret < 0) 139 DPAA2_PMD_ERR("ret = %d Unable to add/rem vlan %d hwid =%d", 140 ret, vlan_id, priv->hw_id); 141 142 return ret; 143 } 144 145 static int 146 dpaa2_vlan_offload_set(struct rte_eth_dev *dev, int mask) 147 { 148 struct dpaa2_dev_priv *priv = dev->data->dev_private; 149 struct fsl_mc_io *dpni = dev->process_private; 150 int ret = 0; 151 152 PMD_INIT_FUNC_TRACE(); 153 154 if (mask & ETH_VLAN_FILTER_MASK) { 155 /* VLAN Filter not avaialble */ 156 if (!priv->max_vlan_filters) { 157 DPAA2_PMD_INFO("VLAN filter not available"); 158 return -ENOTSUP; 159 } 160 161 if (dev->data->dev_conf.rxmode.offloads & 162 DEV_RX_OFFLOAD_VLAN_FILTER) 163 ret = dpni_enable_vlan_filter(dpni, CMD_PRI_LOW, 164 priv->token, true); 165 else 166 ret = dpni_enable_vlan_filter(dpni, CMD_PRI_LOW, 167 priv->token, false); 168 if (ret < 0) 169 DPAA2_PMD_INFO("Unable to set vlan filter = %d", ret); 170 } 171 172 return ret; 173 } 174 175 static int 176 dpaa2_vlan_tpid_set(struct rte_eth_dev *dev, 177 enum rte_vlan_type vlan_type __rte_unused, 178 uint16_t tpid) 179 { 180 struct dpaa2_dev_priv *priv = dev->data->dev_private; 181 struct fsl_mc_io *dpni = dev->process_private; 182 int ret = -ENOTSUP; 183 184 PMD_INIT_FUNC_TRACE(); 185 186 /* nothing to be done for standard vlan tpids */ 187 if (tpid == 0x8100 || tpid == 0x88A8) 188 return 0; 189 190 ret = dpni_add_custom_tpid(dpni, CMD_PRI_LOW, 191 priv->token, tpid); 192 if (ret < 0) 193 DPAA2_PMD_INFO("Unable to set vlan tpid = %d", ret); 194 /* if already configured tpids, remove them first */ 195 if (ret == -EBUSY) { 196 struct dpni_custom_tpid_cfg tpid_list = {0}; 197 198 ret = dpni_get_custom_tpid(dpni, CMD_PRI_LOW, 199 priv->token, &tpid_list); 200 if (ret < 0) 201 goto fail; 202 ret = dpni_remove_custom_tpid(dpni, CMD_PRI_LOW, 203 priv->token, tpid_list.tpid1); 204 if (ret < 0) 205 goto fail; 206 ret = dpni_add_custom_tpid(dpni, CMD_PRI_LOW, 207 priv->token, tpid); 208 } 209 fail: 210 return ret; 211 } 212 213 static int 214 dpaa2_fw_version_get(struct rte_eth_dev *dev, 215 char *fw_version, 216 size_t fw_size) 217 { 218 int ret; 219 struct fsl_mc_io *dpni = dev->process_private; 220 struct mc_soc_version mc_plat_info = {0}; 221 struct mc_version mc_ver_info = {0}; 222 223 PMD_INIT_FUNC_TRACE(); 224 225 if (mc_get_soc_version(dpni, CMD_PRI_LOW, &mc_plat_info)) 226 DPAA2_PMD_WARN("\tmc_get_soc_version failed"); 227 228 if (mc_get_version(dpni, CMD_PRI_LOW, &mc_ver_info)) 229 DPAA2_PMD_WARN("\tmc_get_version failed"); 230 231 ret = snprintf(fw_version, fw_size, 232 "%x-%d.%d.%d", 233 mc_plat_info.svr, 234 mc_ver_info.major, 235 mc_ver_info.minor, 236 mc_ver_info.revision); 237 238 ret += 1; /* add the size of '\0' */ 239 if (fw_size < (uint32_t)ret) 240 return ret; 241 else 242 return 0; 243 } 244 245 static int 246 dpaa2_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) 247 { 248 struct dpaa2_dev_priv *priv = dev->data->dev_private; 249 250 PMD_INIT_FUNC_TRACE(); 251 252 dev_info->max_mac_addrs = priv->max_mac_filters; 253 dev_info->max_rx_pktlen = DPAA2_MAX_RX_PKT_LEN; 254 dev_info->min_rx_bufsize = DPAA2_MIN_RX_BUF_SIZE; 255 dev_info->max_rx_queues = (uint16_t)priv->nb_rx_queues; 256 dev_info->max_tx_queues = (uint16_t)priv->nb_tx_queues; 257 dev_info->rx_offload_capa = dev_rx_offloads_sup | 258 dev_rx_offloads_nodis; 259 dev_info->tx_offload_capa = dev_tx_offloads_sup | 260 dev_tx_offloads_nodis; 261 dev_info->speed_capa = ETH_LINK_SPEED_1G | 262 ETH_LINK_SPEED_2_5G | 263 ETH_LINK_SPEED_10G; 264 265 dev_info->max_hash_mac_addrs = 0; 266 dev_info->max_vfs = 0; 267 dev_info->max_vmdq_pools = ETH_16_POOLS; 268 dev_info->flow_type_rss_offloads = DPAA2_RSS_OFFLOAD_ALL; 269 270 return 0; 271 } 272 273 static int 274 dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev) 275 { 276 struct dpaa2_dev_priv *priv = dev->data->dev_private; 277 uint16_t dist_idx; 278 uint32_t vq_id; 279 uint8_t num_rxqueue_per_tc; 280 struct dpaa2_queue *mc_q, *mcq; 281 uint32_t tot_queues; 282 int i; 283 struct dpaa2_queue *dpaa2_q; 284 285 PMD_INIT_FUNC_TRACE(); 286 287 num_rxqueue_per_tc = (priv->nb_rx_queues / priv->num_rx_tc); 288 if (priv->tx_conf_en) 289 tot_queues = priv->nb_rx_queues + 2 * priv->nb_tx_queues; 290 else 291 tot_queues = priv->nb_rx_queues + priv->nb_tx_queues; 292 mc_q = rte_malloc(NULL, sizeof(struct dpaa2_queue) * tot_queues, 293 RTE_CACHE_LINE_SIZE); 294 if (!mc_q) { 295 DPAA2_PMD_ERR("Memory allocation failed for rx/tx queues"); 296 return -1; 297 } 298 299 for (i = 0; i < priv->nb_rx_queues; i++) { 300 mc_q->eth_data = dev->data; 301 priv->rx_vq[i] = mc_q++; 302 dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i]; 303 dpaa2_q->q_storage = rte_malloc("dq_storage", 304 sizeof(struct queue_storage_info_t), 305 RTE_CACHE_LINE_SIZE); 306 if (!dpaa2_q->q_storage) 307 goto fail; 308 309 memset(dpaa2_q->q_storage, 0, 310 sizeof(struct queue_storage_info_t)); 311 if (dpaa2_alloc_dq_storage(dpaa2_q->q_storage)) 312 goto fail; 313 } 314 315 for (i = 0; i < priv->nb_tx_queues; i++) { 316 mc_q->eth_data = dev->data; 317 mc_q->flow_id = 0xffff; 318 priv->tx_vq[i] = mc_q++; 319 dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i]; 320 dpaa2_q->cscn = rte_malloc(NULL, 321 sizeof(struct qbman_result), 16); 322 if (!dpaa2_q->cscn) 323 goto fail_tx; 324 } 325 326 if (priv->tx_conf_en) { 327 /*Setup tx confirmation queues*/ 328 for (i = 0; i < priv->nb_tx_queues; i++) { 329 mc_q->eth_data = dev->data; 330 mc_q->tc_index = i; 331 mc_q->flow_id = 0; 332 priv->tx_conf_vq[i] = mc_q++; 333 dpaa2_q = (struct dpaa2_queue *)priv->tx_conf_vq[i]; 334 dpaa2_q->q_storage = 335 rte_malloc("dq_storage", 336 sizeof(struct queue_storage_info_t), 337 RTE_CACHE_LINE_SIZE); 338 if (!dpaa2_q->q_storage) 339 goto fail_tx_conf; 340 341 memset(dpaa2_q->q_storage, 0, 342 sizeof(struct queue_storage_info_t)); 343 if (dpaa2_alloc_dq_storage(dpaa2_q->q_storage)) 344 goto fail_tx_conf; 345 } 346 } 347 348 vq_id = 0; 349 for (dist_idx = 0; dist_idx < priv->nb_rx_queues; dist_idx++) { 350 mcq = (struct dpaa2_queue *)priv->rx_vq[vq_id]; 351 mcq->tc_index = dist_idx / num_rxqueue_per_tc; 352 mcq->flow_id = dist_idx % num_rxqueue_per_tc; 353 vq_id++; 354 } 355 356 return 0; 357 fail_tx_conf: 358 i -= 1; 359 while (i >= 0) { 360 dpaa2_q = (struct dpaa2_queue *)priv->tx_conf_vq[i]; 361 rte_free(dpaa2_q->q_storage); 362 priv->tx_conf_vq[i--] = NULL; 363 } 364 i = priv->nb_tx_queues; 365 fail_tx: 366 i -= 1; 367 while (i >= 0) { 368 dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i]; 369 rte_free(dpaa2_q->cscn); 370 priv->tx_vq[i--] = NULL; 371 } 372 i = priv->nb_rx_queues; 373 fail: 374 i -= 1; 375 mc_q = priv->rx_vq[0]; 376 while (i >= 0) { 377 dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i]; 378 dpaa2_free_dq_storage(dpaa2_q->q_storage); 379 rte_free(dpaa2_q->q_storage); 380 priv->rx_vq[i--] = NULL; 381 } 382 rte_free(mc_q); 383 return -1; 384 } 385 386 static void 387 dpaa2_free_rx_tx_queues(struct rte_eth_dev *dev) 388 { 389 struct dpaa2_dev_priv *priv = dev->data->dev_private; 390 struct dpaa2_queue *dpaa2_q; 391 int i; 392 393 PMD_INIT_FUNC_TRACE(); 394 395 /* Queue allocation base */ 396 if (priv->rx_vq[0]) { 397 /* cleaning up queue storage */ 398 for (i = 0; i < priv->nb_rx_queues; i++) { 399 dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i]; 400 if (dpaa2_q->q_storage) 401 rte_free(dpaa2_q->q_storage); 402 } 403 /* cleanup tx queue cscn */ 404 for (i = 0; i < priv->nb_tx_queues; i++) { 405 dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i]; 406 rte_free(dpaa2_q->cscn); 407 } 408 if (priv->tx_conf_en) { 409 /* cleanup tx conf queue storage */ 410 for (i = 0; i < priv->nb_tx_queues; i++) { 411 dpaa2_q = (struct dpaa2_queue *) 412 priv->tx_conf_vq[i]; 413 rte_free(dpaa2_q->q_storage); 414 } 415 } 416 /*free memory for all queues (RX+TX) */ 417 rte_free(priv->rx_vq[0]); 418 priv->rx_vq[0] = NULL; 419 } 420 } 421 422 static int 423 dpaa2_eth_dev_configure(struct rte_eth_dev *dev) 424 { 425 struct dpaa2_dev_priv *priv = dev->data->dev_private; 426 struct fsl_mc_io *dpni = dev->process_private; 427 struct rte_eth_conf *eth_conf = &dev->data->dev_conf; 428 uint64_t rx_offloads = eth_conf->rxmode.offloads; 429 uint64_t tx_offloads = eth_conf->txmode.offloads; 430 int rx_l3_csum_offload = false; 431 int rx_l4_csum_offload = false; 432 int tx_l3_csum_offload = false; 433 int tx_l4_csum_offload = false; 434 int ret; 435 436 PMD_INIT_FUNC_TRACE(); 437 438 /* Rx offloads which are enabled by default */ 439 if (dev_rx_offloads_nodis & ~rx_offloads) { 440 DPAA2_PMD_INFO( 441 "Some of rx offloads enabled by default - requested 0x%" PRIx64 442 " fixed are 0x%" PRIx64, 443 rx_offloads, dev_rx_offloads_nodis); 444 } 445 446 /* Tx offloads which are enabled by default */ 447 if (dev_tx_offloads_nodis & ~tx_offloads) { 448 DPAA2_PMD_INFO( 449 "Some of tx offloads enabled by default - requested 0x%" PRIx64 450 " fixed are 0x%" PRIx64, 451 tx_offloads, dev_tx_offloads_nodis); 452 } 453 454 if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) { 455 if (eth_conf->rxmode.max_rx_pkt_len <= DPAA2_MAX_RX_PKT_LEN) { 456 ret = dpni_set_max_frame_length(dpni, CMD_PRI_LOW, 457 priv->token, eth_conf->rxmode.max_rx_pkt_len 458 - RTE_ETHER_CRC_LEN); 459 if (ret) { 460 DPAA2_PMD_ERR( 461 "Unable to set mtu. check config"); 462 return ret; 463 } 464 dev->data->mtu = 465 dev->data->dev_conf.rxmode.max_rx_pkt_len - 466 RTE_ETHER_HDR_LEN - RTE_ETHER_CRC_LEN - 467 VLAN_TAG_SIZE; 468 } else { 469 return -1; 470 } 471 } 472 473 if (eth_conf->rxmode.mq_mode == ETH_MQ_RX_RSS) { 474 ret = dpaa2_setup_flow_dist(dev, 475 eth_conf->rx_adv_conf.rss_conf.rss_hf); 476 if (ret) { 477 DPAA2_PMD_ERR("Unable to set flow distribution." 478 "Check queue config"); 479 return ret; 480 } 481 } 482 483 if (rx_offloads & DEV_RX_OFFLOAD_IPV4_CKSUM) 484 rx_l3_csum_offload = true; 485 486 if ((rx_offloads & DEV_RX_OFFLOAD_UDP_CKSUM) || 487 (rx_offloads & DEV_RX_OFFLOAD_TCP_CKSUM) || 488 (rx_offloads & DEV_RX_OFFLOAD_SCTP_CKSUM)) 489 rx_l4_csum_offload = true; 490 491 ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token, 492 DPNI_OFF_RX_L3_CSUM, rx_l3_csum_offload); 493 if (ret) { 494 DPAA2_PMD_ERR("Error to set RX l3 csum:Error = %d", ret); 495 return ret; 496 } 497 498 ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token, 499 DPNI_OFF_RX_L4_CSUM, rx_l4_csum_offload); 500 if (ret) { 501 DPAA2_PMD_ERR("Error to get RX l4 csum:Error = %d", ret); 502 return ret; 503 } 504 505 if (rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP) 506 dpaa2_enable_ts = true; 507 508 if (tx_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM) 509 tx_l3_csum_offload = true; 510 511 if ((tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM) || 512 (tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM) || 513 (tx_offloads & DEV_TX_OFFLOAD_SCTP_CKSUM)) 514 tx_l4_csum_offload = true; 515 516 ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token, 517 DPNI_OFF_TX_L3_CSUM, tx_l3_csum_offload); 518 if (ret) { 519 DPAA2_PMD_ERR("Error to set TX l3 csum:Error = %d", ret); 520 return ret; 521 } 522 523 ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token, 524 DPNI_OFF_TX_L4_CSUM, tx_l4_csum_offload); 525 if (ret) { 526 DPAA2_PMD_ERR("Error to get TX l4 csum:Error = %d", ret); 527 return ret; 528 } 529 530 /* Enabling hash results in FD requires setting DPNI_FLCTYPE_HASH in 531 * dpni_set_offload API. Setting this FLCTYPE for DPNI sets the FD[SC] 532 * to 0 for LS2 in the hardware thus disabling data/annotation 533 * stashing. For LX2 this is fixed in hardware and thus hash result and 534 * parse results can be received in FD using this option. 535 */ 536 if (dpaa2_svr_family == SVR_LX2160A) { 537 ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token, 538 DPNI_FLCTYPE_HASH, true); 539 if (ret) { 540 DPAA2_PMD_ERR("Error setting FLCTYPE: Err = %d", ret); 541 return ret; 542 } 543 } 544 545 if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER) 546 dpaa2_vlan_offload_set(dev, ETH_VLAN_FILTER_MASK); 547 548 return 0; 549 } 550 551 /* Function to setup RX flow information. It contains traffic class ID, 552 * flow ID, destination configuration etc. 553 */ 554 static int 555 dpaa2_dev_rx_queue_setup(struct rte_eth_dev *dev, 556 uint16_t rx_queue_id, 557 uint16_t nb_rx_desc, 558 unsigned int socket_id __rte_unused, 559 const struct rte_eth_rxconf *rx_conf __rte_unused, 560 struct rte_mempool *mb_pool) 561 { 562 struct dpaa2_dev_priv *priv = dev->data->dev_private; 563 struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; 564 struct dpaa2_queue *dpaa2_q; 565 struct dpni_queue cfg; 566 uint8_t options = 0; 567 uint8_t flow_id; 568 uint32_t bpid; 569 int i, ret; 570 571 PMD_INIT_FUNC_TRACE(); 572 573 DPAA2_PMD_DEBUG("dev =%p, queue =%d, pool = %p, conf =%p", 574 dev, rx_queue_id, mb_pool, rx_conf); 575 576 if (!priv->bp_list || priv->bp_list->mp != mb_pool) { 577 bpid = mempool_to_bpid(mb_pool); 578 ret = dpaa2_attach_bp_list(priv, 579 rte_dpaa2_bpid_info[bpid].bp_list); 580 if (ret) 581 return ret; 582 } 583 dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[rx_queue_id]; 584 dpaa2_q->mb_pool = mb_pool; /**< mbuf pool to populate RX ring. */ 585 dpaa2_q->bp_array = rte_dpaa2_bpid_info; 586 587 /*Get the flow id from given VQ id*/ 588 flow_id = dpaa2_q->flow_id; 589 memset(&cfg, 0, sizeof(struct dpni_queue)); 590 591 options = options | DPNI_QUEUE_OPT_USER_CTX; 592 cfg.user_context = (size_t)(dpaa2_q); 593 594 /* check if a private cgr available. */ 595 for (i = 0; i < priv->max_cgs; i++) { 596 if (!priv->cgid_in_use[i]) { 597 priv->cgid_in_use[i] = 1; 598 break; 599 } 600 } 601 602 if (i < priv->max_cgs) { 603 options |= DPNI_QUEUE_OPT_SET_CGID; 604 cfg.cgid = i; 605 dpaa2_q->cgid = cfg.cgid; 606 } else { 607 dpaa2_q->cgid = 0xff; 608 } 609 610 /*if ls2088 or rev2 device, enable the stashing */ 611 612 if ((dpaa2_svr_family & 0xffff0000) != SVR_LS2080A) { 613 options |= DPNI_QUEUE_OPT_FLC; 614 cfg.flc.stash_control = true; 615 cfg.flc.value &= 0xFFFFFFFFFFFFFFC0; 616 /* 00 00 00 - last 6 bit represent annotation, context stashing, 617 * data stashing setting 01 01 00 (0x14) 618 * (in following order ->DS AS CS) 619 * to enable 1 line data, 1 line annotation. 620 * For LX2, this setting should be 01 00 00 (0x10) 621 */ 622 if ((dpaa2_svr_family & 0xffff0000) == SVR_LX2160A) 623 cfg.flc.value |= 0x10; 624 else 625 cfg.flc.value |= 0x14; 626 } 627 ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_RX, 628 dpaa2_q->tc_index, flow_id, options, &cfg); 629 if (ret) { 630 DPAA2_PMD_ERR("Error in setting the rx flow: = %d", ret); 631 return -1; 632 } 633 634 if (!(priv->flags & DPAA2_RX_TAILDROP_OFF)) { 635 struct dpni_taildrop taildrop; 636 637 taildrop.enable = 1; 638 639 /* Private CGR will use tail drop length as nb_rx_desc. 640 * for rest cases we can use standard byte based tail drop. 641 * There is no HW restriction, but number of CGRs are limited, 642 * hence this restriction is placed. 643 */ 644 if (dpaa2_q->cgid != 0xff) { 645 /*enabling per rx queue congestion control */ 646 taildrop.threshold = nb_rx_desc; 647 taildrop.units = DPNI_CONGESTION_UNIT_FRAMES; 648 taildrop.oal = 0; 649 DPAA2_PMD_DEBUG("Enabling CG Tail Drop on queue = %d", 650 rx_queue_id); 651 ret = dpni_set_taildrop(dpni, CMD_PRI_LOW, priv->token, 652 DPNI_CP_CONGESTION_GROUP, 653 DPNI_QUEUE_RX, 654 dpaa2_q->tc_index, 655 dpaa2_q->cgid, &taildrop); 656 } else { 657 /*enabling per rx queue congestion control */ 658 taildrop.threshold = CONG_THRESHOLD_RX_BYTES_Q; 659 taildrop.units = DPNI_CONGESTION_UNIT_BYTES; 660 taildrop.oal = CONG_RX_OAL; 661 DPAA2_PMD_DEBUG("Enabling Byte based Drop on queue= %d", 662 rx_queue_id); 663 ret = dpni_set_taildrop(dpni, CMD_PRI_LOW, priv->token, 664 DPNI_CP_QUEUE, DPNI_QUEUE_RX, 665 dpaa2_q->tc_index, flow_id, 666 &taildrop); 667 } 668 if (ret) { 669 DPAA2_PMD_ERR("Error in setting taildrop. err=(%d)", 670 ret); 671 return -1; 672 } 673 } else { /* Disable tail Drop */ 674 struct dpni_taildrop taildrop = {0}; 675 DPAA2_PMD_INFO("Tail drop is disabled on queue"); 676 677 taildrop.enable = 0; 678 if (dpaa2_q->cgid != 0xff) { 679 ret = dpni_set_taildrop(dpni, CMD_PRI_LOW, priv->token, 680 DPNI_CP_CONGESTION_GROUP, DPNI_QUEUE_RX, 681 dpaa2_q->tc_index, 682 dpaa2_q->cgid, &taildrop); 683 } else { 684 ret = dpni_set_taildrop(dpni, CMD_PRI_LOW, priv->token, 685 DPNI_CP_QUEUE, DPNI_QUEUE_RX, 686 dpaa2_q->tc_index, flow_id, &taildrop); 687 } 688 if (ret) { 689 DPAA2_PMD_ERR("Error in setting taildrop. err=(%d)", 690 ret); 691 return -1; 692 } 693 } 694 695 dev->data->rx_queues[rx_queue_id] = dpaa2_q; 696 return 0; 697 } 698 699 static int 700 dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev, 701 uint16_t tx_queue_id, 702 uint16_t nb_tx_desc __rte_unused, 703 unsigned int socket_id __rte_unused, 704 const struct rte_eth_txconf *tx_conf __rte_unused) 705 { 706 struct dpaa2_dev_priv *priv = dev->data->dev_private; 707 struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *) 708 priv->tx_vq[tx_queue_id]; 709 struct dpaa2_queue *dpaa2_tx_conf_q = (struct dpaa2_queue *) 710 priv->tx_conf_vq[tx_queue_id]; 711 struct fsl_mc_io *dpni = dev->process_private; 712 struct dpni_queue tx_conf_cfg; 713 struct dpni_queue tx_flow_cfg; 714 uint8_t options = 0, flow_id; 715 struct dpni_queue_id qid; 716 uint32_t tc_id; 717 int ret; 718 719 PMD_INIT_FUNC_TRACE(); 720 721 /* Return if queue already configured */ 722 if (dpaa2_q->flow_id != 0xffff) { 723 dev->data->tx_queues[tx_queue_id] = dpaa2_q; 724 return 0; 725 } 726 727 memset(&tx_conf_cfg, 0, sizeof(struct dpni_queue)); 728 memset(&tx_flow_cfg, 0, sizeof(struct dpni_queue)); 729 730 tc_id = tx_queue_id; 731 flow_id = 0; 732 733 ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_TX, 734 tc_id, flow_id, options, &tx_flow_cfg); 735 if (ret) { 736 DPAA2_PMD_ERR("Error in setting the tx flow: " 737 "tc_id=%d, flow=%d err=%d", 738 tc_id, flow_id, ret); 739 return -1; 740 } 741 742 dpaa2_q->flow_id = flow_id; 743 744 if (tx_queue_id == 0) { 745 /*Set tx-conf and error configuration*/ 746 if (priv->tx_conf_en) 747 ret = dpni_set_tx_confirmation_mode(dpni, CMD_PRI_LOW, 748 priv->token, 749 DPNI_CONF_AFFINE); 750 else 751 ret = dpni_set_tx_confirmation_mode(dpni, CMD_PRI_LOW, 752 priv->token, 753 DPNI_CONF_DISABLE); 754 if (ret) { 755 DPAA2_PMD_ERR("Error in set tx conf mode settings: " 756 "err=%d", ret); 757 return -1; 758 } 759 } 760 dpaa2_q->tc_index = tc_id; 761 762 ret = dpni_get_queue(dpni, CMD_PRI_LOW, priv->token, 763 DPNI_QUEUE_TX, dpaa2_q->tc_index, 764 dpaa2_q->flow_id, &tx_flow_cfg, &qid); 765 if (ret) { 766 DPAA2_PMD_ERR("Error in getting LFQID err=%d", ret); 767 return -1; 768 } 769 dpaa2_q->fqid = qid.fqid; 770 771 if (!(priv->flags & DPAA2_TX_CGR_OFF)) { 772 struct dpni_congestion_notification_cfg cong_notif_cfg = {0}; 773 774 cong_notif_cfg.units = DPNI_CONGESTION_UNIT_FRAMES; 775 cong_notif_cfg.threshold_entry = CONG_ENTER_TX_THRESHOLD; 776 /* Notify that the queue is not congested when the data in 777 * the queue is below this thershold. 778 */ 779 cong_notif_cfg.threshold_exit = CONG_EXIT_TX_THRESHOLD; 780 cong_notif_cfg.message_ctx = 0; 781 cong_notif_cfg.message_iova = 782 (size_t)DPAA2_VADDR_TO_IOVA(dpaa2_q->cscn); 783 cong_notif_cfg.dest_cfg.dest_type = DPNI_DEST_NONE; 784 cong_notif_cfg.notification_mode = 785 DPNI_CONG_OPT_WRITE_MEM_ON_ENTER | 786 DPNI_CONG_OPT_WRITE_MEM_ON_EXIT | 787 DPNI_CONG_OPT_COHERENT_WRITE; 788 cong_notif_cfg.cg_point = DPNI_CP_QUEUE; 789 790 ret = dpni_set_congestion_notification(dpni, CMD_PRI_LOW, 791 priv->token, 792 DPNI_QUEUE_TX, 793 tc_id, 794 &cong_notif_cfg); 795 if (ret) { 796 DPAA2_PMD_ERR( 797 "Error in setting tx congestion notification: " 798 "err=%d", ret); 799 return -ret; 800 } 801 } 802 dpaa2_q->cb_eqresp_free = dpaa2_dev_free_eqresp_buf; 803 dev->data->tx_queues[tx_queue_id] = dpaa2_q; 804 805 if (priv->tx_conf_en) { 806 dpaa2_q->tx_conf_queue = dpaa2_tx_conf_q; 807 options = options | DPNI_QUEUE_OPT_USER_CTX; 808 tx_conf_cfg.user_context = (size_t)(dpaa2_q); 809 ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, 810 DPNI_QUEUE_TX_CONFIRM, dpaa2_tx_conf_q->tc_index, 811 dpaa2_tx_conf_q->flow_id, options, &tx_conf_cfg); 812 if (ret) { 813 DPAA2_PMD_ERR("Error in setting the tx conf flow: " 814 "tc_index=%d, flow=%d err=%d", 815 dpaa2_tx_conf_q->tc_index, 816 dpaa2_tx_conf_q->flow_id, ret); 817 return -1; 818 } 819 820 ret = dpni_get_queue(dpni, CMD_PRI_LOW, priv->token, 821 DPNI_QUEUE_TX_CONFIRM, dpaa2_tx_conf_q->tc_index, 822 dpaa2_tx_conf_q->flow_id, &tx_conf_cfg, &qid); 823 if (ret) { 824 DPAA2_PMD_ERR("Error in getting LFQID err=%d", ret); 825 return -1; 826 } 827 dpaa2_tx_conf_q->fqid = qid.fqid; 828 } 829 return 0; 830 } 831 832 static void 833 dpaa2_dev_rx_queue_release(void *q __rte_unused) 834 { 835 struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)q; 836 struct dpaa2_dev_priv *priv = dpaa2_q->eth_data->dev_private; 837 struct fsl_mc_io *dpni = 838 (struct fsl_mc_io *)priv->eth_dev->process_private; 839 uint8_t options = 0; 840 int ret; 841 struct dpni_queue cfg; 842 843 memset(&cfg, 0, sizeof(struct dpni_queue)); 844 PMD_INIT_FUNC_TRACE(); 845 if (dpaa2_q->cgid != 0xff) { 846 options = DPNI_QUEUE_OPT_CLEAR_CGID; 847 cfg.cgid = dpaa2_q->cgid; 848 849 ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, 850 DPNI_QUEUE_RX, 851 dpaa2_q->tc_index, dpaa2_q->flow_id, 852 options, &cfg); 853 if (ret) 854 DPAA2_PMD_ERR("Unable to clear CGR from q=%u err=%d", 855 dpaa2_q->fqid, ret); 856 priv->cgid_in_use[dpaa2_q->cgid] = 0; 857 dpaa2_q->cgid = 0xff; 858 } 859 } 860 861 static void 862 dpaa2_dev_tx_queue_release(void *q __rte_unused) 863 { 864 PMD_INIT_FUNC_TRACE(); 865 } 866 867 static uint32_t 868 dpaa2_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id) 869 { 870 int32_t ret; 871 struct dpaa2_dev_priv *priv = dev->data->dev_private; 872 struct dpaa2_queue *dpaa2_q; 873 struct qbman_swp *swp; 874 struct qbman_fq_query_np_rslt state; 875 uint32_t frame_cnt = 0; 876 877 PMD_INIT_FUNC_TRACE(); 878 879 if (unlikely(!DPAA2_PER_LCORE_DPIO)) { 880 ret = dpaa2_affine_qbman_swp(); 881 if (ret) { 882 DPAA2_PMD_ERR("Failure in affining portal"); 883 return -EINVAL; 884 } 885 } 886 swp = DPAA2_PER_LCORE_PORTAL; 887 888 dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[rx_queue_id]; 889 890 if (qbman_fq_query_state(swp, dpaa2_q->fqid, &state) == 0) { 891 frame_cnt = qbman_fq_state_frame_count(&state); 892 DPAA2_PMD_DEBUG("RX frame count for q(%d) is %u", 893 rx_queue_id, frame_cnt); 894 } 895 return frame_cnt; 896 } 897 898 static const uint32_t * 899 dpaa2_supported_ptypes_get(struct rte_eth_dev *dev) 900 { 901 static const uint32_t ptypes[] = { 902 /*todo -= add more types */ 903 RTE_PTYPE_L2_ETHER, 904 RTE_PTYPE_L3_IPV4, 905 RTE_PTYPE_L3_IPV4_EXT, 906 RTE_PTYPE_L3_IPV6, 907 RTE_PTYPE_L3_IPV6_EXT, 908 RTE_PTYPE_L4_TCP, 909 RTE_PTYPE_L4_UDP, 910 RTE_PTYPE_L4_SCTP, 911 RTE_PTYPE_L4_ICMP, 912 RTE_PTYPE_UNKNOWN 913 }; 914 915 if (dev->rx_pkt_burst == dpaa2_dev_prefetch_rx || 916 dev->rx_pkt_burst == dpaa2_dev_rx || 917 dev->rx_pkt_burst == dpaa2_dev_loopback_rx) 918 return ptypes; 919 return NULL; 920 } 921 922 /** 923 * Dpaa2 link Interrupt handler 924 * 925 * @param param 926 * The address of parameter (struct rte_eth_dev *) regsitered before. 927 * 928 * @return 929 * void 930 */ 931 static void 932 dpaa2_interrupt_handler(void *param) 933 { 934 struct rte_eth_dev *dev = param; 935 struct dpaa2_dev_priv *priv = dev->data->dev_private; 936 struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; 937 int ret; 938 int irq_index = DPNI_IRQ_INDEX; 939 unsigned int status = 0, clear = 0; 940 941 PMD_INIT_FUNC_TRACE(); 942 943 if (dpni == NULL) { 944 DPAA2_PMD_ERR("dpni is NULL"); 945 return; 946 } 947 948 ret = dpni_get_irq_status(dpni, CMD_PRI_LOW, priv->token, 949 irq_index, &status); 950 if (unlikely(ret)) { 951 DPAA2_PMD_ERR("Can't get irq status (err %d)", ret); 952 clear = 0xffffffff; 953 goto out; 954 } 955 956 if (status & DPNI_IRQ_EVENT_LINK_CHANGED) { 957 clear = DPNI_IRQ_EVENT_LINK_CHANGED; 958 dpaa2_dev_link_update(dev, 0); 959 /* calling all the apps registered for link status event */ 960 _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, 961 NULL); 962 } 963 out: 964 ret = dpni_clear_irq_status(dpni, CMD_PRI_LOW, priv->token, 965 irq_index, clear); 966 if (unlikely(ret)) 967 DPAA2_PMD_ERR("Can't clear irq status (err %d)", ret); 968 } 969 970 static int 971 dpaa2_eth_setup_irqs(struct rte_eth_dev *dev, int enable) 972 { 973 int err = 0; 974 struct dpaa2_dev_priv *priv = dev->data->dev_private; 975 struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; 976 int irq_index = DPNI_IRQ_INDEX; 977 unsigned int mask = DPNI_IRQ_EVENT_LINK_CHANGED; 978 979 PMD_INIT_FUNC_TRACE(); 980 981 err = dpni_set_irq_mask(dpni, CMD_PRI_LOW, priv->token, 982 irq_index, mask); 983 if (err < 0) { 984 DPAA2_PMD_ERR("Error: dpni_set_irq_mask():%d (%s)", err, 985 strerror(-err)); 986 return err; 987 } 988 989 err = dpni_set_irq_enable(dpni, CMD_PRI_LOW, priv->token, 990 irq_index, enable); 991 if (err < 0) 992 DPAA2_PMD_ERR("Error: dpni_set_irq_enable():%d (%s)", err, 993 strerror(-err)); 994 995 return err; 996 } 997 998 static int 999 dpaa2_dev_start(struct rte_eth_dev *dev) 1000 { 1001 struct rte_device *rdev = dev->device; 1002 struct rte_dpaa2_device *dpaa2_dev; 1003 struct rte_eth_dev_data *data = dev->data; 1004 struct dpaa2_dev_priv *priv = data->dev_private; 1005 struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; 1006 struct dpni_queue cfg; 1007 struct dpni_error_cfg err_cfg; 1008 uint16_t qdid; 1009 struct dpni_queue_id qid; 1010 struct dpaa2_queue *dpaa2_q; 1011 int ret, i; 1012 struct rte_intr_handle *intr_handle; 1013 1014 dpaa2_dev = container_of(rdev, struct rte_dpaa2_device, device); 1015 intr_handle = &dpaa2_dev->intr_handle; 1016 1017 PMD_INIT_FUNC_TRACE(); 1018 1019 ret = dpni_enable(dpni, CMD_PRI_LOW, priv->token); 1020 if (ret) { 1021 DPAA2_PMD_ERR("Failure in enabling dpni %d device: err=%d", 1022 priv->hw_id, ret); 1023 return ret; 1024 } 1025 1026 /* Power up the phy. Needed to make the link go UP */ 1027 dpaa2_dev_set_link_up(dev); 1028 1029 ret = dpni_get_qdid(dpni, CMD_PRI_LOW, priv->token, 1030 DPNI_QUEUE_TX, &qdid); 1031 if (ret) { 1032 DPAA2_PMD_ERR("Error in getting qdid: err=%d", ret); 1033 return ret; 1034 } 1035 priv->qdid = qdid; 1036 1037 for (i = 0; i < data->nb_rx_queues; i++) { 1038 dpaa2_q = (struct dpaa2_queue *)data->rx_queues[i]; 1039 ret = dpni_get_queue(dpni, CMD_PRI_LOW, priv->token, 1040 DPNI_QUEUE_RX, dpaa2_q->tc_index, 1041 dpaa2_q->flow_id, &cfg, &qid); 1042 if (ret) { 1043 DPAA2_PMD_ERR("Error in getting flow information: " 1044 "err=%d", ret); 1045 return ret; 1046 } 1047 dpaa2_q->fqid = qid.fqid; 1048 } 1049 1050 /*checksum errors, send them to normal path and set it in annotation */ 1051 err_cfg.errors = DPNI_ERROR_L3CE | DPNI_ERROR_L4CE; 1052 err_cfg.errors |= DPNI_ERROR_PHE; 1053 1054 err_cfg.error_action = DPNI_ERROR_ACTION_CONTINUE; 1055 err_cfg.set_frame_annotation = true; 1056 1057 ret = dpni_set_errors_behavior(dpni, CMD_PRI_LOW, 1058 priv->token, &err_cfg); 1059 if (ret) { 1060 DPAA2_PMD_ERR("Error to dpni_set_errors_behavior: code = %d", 1061 ret); 1062 return ret; 1063 } 1064 1065 /* if the interrupts were configured on this devices*/ 1066 if (intr_handle && (intr_handle->fd) && 1067 (dev->data->dev_conf.intr_conf.lsc != 0)) { 1068 /* Registering LSC interrupt handler */ 1069 rte_intr_callback_register(intr_handle, 1070 dpaa2_interrupt_handler, 1071 (void *)dev); 1072 1073 /* enable vfio intr/eventfd mapping 1074 * Interrupt index 0 is required, so we can not use 1075 * rte_intr_enable. 1076 */ 1077 rte_dpaa2_intr_enable(intr_handle, DPNI_IRQ_INDEX); 1078 1079 /* enable dpni_irqs */ 1080 dpaa2_eth_setup_irqs(dev, 1); 1081 } 1082 1083 /* Change the tx burst function if ordered queues are used */ 1084 if (priv->en_ordered) 1085 dev->tx_pkt_burst = dpaa2_dev_tx_ordered; 1086 1087 return 0; 1088 } 1089 1090 /** 1091 * This routine disables all traffic on the adapter by issuing a 1092 * global reset on the MAC. 1093 */ 1094 static void 1095 dpaa2_dev_stop(struct rte_eth_dev *dev) 1096 { 1097 struct dpaa2_dev_priv *priv = dev->data->dev_private; 1098 struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; 1099 int ret; 1100 struct rte_eth_link link; 1101 struct rte_intr_handle *intr_handle = dev->intr_handle; 1102 1103 PMD_INIT_FUNC_TRACE(); 1104 1105 /* reset interrupt callback */ 1106 if (intr_handle && (intr_handle->fd) && 1107 (dev->data->dev_conf.intr_conf.lsc != 0)) { 1108 /*disable dpni irqs */ 1109 dpaa2_eth_setup_irqs(dev, 0); 1110 1111 /* disable vfio intr before callback unregister */ 1112 rte_dpaa2_intr_disable(intr_handle, DPNI_IRQ_INDEX); 1113 1114 /* Unregistering LSC interrupt handler */ 1115 rte_intr_callback_unregister(intr_handle, 1116 dpaa2_interrupt_handler, 1117 (void *)dev); 1118 } 1119 1120 dpaa2_dev_set_link_down(dev); 1121 1122 ret = dpni_disable(dpni, CMD_PRI_LOW, priv->token); 1123 if (ret) { 1124 DPAA2_PMD_ERR("Failure (ret %d) in disabling dpni %d dev", 1125 ret, priv->hw_id); 1126 return; 1127 } 1128 1129 /* clear the recorded link status */ 1130 memset(&link, 0, sizeof(link)); 1131 rte_eth_linkstatus_set(dev, &link); 1132 } 1133 1134 static void 1135 dpaa2_dev_close(struct rte_eth_dev *dev) 1136 { 1137 struct dpaa2_dev_priv *priv = dev->data->dev_private; 1138 struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; 1139 int ret; 1140 struct rte_eth_link link; 1141 1142 PMD_INIT_FUNC_TRACE(); 1143 1144 dpaa2_flow_clean(dev); 1145 1146 /* Clean the device first */ 1147 ret = dpni_reset(dpni, CMD_PRI_LOW, priv->token); 1148 if (ret) { 1149 DPAA2_PMD_ERR("Failure cleaning dpni device: err=%d", ret); 1150 return; 1151 } 1152 1153 memset(&link, 0, sizeof(link)); 1154 rte_eth_linkstatus_set(dev, &link); 1155 } 1156 1157 static int 1158 dpaa2_dev_promiscuous_enable( 1159 struct rte_eth_dev *dev) 1160 { 1161 int ret; 1162 struct dpaa2_dev_priv *priv = dev->data->dev_private; 1163 struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; 1164 1165 PMD_INIT_FUNC_TRACE(); 1166 1167 if (dpni == NULL) { 1168 DPAA2_PMD_ERR("dpni is NULL"); 1169 return -ENODEV; 1170 } 1171 1172 ret = dpni_set_unicast_promisc(dpni, CMD_PRI_LOW, priv->token, true); 1173 if (ret < 0) 1174 DPAA2_PMD_ERR("Unable to enable U promisc mode %d", ret); 1175 1176 ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, true); 1177 if (ret < 0) 1178 DPAA2_PMD_ERR("Unable to enable M promisc mode %d", ret); 1179 1180 return ret; 1181 } 1182 1183 static int 1184 dpaa2_dev_promiscuous_disable( 1185 struct rte_eth_dev *dev) 1186 { 1187 int ret; 1188 struct dpaa2_dev_priv *priv = dev->data->dev_private; 1189 struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; 1190 1191 PMD_INIT_FUNC_TRACE(); 1192 1193 if (dpni == NULL) { 1194 DPAA2_PMD_ERR("dpni is NULL"); 1195 return -ENODEV; 1196 } 1197 1198 ret = dpni_set_unicast_promisc(dpni, CMD_PRI_LOW, priv->token, false); 1199 if (ret < 0) 1200 DPAA2_PMD_ERR("Unable to disable U promisc mode %d", ret); 1201 1202 if (dev->data->all_multicast == 0) { 1203 ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, 1204 priv->token, false); 1205 if (ret < 0) 1206 DPAA2_PMD_ERR("Unable to disable M promisc mode %d", 1207 ret); 1208 } 1209 1210 return ret; 1211 } 1212 1213 static int 1214 dpaa2_dev_allmulticast_enable( 1215 struct rte_eth_dev *dev) 1216 { 1217 int ret; 1218 struct dpaa2_dev_priv *priv = dev->data->dev_private; 1219 struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; 1220 1221 PMD_INIT_FUNC_TRACE(); 1222 1223 if (dpni == NULL) { 1224 DPAA2_PMD_ERR("dpni is NULL"); 1225 return -ENODEV; 1226 } 1227 1228 ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, true); 1229 if (ret < 0) 1230 DPAA2_PMD_ERR("Unable to enable multicast mode %d", ret); 1231 1232 return ret; 1233 } 1234 1235 static int 1236 dpaa2_dev_allmulticast_disable(struct rte_eth_dev *dev) 1237 { 1238 int ret; 1239 struct dpaa2_dev_priv *priv = dev->data->dev_private; 1240 struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; 1241 1242 PMD_INIT_FUNC_TRACE(); 1243 1244 if (dpni == NULL) { 1245 DPAA2_PMD_ERR("dpni is NULL"); 1246 return -ENODEV; 1247 } 1248 1249 /* must remain on for all promiscuous */ 1250 if (dev->data->promiscuous == 1) 1251 return 0; 1252 1253 ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, false); 1254 if (ret < 0) 1255 DPAA2_PMD_ERR("Unable to disable multicast mode %d", ret); 1256 1257 return ret; 1258 } 1259 1260 static int 1261 dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) 1262 { 1263 int ret; 1264 struct dpaa2_dev_priv *priv = dev->data->dev_private; 1265 struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; 1266 uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN 1267 + VLAN_TAG_SIZE; 1268 1269 PMD_INIT_FUNC_TRACE(); 1270 1271 if (dpni == NULL) { 1272 DPAA2_PMD_ERR("dpni is NULL"); 1273 return -EINVAL; 1274 } 1275 1276 /* check that mtu is within the allowed range */ 1277 if (mtu < RTE_ETHER_MIN_MTU || frame_size > DPAA2_MAX_RX_PKT_LEN) 1278 return -EINVAL; 1279 1280 if (frame_size > RTE_ETHER_MAX_LEN) 1281 dev->data->dev_conf.rxmode.offloads |= 1282 DEV_RX_OFFLOAD_JUMBO_FRAME; 1283 else 1284 dev->data->dev_conf.rxmode.offloads &= 1285 ~DEV_RX_OFFLOAD_JUMBO_FRAME; 1286 1287 dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size; 1288 1289 /* Set the Max Rx frame length as 'mtu' + 1290 * Maximum Ethernet header length 1291 */ 1292 ret = dpni_set_max_frame_length(dpni, CMD_PRI_LOW, priv->token, 1293 frame_size - RTE_ETHER_CRC_LEN); 1294 if (ret) { 1295 DPAA2_PMD_ERR("Setting the max frame length failed"); 1296 return -1; 1297 } 1298 DPAA2_PMD_INFO("MTU configured for the device: %d", mtu); 1299 return 0; 1300 } 1301 1302 static int 1303 dpaa2_dev_add_mac_addr(struct rte_eth_dev *dev, 1304 struct rte_ether_addr *addr, 1305 __rte_unused uint32_t index, 1306 __rte_unused uint32_t pool) 1307 { 1308 int ret; 1309 struct dpaa2_dev_priv *priv = dev->data->dev_private; 1310 struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; 1311 1312 PMD_INIT_FUNC_TRACE(); 1313 1314 if (dpni == NULL) { 1315 DPAA2_PMD_ERR("dpni is NULL"); 1316 return -1; 1317 } 1318 1319 ret = dpni_add_mac_addr(dpni, CMD_PRI_LOW, priv->token, 1320 addr->addr_bytes, 0, 0, 0); 1321 if (ret) 1322 DPAA2_PMD_ERR( 1323 "error: Adding the MAC ADDR failed: err = %d", ret); 1324 return 0; 1325 } 1326 1327 static void 1328 dpaa2_dev_remove_mac_addr(struct rte_eth_dev *dev, 1329 uint32_t index) 1330 { 1331 int ret; 1332 struct dpaa2_dev_priv *priv = dev->data->dev_private; 1333 struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; 1334 struct rte_eth_dev_data *data = dev->data; 1335 struct rte_ether_addr *macaddr; 1336 1337 PMD_INIT_FUNC_TRACE(); 1338 1339 macaddr = &data->mac_addrs[index]; 1340 1341 if (dpni == NULL) { 1342 DPAA2_PMD_ERR("dpni is NULL"); 1343 return; 1344 } 1345 1346 ret = dpni_remove_mac_addr(dpni, CMD_PRI_LOW, 1347 priv->token, macaddr->addr_bytes); 1348 if (ret) 1349 DPAA2_PMD_ERR( 1350 "error: Removing the MAC ADDR failed: err = %d", ret); 1351 } 1352 1353 static int 1354 dpaa2_dev_set_mac_addr(struct rte_eth_dev *dev, 1355 struct rte_ether_addr *addr) 1356 { 1357 int ret; 1358 struct dpaa2_dev_priv *priv = dev->data->dev_private; 1359 struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; 1360 1361 PMD_INIT_FUNC_TRACE(); 1362 1363 if (dpni == NULL) { 1364 DPAA2_PMD_ERR("dpni is NULL"); 1365 return -EINVAL; 1366 } 1367 1368 ret = dpni_set_primary_mac_addr(dpni, CMD_PRI_LOW, 1369 priv->token, addr->addr_bytes); 1370 1371 if (ret) 1372 DPAA2_PMD_ERR( 1373 "error: Setting the MAC ADDR failed %d", ret); 1374 1375 return ret; 1376 } 1377 1378 static 1379 int dpaa2_dev_stats_get(struct rte_eth_dev *dev, 1380 struct rte_eth_stats *stats) 1381 { 1382 struct dpaa2_dev_priv *priv = dev->data->dev_private; 1383 struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; 1384 int32_t retcode; 1385 uint8_t page0 = 0, page1 = 1, page2 = 2; 1386 union dpni_statistics value; 1387 int i; 1388 struct dpaa2_queue *dpaa2_rxq, *dpaa2_txq; 1389 1390 memset(&value, 0, sizeof(union dpni_statistics)); 1391 1392 PMD_INIT_FUNC_TRACE(); 1393 1394 if (!dpni) { 1395 DPAA2_PMD_ERR("dpni is NULL"); 1396 return -EINVAL; 1397 } 1398 1399 if (!stats) { 1400 DPAA2_PMD_ERR("stats is NULL"); 1401 return -EINVAL; 1402 } 1403 1404 /*Get Counters from page_0*/ 1405 retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token, 1406 page0, 0, &value); 1407 if (retcode) 1408 goto err; 1409 1410 stats->ipackets = value.page_0.ingress_all_frames; 1411 stats->ibytes = value.page_0.ingress_all_bytes; 1412 1413 /*Get Counters from page_1*/ 1414 retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token, 1415 page1, 0, &value); 1416 if (retcode) 1417 goto err; 1418 1419 stats->opackets = value.page_1.egress_all_frames; 1420 stats->obytes = value.page_1.egress_all_bytes; 1421 1422 /*Get Counters from page_2*/ 1423 retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token, 1424 page2, 0, &value); 1425 if (retcode) 1426 goto err; 1427 1428 /* Ingress drop frame count due to configured rules */ 1429 stats->ierrors = value.page_2.ingress_filtered_frames; 1430 /* Ingress drop frame count due to error */ 1431 stats->ierrors += value.page_2.ingress_discarded_frames; 1432 1433 stats->oerrors = value.page_2.egress_discarded_frames; 1434 stats->imissed = value.page_2.ingress_nobuffer_discards; 1435 1436 /* Fill in per queue stats */ 1437 for (i = 0; (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) && 1438 (i < priv->nb_rx_queues || i < priv->nb_tx_queues); ++i) { 1439 dpaa2_rxq = (struct dpaa2_queue *)priv->rx_vq[i]; 1440 dpaa2_txq = (struct dpaa2_queue *)priv->tx_vq[i]; 1441 if (dpaa2_rxq) 1442 stats->q_ipackets[i] = dpaa2_rxq->rx_pkts; 1443 if (dpaa2_txq) 1444 stats->q_opackets[i] = dpaa2_txq->tx_pkts; 1445 1446 /* Byte counting is not implemented */ 1447 stats->q_ibytes[i] = 0; 1448 stats->q_obytes[i] = 0; 1449 } 1450 1451 return 0; 1452 1453 err: 1454 DPAA2_PMD_ERR("Operation not completed:Error Code = %d", retcode); 1455 return retcode; 1456 }; 1457 1458 static int 1459 dpaa2_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, 1460 unsigned int n) 1461 { 1462 struct dpaa2_dev_priv *priv = dev->data->dev_private; 1463 struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; 1464 int32_t retcode; 1465 union dpni_statistics value[5] = {}; 1466 unsigned int i = 0, num = RTE_DIM(dpaa2_xstats_strings); 1467 1468 if (n < num) 1469 return num; 1470 1471 if (xstats == NULL) 1472 return 0; 1473 1474 /* Get Counters from page_0*/ 1475 retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token, 1476 0, 0, &value[0]); 1477 if (retcode) 1478 goto err; 1479 1480 /* Get Counters from page_1*/ 1481 retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token, 1482 1, 0, &value[1]); 1483 if (retcode) 1484 goto err; 1485 1486 /* Get Counters from page_2*/ 1487 retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token, 1488 2, 0, &value[2]); 1489 if (retcode) 1490 goto err; 1491 1492 for (i = 0; i < priv->max_cgs; i++) { 1493 if (!priv->cgid_in_use[i]) { 1494 /* Get Counters from page_4*/ 1495 retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, 1496 priv->token, 1497 4, 0, &value[4]); 1498 if (retcode) 1499 goto err; 1500 break; 1501 } 1502 } 1503 1504 for (i = 0; i < num; i++) { 1505 xstats[i].id = i; 1506 xstats[i].value = value[dpaa2_xstats_strings[i].page_id]. 1507 raw.counter[dpaa2_xstats_strings[i].stats_id]; 1508 } 1509 return i; 1510 err: 1511 DPAA2_PMD_ERR("Error in obtaining extended stats (%d)", retcode); 1512 return retcode; 1513 } 1514 1515 static int 1516 dpaa2_xstats_get_names(__rte_unused struct rte_eth_dev *dev, 1517 struct rte_eth_xstat_name *xstats_names, 1518 unsigned int limit) 1519 { 1520 unsigned int i, stat_cnt = RTE_DIM(dpaa2_xstats_strings); 1521 1522 if (limit < stat_cnt) 1523 return stat_cnt; 1524 1525 if (xstats_names != NULL) 1526 for (i = 0; i < stat_cnt; i++) 1527 strlcpy(xstats_names[i].name, 1528 dpaa2_xstats_strings[i].name, 1529 sizeof(xstats_names[i].name)); 1530 1531 return stat_cnt; 1532 } 1533 1534 static int 1535 dpaa2_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids, 1536 uint64_t *values, unsigned int n) 1537 { 1538 unsigned int i, stat_cnt = RTE_DIM(dpaa2_xstats_strings); 1539 uint64_t values_copy[stat_cnt]; 1540 1541 if (!ids) { 1542 struct dpaa2_dev_priv *priv = dev->data->dev_private; 1543 struct fsl_mc_io *dpni = 1544 (struct fsl_mc_io *)dev->process_private; 1545 int32_t retcode; 1546 union dpni_statistics value[5] = {}; 1547 1548 if (n < stat_cnt) 1549 return stat_cnt; 1550 1551 if (!values) 1552 return 0; 1553 1554 /* Get Counters from page_0*/ 1555 retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token, 1556 0, 0, &value[0]); 1557 if (retcode) 1558 return 0; 1559 1560 /* Get Counters from page_1*/ 1561 retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token, 1562 1, 0, &value[1]); 1563 if (retcode) 1564 return 0; 1565 1566 /* Get Counters from page_2*/ 1567 retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token, 1568 2, 0, &value[2]); 1569 if (retcode) 1570 return 0; 1571 1572 /* Get Counters from page_4*/ 1573 retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token, 1574 4, 0, &value[4]); 1575 if (retcode) 1576 return 0; 1577 1578 for (i = 0; i < stat_cnt; i++) { 1579 values[i] = value[dpaa2_xstats_strings[i].page_id]. 1580 raw.counter[dpaa2_xstats_strings[i].stats_id]; 1581 } 1582 return stat_cnt; 1583 } 1584 1585 dpaa2_xstats_get_by_id(dev, NULL, values_copy, stat_cnt); 1586 1587 for (i = 0; i < n; i++) { 1588 if (ids[i] >= stat_cnt) { 1589 DPAA2_PMD_ERR("xstats id value isn't valid"); 1590 return -1; 1591 } 1592 values[i] = values_copy[ids[i]]; 1593 } 1594 return n; 1595 } 1596 1597 static int 1598 dpaa2_xstats_get_names_by_id( 1599 struct rte_eth_dev *dev, 1600 struct rte_eth_xstat_name *xstats_names, 1601 const uint64_t *ids, 1602 unsigned int limit) 1603 { 1604 unsigned int i, stat_cnt = RTE_DIM(dpaa2_xstats_strings); 1605 struct rte_eth_xstat_name xstats_names_copy[stat_cnt]; 1606 1607 if (!ids) 1608 return dpaa2_xstats_get_names(dev, xstats_names, limit); 1609 1610 dpaa2_xstats_get_names(dev, xstats_names_copy, limit); 1611 1612 for (i = 0; i < limit; i++) { 1613 if (ids[i] >= stat_cnt) { 1614 DPAA2_PMD_ERR("xstats id value isn't valid"); 1615 return -1; 1616 } 1617 strcpy(xstats_names[i].name, xstats_names_copy[ids[i]].name); 1618 } 1619 return limit; 1620 } 1621 1622 static int 1623 dpaa2_dev_stats_reset(struct rte_eth_dev *dev) 1624 { 1625 struct dpaa2_dev_priv *priv = dev->data->dev_private; 1626 struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; 1627 int retcode; 1628 int i; 1629 struct dpaa2_queue *dpaa2_q; 1630 1631 PMD_INIT_FUNC_TRACE(); 1632 1633 if (dpni == NULL) { 1634 DPAA2_PMD_ERR("dpni is NULL"); 1635 return -EINVAL; 1636 } 1637 1638 retcode = dpni_reset_statistics(dpni, CMD_PRI_LOW, priv->token); 1639 if (retcode) 1640 goto error; 1641 1642 /* Reset the per queue stats in dpaa2_queue structure */ 1643 for (i = 0; i < priv->nb_rx_queues; i++) { 1644 dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i]; 1645 if (dpaa2_q) 1646 dpaa2_q->rx_pkts = 0; 1647 } 1648 1649 for (i = 0; i < priv->nb_tx_queues; i++) { 1650 dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i]; 1651 if (dpaa2_q) 1652 dpaa2_q->tx_pkts = 0; 1653 } 1654 1655 return 0; 1656 1657 error: 1658 DPAA2_PMD_ERR("Operation not completed:Error Code = %d", retcode); 1659 return retcode; 1660 }; 1661 1662 /* return 0 means link status changed, -1 means not changed */ 1663 static int 1664 dpaa2_dev_link_update(struct rte_eth_dev *dev, 1665 int wait_to_complete __rte_unused) 1666 { 1667 int ret; 1668 struct dpaa2_dev_priv *priv = dev->data->dev_private; 1669 struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; 1670 struct rte_eth_link link; 1671 struct dpni_link_state state = {0}; 1672 1673 if (dpni == NULL) { 1674 DPAA2_PMD_ERR("dpni is NULL"); 1675 return 0; 1676 } 1677 1678 ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state); 1679 if (ret < 0) { 1680 DPAA2_PMD_DEBUG("error: dpni_get_link_state %d", ret); 1681 return -1; 1682 } 1683 1684 memset(&link, 0, sizeof(struct rte_eth_link)); 1685 link.link_status = state.up; 1686 link.link_speed = state.rate; 1687 1688 if (state.options & DPNI_LINK_OPT_HALF_DUPLEX) 1689 link.link_duplex = ETH_LINK_HALF_DUPLEX; 1690 else 1691 link.link_duplex = ETH_LINK_FULL_DUPLEX; 1692 1693 ret = rte_eth_linkstatus_set(dev, &link); 1694 if (ret == -1) 1695 DPAA2_PMD_DEBUG("No change in status"); 1696 else 1697 DPAA2_PMD_INFO("Port %d Link is %s\n", dev->data->port_id, 1698 link.link_status ? "Up" : "Down"); 1699 1700 return ret; 1701 } 1702 1703 /** 1704 * Toggle the DPNI to enable, if not already enabled. 1705 * This is not strictly PHY up/down - it is more of logical toggling. 1706 */ 1707 static int 1708 dpaa2_dev_set_link_up(struct rte_eth_dev *dev) 1709 { 1710 int ret = -EINVAL; 1711 struct dpaa2_dev_priv *priv; 1712 struct fsl_mc_io *dpni; 1713 int en = 0; 1714 struct dpni_link_state state = {0}; 1715 1716 priv = dev->data->dev_private; 1717 dpni = (struct fsl_mc_io *)dev->process_private; 1718 1719 if (dpni == NULL) { 1720 DPAA2_PMD_ERR("dpni is NULL"); 1721 return ret; 1722 } 1723 1724 /* Check if DPNI is currently enabled */ 1725 ret = dpni_is_enabled(dpni, CMD_PRI_LOW, priv->token, &en); 1726 if (ret) { 1727 /* Unable to obtain dpni status; Not continuing */ 1728 DPAA2_PMD_ERR("Interface Link UP failed (%d)", ret); 1729 return -EINVAL; 1730 } 1731 1732 /* Enable link if not already enabled */ 1733 if (!en) { 1734 ret = dpni_enable(dpni, CMD_PRI_LOW, priv->token); 1735 if (ret) { 1736 DPAA2_PMD_ERR("Interface Link UP failed (%d)", ret); 1737 return -EINVAL; 1738 } 1739 } 1740 ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state); 1741 if (ret < 0) { 1742 DPAA2_PMD_DEBUG("Unable to get link state (%d)", ret); 1743 return -1; 1744 } 1745 1746 /* changing tx burst function to start enqueues */ 1747 dev->tx_pkt_burst = dpaa2_dev_tx; 1748 dev->data->dev_link.link_status = state.up; 1749 dev->data->dev_link.link_speed = state.rate; 1750 1751 if (state.up) 1752 DPAA2_PMD_INFO("Port %d Link is Up", dev->data->port_id); 1753 else 1754 DPAA2_PMD_INFO("Port %d Link is Down", dev->data->port_id); 1755 return ret; 1756 } 1757 1758 /** 1759 * Toggle the DPNI to disable, if not already disabled. 1760 * This is not strictly PHY up/down - it is more of logical toggling. 1761 */ 1762 static int 1763 dpaa2_dev_set_link_down(struct rte_eth_dev *dev) 1764 { 1765 int ret = -EINVAL; 1766 struct dpaa2_dev_priv *priv; 1767 struct fsl_mc_io *dpni; 1768 int dpni_enabled = 0; 1769 int retries = 10; 1770 1771 PMD_INIT_FUNC_TRACE(); 1772 1773 priv = dev->data->dev_private; 1774 dpni = (struct fsl_mc_io *)dev->process_private; 1775 1776 if (dpni == NULL) { 1777 DPAA2_PMD_ERR("Device has not yet been configured"); 1778 return ret; 1779 } 1780 1781 /*changing tx burst function to avoid any more enqueues */ 1782 dev->tx_pkt_burst = dummy_dev_tx; 1783 1784 /* Loop while dpni_disable() attempts to drain the egress FQs 1785 * and confirm them back to us. 1786 */ 1787 do { 1788 ret = dpni_disable(dpni, 0, priv->token); 1789 if (ret) { 1790 DPAA2_PMD_ERR("dpni disable failed (%d)", ret); 1791 return ret; 1792 } 1793 ret = dpni_is_enabled(dpni, 0, priv->token, &dpni_enabled); 1794 if (ret) { 1795 DPAA2_PMD_ERR("dpni enable check failed (%d)", ret); 1796 return ret; 1797 } 1798 if (dpni_enabled) 1799 /* Allow the MC some slack */ 1800 rte_delay_us(100 * 1000); 1801 } while (dpni_enabled && --retries); 1802 1803 if (!retries) { 1804 DPAA2_PMD_WARN("Retry count exceeded disabling dpni"); 1805 /* todo- we may have to manually cleanup queues. 1806 */ 1807 } else { 1808 DPAA2_PMD_INFO("Port %d Link DOWN successful", 1809 dev->data->port_id); 1810 } 1811 1812 dev->data->dev_link.link_status = 0; 1813 1814 return ret; 1815 } 1816 1817 static int 1818 dpaa2_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) 1819 { 1820 int ret = -EINVAL; 1821 struct dpaa2_dev_priv *priv; 1822 struct fsl_mc_io *dpni; 1823 struct dpni_link_state state = {0}; 1824 1825 PMD_INIT_FUNC_TRACE(); 1826 1827 priv = dev->data->dev_private; 1828 dpni = (struct fsl_mc_io *)dev->process_private; 1829 1830 if (dpni == NULL || fc_conf == NULL) { 1831 DPAA2_PMD_ERR("device not configured"); 1832 return ret; 1833 } 1834 1835 ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state); 1836 if (ret) { 1837 DPAA2_PMD_ERR("error: dpni_get_link_state %d", ret); 1838 return ret; 1839 } 1840 1841 memset(fc_conf, 0, sizeof(struct rte_eth_fc_conf)); 1842 if (state.options & DPNI_LINK_OPT_PAUSE) { 1843 /* DPNI_LINK_OPT_PAUSE set 1844 * if ASYM_PAUSE not set, 1845 * RX Side flow control (handle received Pause frame) 1846 * TX side flow control (send Pause frame) 1847 * if ASYM_PAUSE set, 1848 * RX Side flow control (handle received Pause frame) 1849 * No TX side flow control (send Pause frame disabled) 1850 */ 1851 if (!(state.options & DPNI_LINK_OPT_ASYM_PAUSE)) 1852 fc_conf->mode = RTE_FC_FULL; 1853 else 1854 fc_conf->mode = RTE_FC_RX_PAUSE; 1855 } else { 1856 /* DPNI_LINK_OPT_PAUSE not set 1857 * if ASYM_PAUSE set, 1858 * TX side flow control (send Pause frame) 1859 * No RX side flow control (No action on pause frame rx) 1860 * if ASYM_PAUSE not set, 1861 * Flow control disabled 1862 */ 1863 if (state.options & DPNI_LINK_OPT_ASYM_PAUSE) 1864 fc_conf->mode = RTE_FC_TX_PAUSE; 1865 else 1866 fc_conf->mode = RTE_FC_NONE; 1867 } 1868 1869 return ret; 1870 } 1871 1872 static int 1873 dpaa2_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) 1874 { 1875 int ret = -EINVAL; 1876 struct dpaa2_dev_priv *priv; 1877 struct fsl_mc_io *dpni; 1878 struct dpni_link_state state = {0}; 1879 struct dpni_link_cfg cfg = {0}; 1880 1881 PMD_INIT_FUNC_TRACE(); 1882 1883 priv = dev->data->dev_private; 1884 dpni = (struct fsl_mc_io *)dev->process_private; 1885 1886 if (dpni == NULL) { 1887 DPAA2_PMD_ERR("dpni is NULL"); 1888 return ret; 1889 } 1890 1891 /* It is necessary to obtain the current state before setting fc_conf 1892 * as MC would return error in case rate, autoneg or duplex values are 1893 * different. 1894 */ 1895 ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state); 1896 if (ret) { 1897 DPAA2_PMD_ERR("Unable to get link state (err=%d)", ret); 1898 return -1; 1899 } 1900 1901 /* Disable link before setting configuration */ 1902 dpaa2_dev_set_link_down(dev); 1903 1904 /* Based on fc_conf, update cfg */ 1905 cfg.rate = state.rate; 1906 cfg.options = state.options; 1907 1908 /* update cfg with fc_conf */ 1909 switch (fc_conf->mode) { 1910 case RTE_FC_FULL: 1911 /* Full flow control; 1912 * OPT_PAUSE set, ASYM_PAUSE not set 1913 */ 1914 cfg.options |= DPNI_LINK_OPT_PAUSE; 1915 cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE; 1916 break; 1917 case RTE_FC_TX_PAUSE: 1918 /* Enable RX flow control 1919 * OPT_PAUSE not set; 1920 * ASYM_PAUSE set; 1921 */ 1922 cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE; 1923 cfg.options &= ~DPNI_LINK_OPT_PAUSE; 1924 break; 1925 case RTE_FC_RX_PAUSE: 1926 /* Enable TX Flow control 1927 * OPT_PAUSE set 1928 * ASYM_PAUSE set 1929 */ 1930 cfg.options |= DPNI_LINK_OPT_PAUSE; 1931 cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE; 1932 break; 1933 case RTE_FC_NONE: 1934 /* Disable Flow control 1935 * OPT_PAUSE not set 1936 * ASYM_PAUSE not set 1937 */ 1938 cfg.options &= ~DPNI_LINK_OPT_PAUSE; 1939 cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE; 1940 break; 1941 default: 1942 DPAA2_PMD_ERR("Incorrect Flow control flag (%d)", 1943 fc_conf->mode); 1944 return -1; 1945 } 1946 1947 ret = dpni_set_link_cfg(dpni, CMD_PRI_LOW, priv->token, &cfg); 1948 if (ret) 1949 DPAA2_PMD_ERR("Unable to set Link configuration (err=%d)", 1950 ret); 1951 1952 /* Enable link */ 1953 dpaa2_dev_set_link_up(dev); 1954 1955 return ret; 1956 } 1957 1958 static int 1959 dpaa2_dev_rss_hash_update(struct rte_eth_dev *dev, 1960 struct rte_eth_rss_conf *rss_conf) 1961 { 1962 struct rte_eth_dev_data *data = dev->data; 1963 struct rte_eth_conf *eth_conf = &data->dev_conf; 1964 int ret; 1965 1966 PMD_INIT_FUNC_TRACE(); 1967 1968 if (rss_conf->rss_hf) { 1969 ret = dpaa2_setup_flow_dist(dev, rss_conf->rss_hf); 1970 if (ret) { 1971 DPAA2_PMD_ERR("Unable to set flow dist"); 1972 return ret; 1973 } 1974 } else { 1975 ret = dpaa2_remove_flow_dist(dev, 0); 1976 if (ret) { 1977 DPAA2_PMD_ERR("Unable to remove flow dist"); 1978 return ret; 1979 } 1980 } 1981 eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_conf->rss_hf; 1982 return 0; 1983 } 1984 1985 static int 1986 dpaa2_dev_rss_hash_conf_get(struct rte_eth_dev *dev, 1987 struct rte_eth_rss_conf *rss_conf) 1988 { 1989 struct rte_eth_dev_data *data = dev->data; 1990 struct rte_eth_conf *eth_conf = &data->dev_conf; 1991 1992 /* dpaa2 does not support rss_key, so length should be 0*/ 1993 rss_conf->rss_key_len = 0; 1994 rss_conf->rss_hf = eth_conf->rx_adv_conf.rss_conf.rss_hf; 1995 return 0; 1996 } 1997 1998 int dpaa2_eth_eventq_attach(const struct rte_eth_dev *dev, 1999 int eth_rx_queue_id, 2000 struct dpaa2_dpcon_dev *dpcon, 2001 const struct rte_event_eth_rx_adapter_queue_conf *queue_conf) 2002 { 2003 struct dpaa2_dev_priv *eth_priv = dev->data->dev_private; 2004 struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; 2005 struct dpaa2_queue *dpaa2_ethq = eth_priv->rx_vq[eth_rx_queue_id]; 2006 uint8_t flow_id = dpaa2_ethq->flow_id; 2007 struct dpni_queue cfg; 2008 uint8_t options, priority; 2009 int ret; 2010 2011 if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_PARALLEL) 2012 dpaa2_ethq->cb = dpaa2_dev_process_parallel_event; 2013 else if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_ATOMIC) 2014 dpaa2_ethq->cb = dpaa2_dev_process_atomic_event; 2015 else if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_ORDERED) 2016 dpaa2_ethq->cb = dpaa2_dev_process_ordered_event; 2017 else 2018 return -EINVAL; 2019 2020 priority = (RTE_EVENT_DEV_PRIORITY_LOWEST / queue_conf->ev.priority) * 2021 (dpcon->num_priorities - 1); 2022 2023 memset(&cfg, 0, sizeof(struct dpni_queue)); 2024 options = DPNI_QUEUE_OPT_DEST; 2025 cfg.destination.type = DPNI_DEST_DPCON; 2026 cfg.destination.id = dpcon->dpcon_id; 2027 cfg.destination.priority = priority; 2028 2029 if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_ATOMIC) { 2030 options |= DPNI_QUEUE_OPT_HOLD_ACTIVE; 2031 cfg.destination.hold_active = 1; 2032 } 2033 2034 if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_ORDERED && 2035 !eth_priv->en_ordered) { 2036 struct opr_cfg ocfg; 2037 2038 /* Restoration window size = 256 frames */ 2039 ocfg.oprrws = 3; 2040 /* Restoration window size = 512 frames for LX2 */ 2041 if (dpaa2_svr_family == SVR_LX2160A) 2042 ocfg.oprrws = 4; 2043 /* Auto advance NESN window enabled */ 2044 ocfg.oa = 1; 2045 /* Late arrival window size disabled */ 2046 ocfg.olws = 0; 2047 /* ORL resource exhaustaion advance NESN disabled */ 2048 ocfg.oeane = 0; 2049 /* Loose ordering enabled */ 2050 ocfg.oloe = 1; 2051 eth_priv->en_loose_ordered = 1; 2052 /* Strict ordering enabled if explicitly set */ 2053 if (getenv("DPAA2_STRICT_ORDERING_ENABLE")) { 2054 ocfg.oloe = 0; 2055 eth_priv->en_loose_ordered = 0; 2056 } 2057 2058 ret = dpni_set_opr(dpni, CMD_PRI_LOW, eth_priv->token, 2059 dpaa2_ethq->tc_index, flow_id, 2060 OPR_OPT_CREATE, &ocfg); 2061 if (ret) { 2062 DPAA2_PMD_ERR("Error setting opr: ret: %d\n", ret); 2063 return ret; 2064 } 2065 2066 eth_priv->en_ordered = 1; 2067 } 2068 2069 options |= DPNI_QUEUE_OPT_USER_CTX; 2070 cfg.user_context = (size_t)(dpaa2_ethq); 2071 2072 ret = dpni_set_queue(dpni, CMD_PRI_LOW, eth_priv->token, DPNI_QUEUE_RX, 2073 dpaa2_ethq->tc_index, flow_id, options, &cfg); 2074 if (ret) { 2075 DPAA2_PMD_ERR("Error in dpni_set_queue: ret: %d", ret); 2076 return ret; 2077 } 2078 2079 memcpy(&dpaa2_ethq->ev, &queue_conf->ev, sizeof(struct rte_event)); 2080 2081 return 0; 2082 } 2083 2084 int dpaa2_eth_eventq_detach(const struct rte_eth_dev *dev, 2085 int eth_rx_queue_id) 2086 { 2087 struct dpaa2_dev_priv *eth_priv = dev->data->dev_private; 2088 struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; 2089 struct dpaa2_queue *dpaa2_ethq = eth_priv->rx_vq[eth_rx_queue_id]; 2090 uint8_t flow_id = dpaa2_ethq->flow_id; 2091 struct dpni_queue cfg; 2092 uint8_t options; 2093 int ret; 2094 2095 memset(&cfg, 0, sizeof(struct dpni_queue)); 2096 options = DPNI_QUEUE_OPT_DEST; 2097 cfg.destination.type = DPNI_DEST_NONE; 2098 2099 ret = dpni_set_queue(dpni, CMD_PRI_LOW, eth_priv->token, DPNI_QUEUE_RX, 2100 dpaa2_ethq->tc_index, flow_id, options, &cfg); 2101 if (ret) 2102 DPAA2_PMD_ERR("Error in dpni_set_queue: ret: %d", ret); 2103 2104 return ret; 2105 } 2106 2107 static inline int 2108 dpaa2_dev_verify_filter_ops(enum rte_filter_op filter_op) 2109 { 2110 unsigned int i; 2111 2112 for (i = 0; i < RTE_DIM(dpaa2_supported_filter_ops); i++) { 2113 if (dpaa2_supported_filter_ops[i] == filter_op) 2114 return 0; 2115 } 2116 return -ENOTSUP; 2117 } 2118 2119 static int 2120 dpaa2_dev_flow_ctrl(struct rte_eth_dev *dev, 2121 enum rte_filter_type filter_type, 2122 enum rte_filter_op filter_op, 2123 void *arg) 2124 { 2125 int ret = 0; 2126 2127 if (!dev) 2128 return -ENODEV; 2129 2130 switch (filter_type) { 2131 case RTE_ETH_FILTER_GENERIC: 2132 if (dpaa2_dev_verify_filter_ops(filter_op) < 0) { 2133 ret = -ENOTSUP; 2134 break; 2135 } 2136 *(const void **)arg = &dpaa2_flow_ops; 2137 dpaa2_filter_type |= filter_type; 2138 break; 2139 default: 2140 RTE_LOG(ERR, PMD, "Filter type (%d) not supported", 2141 filter_type); 2142 ret = -ENOTSUP; 2143 break; 2144 } 2145 return ret; 2146 } 2147 2148 static struct eth_dev_ops dpaa2_ethdev_ops = { 2149 .dev_configure = dpaa2_eth_dev_configure, 2150 .dev_start = dpaa2_dev_start, 2151 .dev_stop = dpaa2_dev_stop, 2152 .dev_close = dpaa2_dev_close, 2153 .promiscuous_enable = dpaa2_dev_promiscuous_enable, 2154 .promiscuous_disable = dpaa2_dev_promiscuous_disable, 2155 .allmulticast_enable = dpaa2_dev_allmulticast_enable, 2156 .allmulticast_disable = dpaa2_dev_allmulticast_disable, 2157 .dev_set_link_up = dpaa2_dev_set_link_up, 2158 .dev_set_link_down = dpaa2_dev_set_link_down, 2159 .link_update = dpaa2_dev_link_update, 2160 .stats_get = dpaa2_dev_stats_get, 2161 .xstats_get = dpaa2_dev_xstats_get, 2162 .xstats_get_by_id = dpaa2_xstats_get_by_id, 2163 .xstats_get_names_by_id = dpaa2_xstats_get_names_by_id, 2164 .xstats_get_names = dpaa2_xstats_get_names, 2165 .stats_reset = dpaa2_dev_stats_reset, 2166 .xstats_reset = dpaa2_dev_stats_reset, 2167 .fw_version_get = dpaa2_fw_version_get, 2168 .dev_infos_get = dpaa2_dev_info_get, 2169 .dev_supported_ptypes_get = dpaa2_supported_ptypes_get, 2170 .mtu_set = dpaa2_dev_mtu_set, 2171 .vlan_filter_set = dpaa2_vlan_filter_set, 2172 .vlan_offload_set = dpaa2_vlan_offload_set, 2173 .vlan_tpid_set = dpaa2_vlan_tpid_set, 2174 .rx_queue_setup = dpaa2_dev_rx_queue_setup, 2175 .rx_queue_release = dpaa2_dev_rx_queue_release, 2176 .tx_queue_setup = dpaa2_dev_tx_queue_setup, 2177 .tx_queue_release = dpaa2_dev_tx_queue_release, 2178 .rx_queue_count = dpaa2_dev_rx_queue_count, 2179 .flow_ctrl_get = dpaa2_flow_ctrl_get, 2180 .flow_ctrl_set = dpaa2_flow_ctrl_set, 2181 .mac_addr_add = dpaa2_dev_add_mac_addr, 2182 .mac_addr_remove = dpaa2_dev_remove_mac_addr, 2183 .mac_addr_set = dpaa2_dev_set_mac_addr, 2184 .rss_hash_update = dpaa2_dev_rss_hash_update, 2185 .rss_hash_conf_get = dpaa2_dev_rss_hash_conf_get, 2186 .filter_ctrl = dpaa2_dev_flow_ctrl, 2187 #if defined(RTE_LIBRTE_IEEE1588) 2188 .timesync_enable = dpaa2_timesync_enable, 2189 .timesync_disable = dpaa2_timesync_disable, 2190 .timesync_read_time = dpaa2_timesync_read_time, 2191 .timesync_write_time = dpaa2_timesync_write_time, 2192 .timesync_adjust_time = dpaa2_timesync_adjust_time, 2193 .timesync_read_rx_timestamp = dpaa2_timesync_read_rx_timestamp, 2194 .timesync_read_tx_timestamp = dpaa2_timesync_read_tx_timestamp, 2195 #endif 2196 }; 2197 2198 /* Populate the mac address from physically available (u-boot/firmware) and/or 2199 * one set by higher layers like MC (restool) etc. 2200 * Returns the table of MAC entries (multiple entries) 2201 */ 2202 static int 2203 populate_mac_addr(struct fsl_mc_io *dpni_dev, struct dpaa2_dev_priv *priv, 2204 struct rte_ether_addr *mac_entry) 2205 { 2206 int ret; 2207 struct rte_ether_addr phy_mac, prime_mac; 2208 2209 memset(&phy_mac, 0, sizeof(struct rte_ether_addr)); 2210 memset(&prime_mac, 0, sizeof(struct rte_ether_addr)); 2211 2212 /* Get the physical device MAC address */ 2213 ret = dpni_get_port_mac_addr(dpni_dev, CMD_PRI_LOW, priv->token, 2214 phy_mac.addr_bytes); 2215 if (ret) { 2216 DPAA2_PMD_ERR("DPNI get physical port MAC failed: %d", ret); 2217 goto cleanup; 2218 } 2219 2220 ret = dpni_get_primary_mac_addr(dpni_dev, CMD_PRI_LOW, priv->token, 2221 prime_mac.addr_bytes); 2222 if (ret) { 2223 DPAA2_PMD_ERR("DPNI get Prime port MAC failed: %d", ret); 2224 goto cleanup; 2225 } 2226 2227 /* Now that both MAC have been obtained, do: 2228 * if not_empty_mac(phy) && phy != Prime, overwrite prime with Phy 2229 * and return phy 2230 * If empty_mac(phy), return prime. 2231 * if both are empty, create random MAC, set as prime and return 2232 */ 2233 if (!rte_is_zero_ether_addr(&phy_mac)) { 2234 /* If the addresses are not same, overwrite prime */ 2235 if (!rte_is_same_ether_addr(&phy_mac, &prime_mac)) { 2236 ret = dpni_set_primary_mac_addr(dpni_dev, CMD_PRI_LOW, 2237 priv->token, 2238 phy_mac.addr_bytes); 2239 if (ret) { 2240 DPAA2_PMD_ERR("Unable to set MAC Address: %d", 2241 ret); 2242 goto cleanup; 2243 } 2244 memcpy(&prime_mac, &phy_mac, 2245 sizeof(struct rte_ether_addr)); 2246 } 2247 } else if (rte_is_zero_ether_addr(&prime_mac)) { 2248 /* In case phys and prime, both are zero, create random MAC */ 2249 rte_eth_random_addr(prime_mac.addr_bytes); 2250 ret = dpni_set_primary_mac_addr(dpni_dev, CMD_PRI_LOW, 2251 priv->token, 2252 prime_mac.addr_bytes); 2253 if (ret) { 2254 DPAA2_PMD_ERR("Unable to set MAC Address: %d", ret); 2255 goto cleanup; 2256 } 2257 } 2258 2259 /* prime_mac the final MAC address */ 2260 memcpy(mac_entry, &prime_mac, sizeof(struct rte_ether_addr)); 2261 return 0; 2262 2263 cleanup: 2264 return -1; 2265 } 2266 2267 static int 2268 check_devargs_handler(__rte_unused const char *key, const char *value, 2269 __rte_unused void *opaque) 2270 { 2271 if (strcmp(value, "1")) 2272 return -1; 2273 2274 return 0; 2275 } 2276 2277 static int 2278 dpaa2_get_devargs(struct rte_devargs *devargs, const char *key) 2279 { 2280 struct rte_kvargs *kvlist; 2281 2282 if (!devargs) 2283 return 0; 2284 2285 kvlist = rte_kvargs_parse(devargs->args, NULL); 2286 if (!kvlist) 2287 return 0; 2288 2289 if (!rte_kvargs_count(kvlist, key)) { 2290 rte_kvargs_free(kvlist); 2291 return 0; 2292 } 2293 2294 if (rte_kvargs_process(kvlist, key, 2295 check_devargs_handler, NULL) < 0) { 2296 rte_kvargs_free(kvlist); 2297 return 0; 2298 } 2299 rte_kvargs_free(kvlist); 2300 2301 return 1; 2302 } 2303 2304 static int 2305 dpaa2_dev_init(struct rte_eth_dev *eth_dev) 2306 { 2307 struct rte_device *dev = eth_dev->device; 2308 struct rte_dpaa2_device *dpaa2_dev; 2309 struct fsl_mc_io *dpni_dev; 2310 struct dpni_attr attr; 2311 struct dpaa2_dev_priv *priv = eth_dev->data->dev_private; 2312 struct dpni_buffer_layout layout; 2313 int ret, hw_id, i; 2314 2315 PMD_INIT_FUNC_TRACE(); 2316 2317 dpni_dev = rte_malloc(NULL, sizeof(struct fsl_mc_io), 0); 2318 if (!dpni_dev) { 2319 DPAA2_PMD_ERR("Memory allocation failed for dpni device"); 2320 return -1; 2321 } 2322 dpni_dev->regs = rte_mcp_ptr_list[0]; 2323 eth_dev->process_private = (void *)dpni_dev; 2324 2325 /* For secondary processes, the primary has done all the work */ 2326 if (rte_eal_process_type() != RTE_PROC_PRIMARY) { 2327 /* In case of secondary, only burst and ops API need to be 2328 * plugged. 2329 */ 2330 eth_dev->dev_ops = &dpaa2_ethdev_ops; 2331 if (dpaa2_get_devargs(dev->devargs, DRIVER_LOOPBACK_MODE)) 2332 eth_dev->rx_pkt_burst = dpaa2_dev_loopback_rx; 2333 else if (dpaa2_get_devargs(dev->devargs, 2334 DRIVER_NO_PREFETCH_MODE)) 2335 eth_dev->rx_pkt_burst = dpaa2_dev_rx; 2336 else 2337 eth_dev->rx_pkt_burst = dpaa2_dev_prefetch_rx; 2338 eth_dev->tx_pkt_burst = dpaa2_dev_tx; 2339 return 0; 2340 } 2341 2342 dpaa2_dev = container_of(dev, struct rte_dpaa2_device, device); 2343 2344 hw_id = dpaa2_dev->object_id; 2345 ret = dpni_open(dpni_dev, CMD_PRI_LOW, hw_id, &priv->token); 2346 if (ret) { 2347 DPAA2_PMD_ERR( 2348 "Failure in opening dpni@%d with err code %d", 2349 hw_id, ret); 2350 rte_free(dpni_dev); 2351 return -1; 2352 } 2353 2354 /* Clean the device first */ 2355 ret = dpni_reset(dpni_dev, CMD_PRI_LOW, priv->token); 2356 if (ret) { 2357 DPAA2_PMD_ERR("Failure cleaning dpni@%d with err code %d", 2358 hw_id, ret); 2359 goto init_err; 2360 } 2361 2362 ret = dpni_get_attributes(dpni_dev, CMD_PRI_LOW, priv->token, &attr); 2363 if (ret) { 2364 DPAA2_PMD_ERR( 2365 "Failure in get dpni@%d attribute, err code %d", 2366 hw_id, ret); 2367 goto init_err; 2368 } 2369 2370 priv->num_rx_tc = attr.num_rx_tcs; 2371 /* only if the custom CG is enabled */ 2372 if (attr.options & DPNI_OPT_CUSTOM_CG) 2373 priv->max_cgs = attr.num_cgs; 2374 else 2375 priv->max_cgs = 0; 2376 2377 for (i = 0; i < priv->max_cgs; i++) 2378 priv->cgid_in_use[i] = 0; 2379 2380 for (i = 0; i < attr.num_rx_tcs; i++) 2381 priv->nb_rx_queues += attr.num_queues; 2382 2383 /* Using number of TX queues as number of TX TCs */ 2384 priv->nb_tx_queues = attr.num_tx_tcs; 2385 2386 DPAA2_PMD_DEBUG("RX-TC= %d, rx_queues= %d, tx_queues=%d, max_cgs=%d", 2387 priv->num_rx_tc, priv->nb_rx_queues, 2388 priv->nb_tx_queues, priv->max_cgs); 2389 2390 priv->hw = dpni_dev; 2391 priv->hw_id = hw_id; 2392 priv->options = attr.options; 2393 priv->max_mac_filters = attr.mac_filter_entries; 2394 priv->max_vlan_filters = attr.vlan_filter_entries; 2395 priv->flags = 0; 2396 #if defined(RTE_LIBRTE_IEEE1588) 2397 priv->tx_conf_en = 1; 2398 #else 2399 priv->tx_conf_en = 0; 2400 #endif 2401 2402 /* Allocate memory for hardware structure for queues */ 2403 ret = dpaa2_alloc_rx_tx_queues(eth_dev); 2404 if (ret) { 2405 DPAA2_PMD_ERR("Queue allocation Failed"); 2406 goto init_err; 2407 } 2408 2409 /* Allocate memory for storing MAC addresses. 2410 * Table of mac_filter_entries size is allocated so that RTE ether lib 2411 * can add MAC entries when rte_eth_dev_mac_addr_add is called. 2412 */ 2413 eth_dev->data->mac_addrs = rte_zmalloc("dpni", 2414 RTE_ETHER_ADDR_LEN * attr.mac_filter_entries, 0); 2415 if (eth_dev->data->mac_addrs == NULL) { 2416 DPAA2_PMD_ERR( 2417 "Failed to allocate %d bytes needed to store MAC addresses", 2418 RTE_ETHER_ADDR_LEN * attr.mac_filter_entries); 2419 ret = -ENOMEM; 2420 goto init_err; 2421 } 2422 2423 ret = populate_mac_addr(dpni_dev, priv, ð_dev->data->mac_addrs[0]); 2424 if (ret) { 2425 DPAA2_PMD_ERR("Unable to fetch MAC Address for device"); 2426 rte_free(eth_dev->data->mac_addrs); 2427 eth_dev->data->mac_addrs = NULL; 2428 goto init_err; 2429 } 2430 2431 /* ... tx buffer layout ... */ 2432 memset(&layout, 0, sizeof(struct dpni_buffer_layout)); 2433 if (priv->tx_conf_en) { 2434 layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS | 2435 DPNI_BUF_LAYOUT_OPT_TIMESTAMP; 2436 layout.pass_timestamp = true; 2437 } else { 2438 layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS; 2439 } 2440 layout.pass_frame_status = 1; 2441 ret = dpni_set_buffer_layout(dpni_dev, CMD_PRI_LOW, priv->token, 2442 DPNI_QUEUE_TX, &layout); 2443 if (ret) { 2444 DPAA2_PMD_ERR("Error (%d) in setting tx buffer layout", ret); 2445 goto init_err; 2446 } 2447 2448 /* ... tx-conf and error buffer layout ... */ 2449 memset(&layout, 0, sizeof(struct dpni_buffer_layout)); 2450 if (priv->tx_conf_en) { 2451 layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS | 2452 DPNI_BUF_LAYOUT_OPT_TIMESTAMP; 2453 layout.pass_timestamp = true; 2454 } else { 2455 layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS; 2456 } 2457 layout.pass_frame_status = 1; 2458 ret = dpni_set_buffer_layout(dpni_dev, CMD_PRI_LOW, priv->token, 2459 DPNI_QUEUE_TX_CONFIRM, &layout); 2460 if (ret) { 2461 DPAA2_PMD_ERR("Error (%d) in setting tx-conf buffer layout", 2462 ret); 2463 goto init_err; 2464 } 2465 2466 eth_dev->dev_ops = &dpaa2_ethdev_ops; 2467 2468 if (dpaa2_get_devargs(dev->devargs, DRIVER_LOOPBACK_MODE)) { 2469 eth_dev->rx_pkt_burst = dpaa2_dev_loopback_rx; 2470 DPAA2_PMD_INFO("Loopback mode"); 2471 } else if (dpaa2_get_devargs(dev->devargs, DRIVER_NO_PREFETCH_MODE)) { 2472 eth_dev->rx_pkt_burst = dpaa2_dev_rx; 2473 DPAA2_PMD_INFO("No Prefetch mode"); 2474 } else { 2475 eth_dev->rx_pkt_burst = dpaa2_dev_prefetch_rx; 2476 } 2477 eth_dev->tx_pkt_burst = dpaa2_dev_tx; 2478 2479 /*Init fields w.r.t. classficaition*/ 2480 memset(&priv->extract.qos_key_cfg, 0, sizeof(struct dpkg_profile_cfg)); 2481 priv->extract.qos_extract_param = (size_t)rte_malloc(NULL, 256, 64); 2482 if (!priv->extract.qos_extract_param) { 2483 DPAA2_PMD_ERR(" Error(%d) in allocation resources for flow " 2484 " classificaiton ", ret); 2485 goto init_err; 2486 } 2487 for (i = 0; i < MAX_TCS; i++) { 2488 memset(&priv->extract.fs_key_cfg[i], 0, 2489 sizeof(struct dpkg_profile_cfg)); 2490 priv->extract.fs_extract_param[i] = 2491 (size_t)rte_malloc(NULL, 256, 64); 2492 if (!priv->extract.fs_extract_param[i]) { 2493 DPAA2_PMD_ERR(" Error(%d) in allocation resources for flow classificaiton", 2494 ret); 2495 goto init_err; 2496 } 2497 } 2498 2499 ret = dpni_set_max_frame_length(dpni_dev, CMD_PRI_LOW, priv->token, 2500 RTE_ETHER_MAX_LEN - RTE_ETHER_CRC_LEN 2501 + VLAN_TAG_SIZE); 2502 if (ret) { 2503 DPAA2_PMD_ERR("Unable to set mtu. check config"); 2504 goto init_err; 2505 } 2506 2507 /*TODO To enable soft parser support DPAA2 driver needs to integrate 2508 * with external entity to receive byte code for software sequence 2509 * and same will be offload to the H/W using MC interface. 2510 * Currently it is assumed that DPAA2 driver has byte code by some 2511 * mean and same if offloaded to H/W. 2512 */ 2513 if (getenv("DPAA2_ENABLE_SOFT_PARSER")) { 2514 WRIOP_SS_INITIALIZER(priv); 2515 ret = dpaa2_eth_load_wriop_soft_parser(priv, DPNI_SS_INGRESS); 2516 if (ret < 0) { 2517 DPAA2_PMD_ERR(" Error(%d) in loading softparser\n", 2518 ret); 2519 return ret; 2520 } 2521 2522 ret = dpaa2_eth_enable_wriop_soft_parser(priv, 2523 DPNI_SS_INGRESS); 2524 if (ret < 0) { 2525 DPAA2_PMD_ERR(" Error(%d) in enabling softparser\n", 2526 ret); 2527 return ret; 2528 } 2529 } 2530 RTE_LOG(INFO, PMD, "%s: netdev created\n", eth_dev->data->name); 2531 return 0; 2532 init_err: 2533 dpaa2_dev_uninit(eth_dev); 2534 return ret; 2535 } 2536 2537 static int 2538 dpaa2_dev_uninit(struct rte_eth_dev *eth_dev) 2539 { 2540 struct dpaa2_dev_priv *priv = eth_dev->data->dev_private; 2541 struct fsl_mc_io *dpni = (struct fsl_mc_io *)eth_dev->process_private; 2542 int i, ret; 2543 2544 PMD_INIT_FUNC_TRACE(); 2545 2546 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 2547 return 0; 2548 2549 if (!dpni) { 2550 DPAA2_PMD_WARN("Already closed or not started"); 2551 return -1; 2552 } 2553 2554 dpaa2_dev_close(eth_dev); 2555 2556 dpaa2_free_rx_tx_queues(eth_dev); 2557 2558 /* Close the device at underlying layer*/ 2559 ret = dpni_close(dpni, CMD_PRI_LOW, priv->token); 2560 if (ret) { 2561 DPAA2_PMD_ERR( 2562 "Failure closing dpni device with err code %d", 2563 ret); 2564 } 2565 2566 /* Free the allocated memory for ethernet private data and dpni*/ 2567 priv->hw = NULL; 2568 eth_dev->process_private = NULL; 2569 rte_free(dpni); 2570 2571 for (i = 0; i < MAX_TCS; i++) { 2572 if (priv->extract.fs_extract_param[i]) 2573 rte_free((void *)(size_t)priv->extract.fs_extract_param[i]); 2574 } 2575 2576 if (priv->extract.qos_extract_param) 2577 rte_free((void *)(size_t)priv->extract.qos_extract_param); 2578 2579 eth_dev->dev_ops = NULL; 2580 eth_dev->rx_pkt_burst = NULL; 2581 eth_dev->tx_pkt_burst = NULL; 2582 2583 DPAA2_PMD_INFO("%s: netdev deleted", eth_dev->data->name); 2584 return 0; 2585 } 2586 2587 static int 2588 rte_dpaa2_probe(struct rte_dpaa2_driver *dpaa2_drv, 2589 struct rte_dpaa2_device *dpaa2_dev) 2590 { 2591 struct rte_eth_dev *eth_dev; 2592 struct dpaa2_dev_priv *dev_priv; 2593 int diag; 2594 2595 if ((DPAA2_MBUF_HW_ANNOTATION + DPAA2_FD_PTA_SIZE) > 2596 RTE_PKTMBUF_HEADROOM) { 2597 DPAA2_PMD_ERR( 2598 "RTE_PKTMBUF_HEADROOM(%d) shall be > DPAA2 Annotation req(%d)", 2599 RTE_PKTMBUF_HEADROOM, 2600 DPAA2_MBUF_HW_ANNOTATION + DPAA2_FD_PTA_SIZE); 2601 2602 return -1; 2603 } 2604 2605 if (rte_eal_process_type() == RTE_PROC_PRIMARY) { 2606 eth_dev = rte_eth_dev_allocate(dpaa2_dev->device.name); 2607 if (!eth_dev) 2608 return -ENODEV; 2609 dev_priv = rte_zmalloc("ethdev private structure", 2610 sizeof(struct dpaa2_dev_priv), 2611 RTE_CACHE_LINE_SIZE); 2612 if (dev_priv == NULL) { 2613 DPAA2_PMD_CRIT( 2614 "Unable to allocate memory for private data"); 2615 rte_eth_dev_release_port(eth_dev); 2616 return -ENOMEM; 2617 } 2618 eth_dev->data->dev_private = (void *)dev_priv; 2619 /* Store a pointer to eth_dev in dev_private */ 2620 dev_priv->eth_dev = eth_dev; 2621 dev_priv->tx_conf_en = 0; 2622 } else { 2623 eth_dev = rte_eth_dev_attach_secondary(dpaa2_dev->device.name); 2624 if (!eth_dev) { 2625 DPAA2_PMD_DEBUG("returning enodev"); 2626 return -ENODEV; 2627 } 2628 } 2629 2630 eth_dev->device = &dpaa2_dev->device; 2631 2632 dpaa2_dev->eth_dev = eth_dev; 2633 eth_dev->data->rx_mbuf_alloc_failed = 0; 2634 2635 if (dpaa2_drv->drv_flags & RTE_DPAA2_DRV_INTR_LSC) 2636 eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC; 2637 2638 /* Invoke PMD device initialization function */ 2639 diag = dpaa2_dev_init(eth_dev); 2640 if (diag == 0) { 2641 rte_eth_dev_probing_finish(eth_dev); 2642 return 0; 2643 } 2644 2645 rte_eth_dev_release_port(eth_dev); 2646 return diag; 2647 } 2648 2649 static int 2650 rte_dpaa2_remove(struct rte_dpaa2_device *dpaa2_dev) 2651 { 2652 struct rte_eth_dev *eth_dev; 2653 2654 eth_dev = dpaa2_dev->eth_dev; 2655 dpaa2_dev_uninit(eth_dev); 2656 2657 rte_eth_dev_release_port(eth_dev); 2658 2659 return 0; 2660 } 2661 2662 static struct rte_dpaa2_driver rte_dpaa2_pmd = { 2663 .drv_flags = RTE_DPAA2_DRV_INTR_LSC | RTE_DPAA2_DRV_IOVA_AS_VA, 2664 .drv_type = DPAA2_ETH, 2665 .probe = rte_dpaa2_probe, 2666 .remove = rte_dpaa2_remove, 2667 }; 2668 2669 RTE_PMD_REGISTER_DPAA2(net_dpaa2, rte_dpaa2_pmd); 2670 RTE_PMD_REGISTER_PARAM_STRING(net_dpaa2, 2671 DRIVER_LOOPBACK_MODE "=<int> " 2672 DRIVER_NO_PREFETCH_MODE "=<int>"); 2673 RTE_INIT(dpaa2_pmd_init_log) 2674 { 2675 dpaa2_logtype_pmd = rte_log_register("pmd.net.dpaa2"); 2676 if (dpaa2_logtype_pmd >= 0) 2677 rte_log_set_level(dpaa2_logtype_pmd, RTE_LOG_NOTICE); 2678 } 2679