/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 */

#include <rte_string_fns.h>
#include <ethdev_driver.h>
#include <ethdev_pci.h>
#include <rte_cycles.h>
#include <rte_malloc.h>
#include <rte_alarm.h>
#include <rte_ether.h>

#include "lio_logs.h"
#include "lio_23xx_vf.h"
#include "lio_ethdev.h"
#include "lio_rxtx.h"

/* Default RSS key in use */
static uint8_t lio_rss_key[40] = {
	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
};

static const struct rte_eth_desc_lim lio_rx_desc_lim = {
	.nb_max = CN23XX_MAX_OQ_DESCRIPTORS,
	.nb_min = CN23XX_MIN_OQ_DESCRIPTORS,
	.nb_align = 1,
};

static const struct rte_eth_desc_lim lio_tx_desc_lim = {
	.nb_max = CN23XX_MAX_IQ_DESCRIPTORS,
	.nb_min = CN23XX_MIN_IQ_DESCRIPTORS,
	.nb_align = 1,
};

/* Wait for control command to reach nic. */
static uint16_t
lio_wait_for_ctrl_cmd(struct lio_device *lio_dev,
		      struct lio_dev_ctrl_cmd *ctrl_cmd)
{
	uint16_t timeout = LIO_MAX_CMD_TIMEOUT;

	while ((ctrl_cmd->cond == 0) && --timeout) {
		lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);
		rte_delay_ms(1);
	}

	return !timeout;
}

/**
 * \brief Send Rx control command
 * @param eth_dev Pointer to the structure rte_eth_dev
 * @param start_stop whether to start or stop
 */
static int
lio_send_rx_ctrl_cmd(struct rte_eth_dev *eth_dev, int start_stop)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	struct lio_dev_ctrl_cmd ctrl_cmd;
	struct lio_ctrl_pkt ctrl_pkt;

	/* flush added to prevent cmd failure
	 * incase the queue is full
	 */
	lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);

	memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
	memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));

	ctrl_cmd.eth_dev = eth_dev;
	ctrl_cmd.cond = 0;

	ctrl_pkt.ncmd.s.cmd = LIO_CMD_RX_CTL;
	ctrl_pkt.ncmd.s.param1 = start_stop;
	ctrl_pkt.ctrl_cmd = &ctrl_cmd;

	if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
		lio_dev_err(lio_dev, "Failed to send RX Control message\n");
		return -1;
	}

	if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) {
		lio_dev_err(lio_dev, "RX Control command timed out\n");
		return -1;
	}

	return 0;
}

/* store statistics names and its offset in stats structure */
struct rte_lio_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned int offset;
};

static const struct rte_lio_xstats_name_off rte_lio_stats_strings[] = {
	{"rx_pkts", offsetof(struct octeon_rx_stats, total_rcvd)},
	{"rx_bytes", offsetof(struct octeon_rx_stats, bytes_rcvd)},
	{"rx_broadcast_pkts", offsetof(struct octeon_rx_stats, total_bcst)},
	{"rx_multicast_pkts", offsetof(struct octeon_rx_stats, total_mcst)},
	{"rx_flow_ctrl_pkts", offsetof(struct octeon_rx_stats, ctl_rcvd)},
	{"rx_fifo_err", offsetof(struct octeon_rx_stats, fifo_err)},
	{"rx_dmac_drop", offsetof(struct octeon_rx_stats, dmac_drop)},
	{"rx_fcs_err", offsetof(struct octeon_rx_stats, fcs_err)},
	{"rx_jabber_err", offsetof(struct octeon_rx_stats, jabber_err)},
	{"rx_l2_err", offsetof(struct octeon_rx_stats, l2_err)},
	{"rx_vxlan_pkts", offsetof(struct octeon_rx_stats, fw_rx_vxlan)},
	{"rx_vxlan_err", offsetof(struct octeon_rx_stats, fw_rx_vxlan_err)},
113 {"rx_lro_pkts", offsetof(struct octeon_rx_stats, fw_lro_pkts)}, 114 {"tx_pkts", (offsetof(struct octeon_tx_stats, total_pkts_sent)) + 115 sizeof(struct octeon_rx_stats)}, 116 {"tx_bytes", (offsetof(struct octeon_tx_stats, total_bytes_sent)) + 117 sizeof(struct octeon_rx_stats)}, 118 {"tx_broadcast_pkts", 119 (offsetof(struct octeon_tx_stats, bcast_pkts_sent)) + 120 sizeof(struct octeon_rx_stats)}, 121 {"tx_multicast_pkts", 122 (offsetof(struct octeon_tx_stats, mcast_pkts_sent)) + 123 sizeof(struct octeon_rx_stats)}, 124 {"tx_flow_ctrl_pkts", (offsetof(struct octeon_tx_stats, ctl_sent)) + 125 sizeof(struct octeon_rx_stats)}, 126 {"tx_fifo_err", (offsetof(struct octeon_tx_stats, fifo_err)) + 127 sizeof(struct octeon_rx_stats)}, 128 {"tx_total_collisions", (offsetof(struct octeon_tx_stats, 129 total_collisions)) + 130 sizeof(struct octeon_rx_stats)}, 131 {"tx_tso", (offsetof(struct octeon_tx_stats, fw_tso)) + 132 sizeof(struct octeon_rx_stats)}, 133 {"tx_vxlan_pkts", (offsetof(struct octeon_tx_stats, fw_tx_vxlan)) + 134 sizeof(struct octeon_rx_stats)}, 135 }; 136 137 #define LIO_NB_XSTATS RTE_DIM(rte_lio_stats_strings) 138 139 /* Get hw stats of the port */ 140 static int 141 lio_dev_xstats_get(struct rte_eth_dev *eth_dev, struct rte_eth_xstat *xstats, 142 unsigned int n) 143 { 144 struct lio_device *lio_dev = LIO_DEV(eth_dev); 145 uint16_t timeout = LIO_MAX_CMD_TIMEOUT; 146 struct octeon_link_stats *hw_stats; 147 struct lio_link_stats_resp *resp; 148 struct lio_soft_command *sc; 149 uint32_t resp_size; 150 unsigned int i; 151 int retval; 152 153 if (!lio_dev->intf_open) { 154 lio_dev_err(lio_dev, "Port %d down\n", 155 lio_dev->port_id); 156 return -EINVAL; 157 } 158 159 if (n < LIO_NB_XSTATS) 160 return LIO_NB_XSTATS; 161 162 resp_size = sizeof(struct lio_link_stats_resp); 163 sc = lio_alloc_soft_command(lio_dev, 0, resp_size, 0); 164 if (sc == NULL) 165 return -ENOMEM; 166 167 resp = (struct lio_link_stats_resp *)sc->virtrptr; 168 lio_prepare_soft_command(lio_dev, sc, LIO_OPCODE, 169 LIO_OPCODE_PORT_STATS, 0, 0, 0); 170 171 /* Setting wait time in seconds */ 172 sc->wait_time = LIO_MAX_CMD_TIMEOUT / 1000; 173 174 retval = lio_send_soft_command(lio_dev, sc); 175 if (retval == LIO_IQ_SEND_FAILED) { 176 lio_dev_err(lio_dev, "failed to get port stats from firmware. 
status: %x\n", 177 retval); 178 goto get_stats_fail; 179 } 180 181 while ((*sc->status_word == LIO_COMPLETION_WORD_INIT) && --timeout) { 182 lio_flush_iq(lio_dev, lio_dev->instr_queue[sc->iq_no]); 183 lio_process_ordered_list(lio_dev); 184 rte_delay_ms(1); 185 } 186 187 retval = resp->status; 188 if (retval) { 189 lio_dev_err(lio_dev, "failed to get port stats from firmware\n"); 190 goto get_stats_fail; 191 } 192 193 lio_swap_8B_data((uint64_t *)(&resp->link_stats), 194 sizeof(struct octeon_link_stats) >> 3); 195 196 hw_stats = &resp->link_stats; 197 198 for (i = 0; i < LIO_NB_XSTATS; i++) { 199 xstats[i].id = i; 200 xstats[i].value = 201 *(uint64_t *)(((char *)hw_stats) + 202 rte_lio_stats_strings[i].offset); 203 } 204 205 lio_free_soft_command(sc); 206 207 return LIO_NB_XSTATS; 208 209 get_stats_fail: 210 lio_free_soft_command(sc); 211 212 return -1; 213 } 214 215 static int 216 lio_dev_xstats_get_names(struct rte_eth_dev *eth_dev, 217 struct rte_eth_xstat_name *xstats_names, 218 unsigned limit __rte_unused) 219 { 220 struct lio_device *lio_dev = LIO_DEV(eth_dev); 221 unsigned int i; 222 223 if (!lio_dev->intf_open) { 224 lio_dev_err(lio_dev, "Port %d down\n", 225 lio_dev->port_id); 226 return -EINVAL; 227 } 228 229 if (xstats_names == NULL) 230 return LIO_NB_XSTATS; 231 232 /* Note: limit checked in rte_eth_xstats_names() */ 233 234 for (i = 0; i < LIO_NB_XSTATS; i++) { 235 snprintf(xstats_names[i].name, sizeof(xstats_names[i].name), 236 "%s", rte_lio_stats_strings[i].name); 237 } 238 239 return LIO_NB_XSTATS; 240 } 241 242 /* Reset hw stats for the port */ 243 static int 244 lio_dev_xstats_reset(struct rte_eth_dev *eth_dev) 245 { 246 struct lio_device *lio_dev = LIO_DEV(eth_dev); 247 struct lio_dev_ctrl_cmd ctrl_cmd; 248 struct lio_ctrl_pkt ctrl_pkt; 249 int ret; 250 251 if (!lio_dev->intf_open) { 252 lio_dev_err(lio_dev, "Port %d down\n", 253 lio_dev->port_id); 254 return -EINVAL; 255 } 256 257 /* flush added to prevent cmd failure 258 * incase the queue is full 259 */ 260 lio_flush_iq(lio_dev, lio_dev->instr_queue[0]); 261 262 memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt)); 263 memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd)); 264 265 ctrl_cmd.eth_dev = eth_dev; 266 ctrl_cmd.cond = 0; 267 268 ctrl_pkt.ncmd.s.cmd = LIO_CMD_CLEAR_STATS; 269 ctrl_pkt.ctrl_cmd = &ctrl_cmd; 270 271 ret = lio_send_ctrl_pkt(lio_dev, &ctrl_pkt); 272 if (ret != 0) { 273 lio_dev_err(lio_dev, "Failed to send clear stats command\n"); 274 return ret; 275 } 276 277 ret = lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd); 278 if (ret != 0) { 279 lio_dev_err(lio_dev, "Clear stats command timed out\n"); 280 return ret; 281 } 282 283 /* clear stored per queue stats */ 284 RTE_FUNC_PTR_OR_ERR_RET(*eth_dev->dev_ops->stats_reset, 0); 285 return (*eth_dev->dev_ops->stats_reset)(eth_dev); 286 } 287 288 /* Retrieve the device statistics (# packets in/out, # bytes in/out, etc */ 289 static int 290 lio_dev_stats_get(struct rte_eth_dev *eth_dev, 291 struct rte_eth_stats *stats) 292 { 293 struct lio_device *lio_dev = LIO_DEV(eth_dev); 294 struct lio_droq_stats *oq_stats; 295 struct lio_iq_stats *iq_stats; 296 struct lio_instr_queue *txq; 297 struct lio_droq *droq; 298 int i, iq_no, oq_no; 299 uint64_t bytes = 0; 300 uint64_t pkts = 0; 301 uint64_t drop = 0; 302 303 for (i = 0; i < eth_dev->data->nb_tx_queues; i++) { 304 iq_no = lio_dev->linfo.txpciq[i].s.q_no; 305 txq = lio_dev->instr_queue[iq_no]; 306 if (txq != NULL) { 307 iq_stats = &txq->stats; 308 pkts += iq_stats->tx_done; 309 drop += iq_stats->tx_dropped; 310 bytes += 
iq_stats->tx_tot_bytes; 311 } 312 } 313 314 stats->opackets = pkts; 315 stats->obytes = bytes; 316 stats->oerrors = drop; 317 318 pkts = 0; 319 drop = 0; 320 bytes = 0; 321 322 for (i = 0; i < eth_dev->data->nb_rx_queues; i++) { 323 oq_no = lio_dev->linfo.rxpciq[i].s.q_no; 324 droq = lio_dev->droq[oq_no]; 325 if (droq != NULL) { 326 oq_stats = &droq->stats; 327 pkts += oq_stats->rx_pkts_received; 328 drop += (oq_stats->rx_dropped + 329 oq_stats->dropped_toomany + 330 oq_stats->dropped_nomem); 331 bytes += oq_stats->rx_bytes_received; 332 } 333 } 334 stats->ibytes = bytes; 335 stats->ipackets = pkts; 336 stats->ierrors = drop; 337 338 return 0; 339 } 340 341 static int 342 lio_dev_stats_reset(struct rte_eth_dev *eth_dev) 343 { 344 struct lio_device *lio_dev = LIO_DEV(eth_dev); 345 struct lio_droq_stats *oq_stats; 346 struct lio_iq_stats *iq_stats; 347 struct lio_instr_queue *txq; 348 struct lio_droq *droq; 349 int i, iq_no, oq_no; 350 351 for (i = 0; i < eth_dev->data->nb_tx_queues; i++) { 352 iq_no = lio_dev->linfo.txpciq[i].s.q_no; 353 txq = lio_dev->instr_queue[iq_no]; 354 if (txq != NULL) { 355 iq_stats = &txq->stats; 356 memset(iq_stats, 0, sizeof(struct lio_iq_stats)); 357 } 358 } 359 360 for (i = 0; i < eth_dev->data->nb_rx_queues; i++) { 361 oq_no = lio_dev->linfo.rxpciq[i].s.q_no; 362 droq = lio_dev->droq[oq_no]; 363 if (droq != NULL) { 364 oq_stats = &droq->stats; 365 memset(oq_stats, 0, sizeof(struct lio_droq_stats)); 366 } 367 } 368 369 return 0; 370 } 371 372 static int 373 lio_dev_info_get(struct rte_eth_dev *eth_dev, 374 struct rte_eth_dev_info *devinfo) 375 { 376 struct lio_device *lio_dev = LIO_DEV(eth_dev); 377 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); 378 379 switch (pci_dev->id.subsystem_device_id) { 380 /* CN23xx 10G cards */ 381 case PCI_SUBSYS_DEV_ID_CN2350_210: 382 case PCI_SUBSYS_DEV_ID_CN2360_210: 383 case PCI_SUBSYS_DEV_ID_CN2350_210SVPN3: 384 case PCI_SUBSYS_DEV_ID_CN2360_210SVPN3: 385 case PCI_SUBSYS_DEV_ID_CN2350_210SVPT: 386 case PCI_SUBSYS_DEV_ID_CN2360_210SVPT: 387 devinfo->speed_capa = ETH_LINK_SPEED_10G; 388 break; 389 /* CN23xx 25G cards */ 390 case PCI_SUBSYS_DEV_ID_CN2350_225: 391 case PCI_SUBSYS_DEV_ID_CN2360_225: 392 devinfo->speed_capa = ETH_LINK_SPEED_25G; 393 break; 394 default: 395 devinfo->speed_capa = ETH_LINK_SPEED_10G; 396 lio_dev_err(lio_dev, 397 "Unknown CN23XX subsystem device id. 
Setting 10G as default link speed.\n"); 398 return -EINVAL; 399 } 400 401 devinfo->max_rx_queues = lio_dev->max_rx_queues; 402 devinfo->max_tx_queues = lio_dev->max_tx_queues; 403 404 devinfo->min_rx_bufsize = LIO_MIN_RX_BUF_SIZE; 405 devinfo->max_rx_pktlen = LIO_MAX_RX_PKTLEN; 406 407 devinfo->max_mac_addrs = 1; 408 409 devinfo->rx_offload_capa = (DEV_RX_OFFLOAD_IPV4_CKSUM | 410 DEV_RX_OFFLOAD_UDP_CKSUM | 411 DEV_RX_OFFLOAD_TCP_CKSUM | 412 DEV_RX_OFFLOAD_VLAN_STRIP | 413 DEV_RX_OFFLOAD_RSS_HASH); 414 devinfo->tx_offload_capa = (DEV_TX_OFFLOAD_IPV4_CKSUM | 415 DEV_TX_OFFLOAD_UDP_CKSUM | 416 DEV_TX_OFFLOAD_TCP_CKSUM | 417 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM); 418 419 devinfo->rx_desc_lim = lio_rx_desc_lim; 420 devinfo->tx_desc_lim = lio_tx_desc_lim; 421 422 devinfo->reta_size = LIO_RSS_MAX_TABLE_SZ; 423 devinfo->hash_key_size = LIO_RSS_MAX_KEY_SZ; 424 devinfo->flow_type_rss_offloads = (ETH_RSS_IPV4 | 425 ETH_RSS_NONFRAG_IPV4_TCP | 426 ETH_RSS_IPV6 | 427 ETH_RSS_NONFRAG_IPV6_TCP | 428 ETH_RSS_IPV6_EX | 429 ETH_RSS_IPV6_TCP_EX); 430 return 0; 431 } 432 433 static int 434 lio_dev_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu) 435 { 436 struct lio_device *lio_dev = LIO_DEV(eth_dev); 437 uint16_t pf_mtu = lio_dev->linfo.link.s.mtu; 438 uint32_t frame_len = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN; 439 struct lio_dev_ctrl_cmd ctrl_cmd; 440 struct lio_ctrl_pkt ctrl_pkt; 441 442 PMD_INIT_FUNC_TRACE(); 443 444 if (!lio_dev->intf_open) { 445 lio_dev_err(lio_dev, "Port %d down, can't set MTU\n", 446 lio_dev->port_id); 447 return -EINVAL; 448 } 449 450 /* check if VF MTU is within allowed range. 451 * New value should not exceed PF MTU. 452 */ 453 if (mtu < RTE_ETHER_MIN_MTU || mtu > pf_mtu) { 454 lio_dev_err(lio_dev, "VF MTU should be >= %d and <= %d\n", 455 RTE_ETHER_MIN_MTU, pf_mtu); 456 return -EINVAL; 457 } 458 459 /* flush added to prevent cmd failure 460 * incase the queue is full 461 */ 462 lio_flush_iq(lio_dev, lio_dev->instr_queue[0]); 463 464 memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt)); 465 memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd)); 466 467 ctrl_cmd.eth_dev = eth_dev; 468 ctrl_cmd.cond = 0; 469 470 ctrl_pkt.ncmd.s.cmd = LIO_CMD_CHANGE_MTU; 471 ctrl_pkt.ncmd.s.param1 = mtu; 472 ctrl_pkt.ctrl_cmd = &ctrl_cmd; 473 474 if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) { 475 lio_dev_err(lio_dev, "Failed to send command to change MTU\n"); 476 return -1; 477 } 478 479 if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) { 480 lio_dev_err(lio_dev, "Command to change MTU timed out\n"); 481 return -1; 482 } 483 484 if (frame_len > LIO_ETH_MAX_LEN) 485 eth_dev->data->dev_conf.rxmode.offloads |= 486 DEV_RX_OFFLOAD_JUMBO_FRAME; 487 else 488 eth_dev->data->dev_conf.rxmode.offloads &= 489 ~DEV_RX_OFFLOAD_JUMBO_FRAME; 490 491 eth_dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_len; 492 eth_dev->data->mtu = mtu; 493 494 return 0; 495 } 496 497 static int 498 lio_dev_rss_reta_update(struct rte_eth_dev *eth_dev, 499 struct rte_eth_rss_reta_entry64 *reta_conf, 500 uint16_t reta_size) 501 { 502 struct lio_device *lio_dev = LIO_DEV(eth_dev); 503 struct lio_rss_ctx *rss_state = &lio_dev->rss_state; 504 struct lio_rss_set *rss_param; 505 struct lio_dev_ctrl_cmd ctrl_cmd; 506 struct lio_ctrl_pkt ctrl_pkt; 507 int i, j, index; 508 509 if (!lio_dev->intf_open) { 510 lio_dev_err(lio_dev, "Port %d down, can't update reta\n", 511 lio_dev->port_id); 512 return -EINVAL; 513 } 514 515 if (reta_size != LIO_RSS_MAX_TABLE_SZ) { 516 lio_dev_err(lio_dev, 517 "The size of hash lookup table configured (%d) doesn't match 
the number hardware can supported (%d)\n", 518 reta_size, LIO_RSS_MAX_TABLE_SZ); 519 return -EINVAL; 520 } 521 522 /* flush added to prevent cmd failure 523 * incase the queue is full 524 */ 525 lio_flush_iq(lio_dev, lio_dev->instr_queue[0]); 526 527 memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt)); 528 memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd)); 529 530 rss_param = (struct lio_rss_set *)&ctrl_pkt.udd[0]; 531 532 ctrl_cmd.eth_dev = eth_dev; 533 ctrl_cmd.cond = 0; 534 535 ctrl_pkt.ncmd.s.cmd = LIO_CMD_SET_RSS; 536 ctrl_pkt.ncmd.s.more = sizeof(struct lio_rss_set) >> 3; 537 ctrl_pkt.ctrl_cmd = &ctrl_cmd; 538 539 rss_param->param.flags = 0xF; 540 rss_param->param.flags &= ~LIO_RSS_PARAM_ITABLE_UNCHANGED; 541 rss_param->param.itablesize = LIO_RSS_MAX_TABLE_SZ; 542 543 for (i = 0; i < (reta_size / RTE_RETA_GROUP_SIZE); i++) { 544 for (j = 0; j < RTE_RETA_GROUP_SIZE; j++) { 545 if ((reta_conf[i].mask) & ((uint64_t)1 << j)) { 546 index = (i * RTE_RETA_GROUP_SIZE) + j; 547 rss_state->itable[index] = reta_conf[i].reta[j]; 548 } 549 } 550 } 551 552 rss_state->itable_size = LIO_RSS_MAX_TABLE_SZ; 553 memcpy(rss_param->itable, rss_state->itable, rss_state->itable_size); 554 555 lio_swap_8B_data((uint64_t *)rss_param, LIO_RSS_PARAM_SIZE >> 3); 556 557 if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) { 558 lio_dev_err(lio_dev, "Failed to set rss hash\n"); 559 return -1; 560 } 561 562 if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) { 563 lio_dev_err(lio_dev, "Set rss hash timed out\n"); 564 return -1; 565 } 566 567 return 0; 568 } 569 570 static int 571 lio_dev_rss_reta_query(struct rte_eth_dev *eth_dev, 572 struct rte_eth_rss_reta_entry64 *reta_conf, 573 uint16_t reta_size) 574 { 575 struct lio_device *lio_dev = LIO_DEV(eth_dev); 576 struct lio_rss_ctx *rss_state = &lio_dev->rss_state; 577 int i, num; 578 579 if (reta_size != LIO_RSS_MAX_TABLE_SZ) { 580 lio_dev_err(lio_dev, 581 "The size of hash lookup table configured (%d) doesn't match the number hardware can supported (%d)\n", 582 reta_size, LIO_RSS_MAX_TABLE_SZ); 583 return -EINVAL; 584 } 585 586 num = reta_size / RTE_RETA_GROUP_SIZE; 587 588 for (i = 0; i < num; i++) { 589 memcpy(reta_conf->reta, 590 &rss_state->itable[i * RTE_RETA_GROUP_SIZE], 591 RTE_RETA_GROUP_SIZE); 592 reta_conf++; 593 } 594 595 return 0; 596 } 597 598 static int 599 lio_dev_rss_hash_conf_get(struct rte_eth_dev *eth_dev, 600 struct rte_eth_rss_conf *rss_conf) 601 { 602 struct lio_device *lio_dev = LIO_DEV(eth_dev); 603 struct lio_rss_ctx *rss_state = &lio_dev->rss_state; 604 uint8_t *hash_key = NULL; 605 uint64_t rss_hf = 0; 606 607 if (rss_state->hash_disable) { 608 lio_dev_info(lio_dev, "RSS disabled in nic\n"); 609 rss_conf->rss_hf = 0; 610 return 0; 611 } 612 613 /* Get key value */ 614 hash_key = rss_conf->rss_key; 615 if (hash_key != NULL) 616 memcpy(hash_key, rss_state->hash_key, rss_state->hash_key_size); 617 618 if (rss_state->ip) 619 rss_hf |= ETH_RSS_IPV4; 620 if (rss_state->tcp_hash) 621 rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP; 622 if (rss_state->ipv6) 623 rss_hf |= ETH_RSS_IPV6; 624 if (rss_state->ipv6_tcp_hash) 625 rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP; 626 if (rss_state->ipv6_ex) 627 rss_hf |= ETH_RSS_IPV6_EX; 628 if (rss_state->ipv6_tcp_ex_hash) 629 rss_hf |= ETH_RSS_IPV6_TCP_EX; 630 631 rss_conf->rss_hf = rss_hf; 632 633 return 0; 634 } 635 636 static int 637 lio_dev_rss_hash_update(struct rte_eth_dev *eth_dev, 638 struct rte_eth_rss_conf *rss_conf) 639 { 640 struct lio_device *lio_dev = LIO_DEV(eth_dev); 641 struct lio_rss_ctx *rss_state = &lio_dev->rss_state; 
642 struct lio_rss_set *rss_param; 643 struct lio_dev_ctrl_cmd ctrl_cmd; 644 struct lio_ctrl_pkt ctrl_pkt; 645 646 if (!lio_dev->intf_open) { 647 lio_dev_err(lio_dev, "Port %d down, can't update hash\n", 648 lio_dev->port_id); 649 return -EINVAL; 650 } 651 652 /* flush added to prevent cmd failure 653 * incase the queue is full 654 */ 655 lio_flush_iq(lio_dev, lio_dev->instr_queue[0]); 656 657 memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt)); 658 memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd)); 659 660 rss_param = (struct lio_rss_set *)&ctrl_pkt.udd[0]; 661 662 ctrl_cmd.eth_dev = eth_dev; 663 ctrl_cmd.cond = 0; 664 665 ctrl_pkt.ncmd.s.cmd = LIO_CMD_SET_RSS; 666 ctrl_pkt.ncmd.s.more = sizeof(struct lio_rss_set) >> 3; 667 ctrl_pkt.ctrl_cmd = &ctrl_cmd; 668 669 rss_param->param.flags = 0xF; 670 671 if (rss_conf->rss_key) { 672 rss_param->param.flags &= ~LIO_RSS_PARAM_HASH_KEY_UNCHANGED; 673 rss_state->hash_key_size = LIO_RSS_MAX_KEY_SZ; 674 rss_param->param.hashkeysize = LIO_RSS_MAX_KEY_SZ; 675 memcpy(rss_state->hash_key, rss_conf->rss_key, 676 rss_state->hash_key_size); 677 memcpy(rss_param->key, rss_state->hash_key, 678 rss_state->hash_key_size); 679 } 680 681 if ((rss_conf->rss_hf & LIO_RSS_OFFLOAD_ALL) == 0) { 682 /* Can't disable rss through hash flags, 683 * if it is enabled by default during init 684 */ 685 if (!rss_state->hash_disable) 686 return -EINVAL; 687 688 /* This is for --disable-rss during testpmd launch */ 689 rss_param->param.flags |= LIO_RSS_PARAM_DISABLE_RSS; 690 } else { 691 uint32_t hashinfo = 0; 692 693 /* Can't enable rss if disabled by default during init */ 694 if (rss_state->hash_disable) 695 return -EINVAL; 696 697 if (rss_conf->rss_hf & ETH_RSS_IPV4) { 698 hashinfo |= LIO_RSS_HASH_IPV4; 699 rss_state->ip = 1; 700 } else { 701 rss_state->ip = 0; 702 } 703 704 if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) { 705 hashinfo |= LIO_RSS_HASH_TCP_IPV4; 706 rss_state->tcp_hash = 1; 707 } else { 708 rss_state->tcp_hash = 0; 709 } 710 711 if (rss_conf->rss_hf & ETH_RSS_IPV6) { 712 hashinfo |= LIO_RSS_HASH_IPV6; 713 rss_state->ipv6 = 1; 714 } else { 715 rss_state->ipv6 = 0; 716 } 717 718 if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) { 719 hashinfo |= LIO_RSS_HASH_TCP_IPV6; 720 rss_state->ipv6_tcp_hash = 1; 721 } else { 722 rss_state->ipv6_tcp_hash = 0; 723 } 724 725 if (rss_conf->rss_hf & ETH_RSS_IPV6_EX) { 726 hashinfo |= LIO_RSS_HASH_IPV6_EX; 727 rss_state->ipv6_ex = 1; 728 } else { 729 rss_state->ipv6_ex = 0; 730 } 731 732 if (rss_conf->rss_hf & ETH_RSS_IPV6_TCP_EX) { 733 hashinfo |= LIO_RSS_HASH_TCP_IPV6_EX; 734 rss_state->ipv6_tcp_ex_hash = 1; 735 } else { 736 rss_state->ipv6_tcp_ex_hash = 0; 737 } 738 739 rss_param->param.flags &= ~LIO_RSS_PARAM_HASH_INFO_UNCHANGED; 740 rss_param->param.hashinfo = hashinfo; 741 } 742 743 lio_swap_8B_data((uint64_t *)rss_param, LIO_RSS_PARAM_SIZE >> 3); 744 745 if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) { 746 lio_dev_err(lio_dev, "Failed to set rss hash\n"); 747 return -1; 748 } 749 750 if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) { 751 lio_dev_err(lio_dev, "Set rss hash timed out\n"); 752 return -1; 753 } 754 755 return 0; 756 } 757 758 /** 759 * Add vxlan dest udp port for an interface. 
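 * The port is pushed to firmware with LIO_CMD_VXLAN_PORT_CONFIG /
 * LIO_CMD_VXLAN_PORT_ADD so that traffic to this UDP destination port
 * can be recognized as VXLAN.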
760 * 761 * @param eth_dev 762 * Pointer to the structure rte_eth_dev 763 * @param udp_tnl 764 * udp tunnel conf 765 * 766 * @return 767 * On success return 0 768 * On failure return -1 769 */ 770 static int 771 lio_dev_udp_tunnel_add(struct rte_eth_dev *eth_dev, 772 struct rte_eth_udp_tunnel *udp_tnl) 773 { 774 struct lio_device *lio_dev = LIO_DEV(eth_dev); 775 struct lio_dev_ctrl_cmd ctrl_cmd; 776 struct lio_ctrl_pkt ctrl_pkt; 777 778 if (udp_tnl == NULL) 779 return -EINVAL; 780 781 if (udp_tnl->prot_type != RTE_TUNNEL_TYPE_VXLAN) { 782 lio_dev_err(lio_dev, "Unsupported tunnel type\n"); 783 return -1; 784 } 785 786 /* flush added to prevent cmd failure 787 * incase the queue is full 788 */ 789 lio_flush_iq(lio_dev, lio_dev->instr_queue[0]); 790 791 memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt)); 792 memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd)); 793 794 ctrl_cmd.eth_dev = eth_dev; 795 ctrl_cmd.cond = 0; 796 797 ctrl_pkt.ncmd.s.cmd = LIO_CMD_VXLAN_PORT_CONFIG; 798 ctrl_pkt.ncmd.s.param1 = udp_tnl->udp_port; 799 ctrl_pkt.ncmd.s.more = LIO_CMD_VXLAN_PORT_ADD; 800 ctrl_pkt.ctrl_cmd = &ctrl_cmd; 801 802 if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) { 803 lio_dev_err(lio_dev, "Failed to send VXLAN_PORT_ADD command\n"); 804 return -1; 805 } 806 807 if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) { 808 lio_dev_err(lio_dev, "VXLAN_PORT_ADD command timed out\n"); 809 return -1; 810 } 811 812 return 0; 813 } 814 815 /** 816 * Remove vxlan dest udp port for an interface. 817 * 818 * @param eth_dev 819 * Pointer to the structure rte_eth_dev 820 * @param udp_tnl 821 * udp tunnel conf 822 * 823 * @return 824 * On success return 0 825 * On failure return -1 826 */ 827 static int 828 lio_dev_udp_tunnel_del(struct rte_eth_dev *eth_dev, 829 struct rte_eth_udp_tunnel *udp_tnl) 830 { 831 struct lio_device *lio_dev = LIO_DEV(eth_dev); 832 struct lio_dev_ctrl_cmd ctrl_cmd; 833 struct lio_ctrl_pkt ctrl_pkt; 834 835 if (udp_tnl == NULL) 836 return -EINVAL; 837 838 if (udp_tnl->prot_type != RTE_TUNNEL_TYPE_VXLAN) { 839 lio_dev_err(lio_dev, "Unsupported tunnel type\n"); 840 return -1; 841 } 842 843 /* flush added to prevent cmd failure 844 * incase the queue is full 845 */ 846 lio_flush_iq(lio_dev, lio_dev->instr_queue[0]); 847 848 memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt)); 849 memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd)); 850 851 ctrl_cmd.eth_dev = eth_dev; 852 ctrl_cmd.cond = 0; 853 854 ctrl_pkt.ncmd.s.cmd = LIO_CMD_VXLAN_PORT_CONFIG; 855 ctrl_pkt.ncmd.s.param1 = udp_tnl->udp_port; 856 ctrl_pkt.ncmd.s.more = LIO_CMD_VXLAN_PORT_DEL; 857 ctrl_pkt.ctrl_cmd = &ctrl_cmd; 858 859 if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) { 860 lio_dev_err(lio_dev, "Failed to send VXLAN_PORT_DEL command\n"); 861 return -1; 862 } 863 864 if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) { 865 lio_dev_err(lio_dev, "VXLAN_PORT_DEL command timed out\n"); 866 return -1; 867 } 868 869 return 0; 870 } 871 872 static int 873 lio_dev_vlan_filter_set(struct rte_eth_dev *eth_dev, uint16_t vlan_id, int on) 874 { 875 struct lio_device *lio_dev = LIO_DEV(eth_dev); 876 struct lio_dev_ctrl_cmd ctrl_cmd; 877 struct lio_ctrl_pkt ctrl_pkt; 878 879 if (lio_dev->linfo.vlan_is_admin_assigned) 880 return -EPERM; 881 882 /* flush added to prevent cmd failure 883 * incase the queue is full 884 */ 885 lio_flush_iq(lio_dev, lio_dev->instr_queue[0]); 886 887 memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt)); 888 memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd)); 889 890 ctrl_cmd.eth_dev = eth_dev; 891 ctrl_cmd.cond = 0; 892 893 
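	/* 'on' selects between the add and delete VLAN filter commands;
	 * the VLAN ID is carried in param1.
	 */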
ctrl_pkt.ncmd.s.cmd = on ? 894 LIO_CMD_ADD_VLAN_FILTER : LIO_CMD_DEL_VLAN_FILTER; 895 ctrl_pkt.ncmd.s.param1 = vlan_id; 896 ctrl_pkt.ctrl_cmd = &ctrl_cmd; 897 898 if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) { 899 lio_dev_err(lio_dev, "Failed to %s VLAN port\n", 900 on ? "add" : "remove"); 901 return -1; 902 } 903 904 if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) { 905 lio_dev_err(lio_dev, "Command to %s VLAN port timed out\n", 906 on ? "add" : "remove"); 907 return -1; 908 } 909 910 return 0; 911 } 912 913 static uint64_t 914 lio_hweight64(uint64_t w) 915 { 916 uint64_t res = w - ((w >> 1) & 0x5555555555555555ul); 917 918 res = 919 (res & 0x3333333333333333ul) + ((res >> 2) & 0x3333333333333333ul); 920 res = (res + (res >> 4)) & 0x0F0F0F0F0F0F0F0Ful; 921 res = res + (res >> 8); 922 res = res + (res >> 16); 923 924 return (res + (res >> 32)) & 0x00000000000000FFul; 925 } 926 927 static int 928 lio_dev_link_update(struct rte_eth_dev *eth_dev, 929 int wait_to_complete __rte_unused) 930 { 931 struct lio_device *lio_dev = LIO_DEV(eth_dev); 932 struct rte_eth_link link; 933 934 /* Initialize */ 935 memset(&link, 0, sizeof(link)); 936 link.link_status = ETH_LINK_DOWN; 937 link.link_speed = ETH_SPEED_NUM_NONE; 938 link.link_duplex = ETH_LINK_HALF_DUPLEX; 939 link.link_autoneg = ETH_LINK_AUTONEG; 940 941 /* Return what we found */ 942 if (lio_dev->linfo.link.s.link_up == 0) { 943 /* Interface is down */ 944 return rte_eth_linkstatus_set(eth_dev, &link); 945 } 946 947 link.link_status = ETH_LINK_UP; /* Interface is up */ 948 link.link_duplex = ETH_LINK_FULL_DUPLEX; 949 switch (lio_dev->linfo.link.s.speed) { 950 case LIO_LINK_SPEED_10000: 951 link.link_speed = ETH_SPEED_NUM_10G; 952 break; 953 case LIO_LINK_SPEED_25000: 954 link.link_speed = ETH_SPEED_NUM_25G; 955 break; 956 default: 957 link.link_speed = ETH_SPEED_NUM_NONE; 958 link.link_duplex = ETH_LINK_HALF_DUPLEX; 959 } 960 961 return rte_eth_linkstatus_set(eth_dev, &link); 962 } 963 964 /** 965 * \brief Net device enable, disable allmulticast 966 * @param eth_dev Pointer to the structure rte_eth_dev 967 * 968 * @return 969 * On success return 0 970 * On failure return negative errno 971 */ 972 static int 973 lio_change_dev_flag(struct rte_eth_dev *eth_dev) 974 { 975 struct lio_device *lio_dev = LIO_DEV(eth_dev); 976 struct lio_dev_ctrl_cmd ctrl_cmd; 977 struct lio_ctrl_pkt ctrl_pkt; 978 979 /* flush added to prevent cmd failure 980 * incase the queue is full 981 */ 982 lio_flush_iq(lio_dev, lio_dev->instr_queue[0]); 983 984 memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt)); 985 memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd)); 986 987 ctrl_cmd.eth_dev = eth_dev; 988 ctrl_cmd.cond = 0; 989 990 /* Create a ctrl pkt command to be sent to core app. 
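	 * LIO_CMD_CHANGE_DEVFLAGS pushes the accumulated lio_dev->ifflags
	 * (unicast/allmulticast/promiscuous bits) to the firmware.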
*/ 991 ctrl_pkt.ncmd.s.cmd = LIO_CMD_CHANGE_DEVFLAGS; 992 ctrl_pkt.ncmd.s.param1 = lio_dev->ifflags; 993 ctrl_pkt.ctrl_cmd = &ctrl_cmd; 994 995 if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) { 996 lio_dev_err(lio_dev, "Failed to send change flag message\n"); 997 return -EAGAIN; 998 } 999 1000 if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) { 1001 lio_dev_err(lio_dev, "Change dev flag command timed out\n"); 1002 return -ETIMEDOUT; 1003 } 1004 1005 return 0; 1006 } 1007 1008 static int 1009 lio_dev_promiscuous_enable(struct rte_eth_dev *eth_dev) 1010 { 1011 struct lio_device *lio_dev = LIO_DEV(eth_dev); 1012 1013 if (strcmp(lio_dev->firmware_version, LIO_VF_TRUST_MIN_VERSION) < 0) { 1014 lio_dev_err(lio_dev, "Require firmware version >= %s\n", 1015 LIO_VF_TRUST_MIN_VERSION); 1016 return -EAGAIN; 1017 } 1018 1019 if (!lio_dev->intf_open) { 1020 lio_dev_err(lio_dev, "Port %d down, can't enable promiscuous\n", 1021 lio_dev->port_id); 1022 return -EAGAIN; 1023 } 1024 1025 lio_dev->ifflags |= LIO_IFFLAG_PROMISC; 1026 return lio_change_dev_flag(eth_dev); 1027 } 1028 1029 static int 1030 lio_dev_promiscuous_disable(struct rte_eth_dev *eth_dev) 1031 { 1032 struct lio_device *lio_dev = LIO_DEV(eth_dev); 1033 1034 if (strcmp(lio_dev->firmware_version, LIO_VF_TRUST_MIN_VERSION) < 0) { 1035 lio_dev_err(lio_dev, "Require firmware version >= %s\n", 1036 LIO_VF_TRUST_MIN_VERSION); 1037 return -EAGAIN; 1038 } 1039 1040 if (!lio_dev->intf_open) { 1041 lio_dev_err(lio_dev, "Port %d down, can't disable promiscuous\n", 1042 lio_dev->port_id); 1043 return -EAGAIN; 1044 } 1045 1046 lio_dev->ifflags &= ~LIO_IFFLAG_PROMISC; 1047 return lio_change_dev_flag(eth_dev); 1048 } 1049 1050 static int 1051 lio_dev_allmulticast_enable(struct rte_eth_dev *eth_dev) 1052 { 1053 struct lio_device *lio_dev = LIO_DEV(eth_dev); 1054 1055 if (!lio_dev->intf_open) { 1056 lio_dev_err(lio_dev, "Port %d down, can't enable multicast\n", 1057 lio_dev->port_id); 1058 return -EAGAIN; 1059 } 1060 1061 lio_dev->ifflags |= LIO_IFFLAG_ALLMULTI; 1062 return lio_change_dev_flag(eth_dev); 1063 } 1064 1065 static int 1066 lio_dev_allmulticast_disable(struct rte_eth_dev *eth_dev) 1067 { 1068 struct lio_device *lio_dev = LIO_DEV(eth_dev); 1069 1070 if (!lio_dev->intf_open) { 1071 lio_dev_err(lio_dev, "Port %d down, can't disable multicast\n", 1072 lio_dev->port_id); 1073 return -EAGAIN; 1074 } 1075 1076 lio_dev->ifflags &= ~LIO_IFFLAG_ALLMULTI; 1077 return lio_change_dev_flag(eth_dev); 1078 } 1079 1080 static void 1081 lio_dev_rss_configure(struct rte_eth_dev *eth_dev) 1082 { 1083 struct lio_device *lio_dev = LIO_DEV(eth_dev); 1084 struct lio_rss_ctx *rss_state = &lio_dev->rss_state; 1085 struct rte_eth_rss_reta_entry64 reta_conf[8]; 1086 struct rte_eth_rss_conf rss_conf; 1087 uint16_t i; 1088 1089 /* Configure the RSS key and the RSS protocols used to compute 1090 * the RSS hash of input packets. 1091 */ 1092 rss_conf = eth_dev->data->dev_conf.rx_adv_conf.rss_conf; 1093 if ((rss_conf.rss_hf & LIO_RSS_OFFLOAD_ALL) == 0) { 1094 rss_state->hash_disable = 1; 1095 lio_dev_rss_hash_update(eth_dev, &rss_conf); 1096 return; 1097 } 1098 1099 if (rss_conf.rss_key == NULL) 1100 rss_conf.rss_key = lio_rss_key; /* Default hash key */ 1101 1102 lio_dev_rss_hash_update(eth_dev, &rss_conf); 1103 1104 memset(reta_conf, 0, sizeof(reta_conf)); 1105 for (i = 0; i < LIO_RSS_MAX_TABLE_SZ; i++) { 1106 uint8_t q_idx, conf_idx, reta_idx; 1107 1108 q_idx = (uint8_t)((eth_dev->data->nb_rx_queues > 1) ? 
1109 i % eth_dev->data->nb_rx_queues : 0); 1110 conf_idx = i / RTE_RETA_GROUP_SIZE; 1111 reta_idx = i % RTE_RETA_GROUP_SIZE; 1112 reta_conf[conf_idx].reta[reta_idx] = q_idx; 1113 reta_conf[conf_idx].mask |= ((uint64_t)1 << reta_idx); 1114 } 1115 1116 lio_dev_rss_reta_update(eth_dev, reta_conf, LIO_RSS_MAX_TABLE_SZ); 1117 } 1118 1119 static void 1120 lio_dev_mq_rx_configure(struct rte_eth_dev *eth_dev) 1121 { 1122 struct lio_device *lio_dev = LIO_DEV(eth_dev); 1123 struct lio_rss_ctx *rss_state = &lio_dev->rss_state; 1124 struct rte_eth_rss_conf rss_conf; 1125 1126 switch (eth_dev->data->dev_conf.rxmode.mq_mode) { 1127 case ETH_MQ_RX_RSS: 1128 lio_dev_rss_configure(eth_dev); 1129 break; 1130 case ETH_MQ_RX_NONE: 1131 /* if mq_mode is none, disable rss mode. */ 1132 default: 1133 memset(&rss_conf, 0, sizeof(rss_conf)); 1134 rss_state->hash_disable = 1; 1135 lio_dev_rss_hash_update(eth_dev, &rss_conf); 1136 } 1137 } 1138 1139 /** 1140 * Setup our receive queue/ringbuffer. This is the 1141 * queue the Octeon uses to send us packets and 1142 * responses. We are given a memory pool for our 1143 * packet buffers that are used to populate the receive 1144 * queue. 1145 * 1146 * @param eth_dev 1147 * Pointer to the structure rte_eth_dev 1148 * @param q_no 1149 * Queue number 1150 * @param num_rx_descs 1151 * Number of entries in the queue 1152 * @param socket_id 1153 * Where to allocate memory 1154 * @param rx_conf 1155 * Pointer to the struction rte_eth_rxconf 1156 * @param mp 1157 * Pointer to the packet pool 1158 * 1159 * @return 1160 * - On success, return 0 1161 * - On failure, return -1 1162 */ 1163 static int 1164 lio_dev_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t q_no, 1165 uint16_t num_rx_descs, unsigned int socket_id, 1166 const struct rte_eth_rxconf *rx_conf __rte_unused, 1167 struct rte_mempool *mp) 1168 { 1169 struct lio_device *lio_dev = LIO_DEV(eth_dev); 1170 struct rte_pktmbuf_pool_private *mbp_priv; 1171 uint32_t fw_mapped_oq; 1172 uint16_t buf_size; 1173 1174 if (q_no >= lio_dev->nb_rx_queues) { 1175 lio_dev_err(lio_dev, "Invalid rx queue number %u\n", q_no); 1176 return -EINVAL; 1177 } 1178 1179 lio_dev_dbg(lio_dev, "setting up rx queue %u\n", q_no); 1180 1181 fw_mapped_oq = lio_dev->linfo.rxpciq[q_no].s.q_no; 1182 1183 /* Free previous allocation if any */ 1184 if (eth_dev->data->rx_queues[q_no] != NULL) { 1185 lio_dev_rx_queue_release(eth_dev, q_no); 1186 eth_dev->data->rx_queues[q_no] = NULL; 1187 } 1188 1189 mbp_priv = rte_mempool_get_priv(mp); 1190 buf_size = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM; 1191 1192 if (lio_setup_droq(lio_dev, fw_mapped_oq, num_rx_descs, buf_size, mp, 1193 socket_id)) { 1194 lio_dev_err(lio_dev, "droq allocation failed\n"); 1195 return -1; 1196 } 1197 1198 eth_dev->data->rx_queues[q_no] = lio_dev->droq[fw_mapped_oq]; 1199 1200 return 0; 1201 } 1202 1203 /** 1204 * Release the receive queue/ringbuffer. Called by 1205 * the upper layers. 1206 * 1207 * @param eth_dev 1208 * Pointer to Ethernet device structure. 1209 * @param q_no 1210 * Receive queue index. 1211 * 1212 * @return 1213 * - nothing 1214 */ 1215 void 1216 lio_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t q_no) 1217 { 1218 struct lio_droq *droq = dev->data->rx_queues[q_no]; 1219 int oq_no; 1220 1221 if (droq) { 1222 oq_no = droq->q_no; 1223 lio_delete_droq_queue(droq->lio_dev, oq_no); 1224 } 1225 } 1226 1227 /** 1228 * Allocate and initialize SW ring. Initialize associated HW registers. 
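 * (Creates the firmware-mapped instruction queue (IQ) and the
 * scatter-gather lists used for transmit on this queue.)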
1229 * 1230 * @param eth_dev 1231 * Pointer to structure rte_eth_dev 1232 * 1233 * @param q_no 1234 * Queue number 1235 * 1236 * @param num_tx_descs 1237 * Number of ringbuffer descriptors 1238 * 1239 * @param socket_id 1240 * NUMA socket id, used for memory allocations 1241 * 1242 * @param tx_conf 1243 * Pointer to the structure rte_eth_txconf 1244 * 1245 * @return 1246 * - On success, return 0 1247 * - On failure, return -errno value 1248 */ 1249 static int 1250 lio_dev_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t q_no, 1251 uint16_t num_tx_descs, unsigned int socket_id, 1252 const struct rte_eth_txconf *tx_conf __rte_unused) 1253 { 1254 struct lio_device *lio_dev = LIO_DEV(eth_dev); 1255 int fw_mapped_iq = lio_dev->linfo.txpciq[q_no].s.q_no; 1256 int retval; 1257 1258 if (q_no >= lio_dev->nb_tx_queues) { 1259 lio_dev_err(lio_dev, "Invalid tx queue number %u\n", q_no); 1260 return -EINVAL; 1261 } 1262 1263 lio_dev_dbg(lio_dev, "setting up tx queue %u\n", q_no); 1264 1265 /* Free previous allocation if any */ 1266 if (eth_dev->data->tx_queues[q_no] != NULL) { 1267 lio_dev_tx_queue_release(eth_dev, q_no); 1268 eth_dev->data->tx_queues[q_no] = NULL; 1269 } 1270 1271 retval = lio_setup_iq(lio_dev, q_no, lio_dev->linfo.txpciq[q_no], 1272 num_tx_descs, lio_dev, socket_id); 1273 1274 if (retval) { 1275 lio_dev_err(lio_dev, "Runtime IQ(TxQ) creation failed.\n"); 1276 return retval; 1277 } 1278 1279 retval = lio_setup_sglists(lio_dev, q_no, fw_mapped_iq, 1280 lio_dev->instr_queue[fw_mapped_iq]->nb_desc, 1281 socket_id); 1282 1283 if (retval) { 1284 lio_delete_instruction_queue(lio_dev, fw_mapped_iq); 1285 return retval; 1286 } 1287 1288 eth_dev->data->tx_queues[q_no] = lio_dev->instr_queue[fw_mapped_iq]; 1289 1290 return 0; 1291 } 1292 1293 /** 1294 * Release the transmit queue/ringbuffer. Called by 1295 * the upper layers. 1296 * 1297 * @param eth_dev 1298 * Pointer to Ethernet device structure. 1299 * @param q_no 1300 * Transmit queue index. 1301 * 1302 * @return 1303 * - nothing 1304 */ 1305 void 1306 lio_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t q_no) 1307 { 1308 struct lio_instr_queue *tq = dev->data->tx_queues[q_no]; 1309 uint32_t fw_mapped_iq_no; 1310 1311 1312 if (tq) { 1313 /* Free sg_list */ 1314 lio_delete_sglist(tq); 1315 1316 fw_mapped_iq_no = tq->txpciq.s.q_no; 1317 lio_delete_instruction_queue(tq->lio_dev, fw_mapped_iq_no); 1318 } 1319 } 1320 1321 /** 1322 * Api to check link state. 
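 * Queries firmware with an LIO_OPCODE_INFO soft command and, on a change,
 * updates the cached link info and the ethdev link status.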
1323 */ 1324 static void 1325 lio_dev_get_link_status(struct rte_eth_dev *eth_dev) 1326 { 1327 struct lio_device *lio_dev = LIO_DEV(eth_dev); 1328 uint16_t timeout = LIO_MAX_CMD_TIMEOUT; 1329 struct lio_link_status_resp *resp; 1330 union octeon_link_status *ls; 1331 struct lio_soft_command *sc; 1332 uint32_t resp_size; 1333 1334 if (!lio_dev->intf_open) 1335 return; 1336 1337 resp_size = sizeof(struct lio_link_status_resp); 1338 sc = lio_alloc_soft_command(lio_dev, 0, resp_size, 0); 1339 if (sc == NULL) 1340 return; 1341 1342 resp = (struct lio_link_status_resp *)sc->virtrptr; 1343 lio_prepare_soft_command(lio_dev, sc, LIO_OPCODE, 1344 LIO_OPCODE_INFO, 0, 0, 0); 1345 1346 /* Setting wait time in seconds */ 1347 sc->wait_time = LIO_MAX_CMD_TIMEOUT / 1000; 1348 1349 if (lio_send_soft_command(lio_dev, sc) == LIO_IQ_SEND_FAILED) 1350 goto get_status_fail; 1351 1352 while ((*sc->status_word == LIO_COMPLETION_WORD_INIT) && --timeout) { 1353 lio_flush_iq(lio_dev, lio_dev->instr_queue[sc->iq_no]); 1354 rte_delay_ms(1); 1355 } 1356 1357 if (resp->status) 1358 goto get_status_fail; 1359 1360 ls = &resp->link_info.link; 1361 1362 lio_swap_8B_data((uint64_t *)ls, sizeof(union octeon_link_status) >> 3); 1363 1364 if (lio_dev->linfo.link.link_status64 != ls->link_status64) { 1365 if (ls->s.mtu < eth_dev->data->mtu) { 1366 lio_dev_info(lio_dev, "Lowered VF MTU to %d as PF MTU dropped\n", 1367 ls->s.mtu); 1368 eth_dev->data->mtu = ls->s.mtu; 1369 } 1370 lio_dev->linfo.link.link_status64 = ls->link_status64; 1371 lio_dev_link_update(eth_dev, 0); 1372 } 1373 1374 lio_free_soft_command(sc); 1375 1376 return; 1377 1378 get_status_fail: 1379 lio_free_soft_command(sc); 1380 } 1381 1382 /* This function will be invoked every LSC_TIMEOUT ns (100ms) 1383 * and will update link state if it changes. 1384 */ 1385 static void 1386 lio_sync_link_state_check(void *eth_dev) 1387 { 1388 struct lio_device *lio_dev = 1389 (((struct rte_eth_dev *)eth_dev)->data->dev_private); 1390 1391 if (lio_dev->port_configured) 1392 lio_dev_get_link_status(eth_dev); 1393 1394 /* Schedule periodic link status check. 1395 * Stop check if interface is close and start again while opening. 1396 */ 1397 if (lio_dev->intf_open) 1398 rte_eal_alarm_set(LIO_LSC_TIMEOUT, lio_sync_link_state_check, 1399 eth_dev); 1400 } 1401 1402 static int 1403 lio_dev_start(struct rte_eth_dev *eth_dev) 1404 { 1405 uint16_t mtu; 1406 uint32_t frame_len = eth_dev->data->dev_conf.rxmode.max_rx_pkt_len; 1407 struct lio_device *lio_dev = LIO_DEV(eth_dev); 1408 uint16_t timeout = LIO_MAX_CMD_TIMEOUT; 1409 int ret = 0; 1410 1411 lio_dev_info(lio_dev, "Starting port %d\n", eth_dev->data->port_id); 1412 1413 if (lio_dev->fn_list.enable_io_queues(lio_dev)) 1414 return -1; 1415 1416 if (lio_send_rx_ctrl_cmd(eth_dev, 1)) 1417 return -1; 1418 1419 /* Ready for link status updates */ 1420 lio_dev->intf_open = 1; 1421 rte_mb(); 1422 1423 /* Configure RSS if device configured with multiple RX queues. */ 1424 lio_dev_mq_rx_configure(eth_dev); 1425 1426 /* Before update the link info, 1427 * must set linfo.link.link_status64 to 0. 
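 * so the wait loop below can detect the first link report from firmware.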
1428 */ 1429 lio_dev->linfo.link.link_status64 = 0; 1430 1431 /* start polling for lsc */ 1432 ret = rte_eal_alarm_set(LIO_LSC_TIMEOUT, 1433 lio_sync_link_state_check, 1434 eth_dev); 1435 if (ret) { 1436 lio_dev_err(lio_dev, 1437 "link state check handler creation failed\n"); 1438 goto dev_lsc_handle_error; 1439 } 1440 1441 while ((lio_dev->linfo.link.link_status64 == 0) && (--timeout)) 1442 rte_delay_ms(1); 1443 1444 if (lio_dev->linfo.link.link_status64 == 0) { 1445 ret = -1; 1446 goto dev_mtu_set_error; 1447 } 1448 1449 mtu = (uint16_t)(frame_len - RTE_ETHER_HDR_LEN - RTE_ETHER_CRC_LEN); 1450 if (mtu < RTE_ETHER_MIN_MTU) 1451 mtu = RTE_ETHER_MIN_MTU; 1452 1453 if (eth_dev->data->mtu != mtu) { 1454 ret = lio_dev_mtu_set(eth_dev, mtu); 1455 if (ret) 1456 goto dev_mtu_set_error; 1457 } 1458 1459 return 0; 1460 1461 dev_mtu_set_error: 1462 rte_eal_alarm_cancel(lio_sync_link_state_check, eth_dev); 1463 1464 dev_lsc_handle_error: 1465 lio_dev->intf_open = 0; 1466 lio_send_rx_ctrl_cmd(eth_dev, 0); 1467 1468 return ret; 1469 } 1470 1471 /* Stop device and disable input/output functions */ 1472 static int 1473 lio_dev_stop(struct rte_eth_dev *eth_dev) 1474 { 1475 struct lio_device *lio_dev = LIO_DEV(eth_dev); 1476 1477 lio_dev_info(lio_dev, "Stopping port %d\n", eth_dev->data->port_id); 1478 eth_dev->data->dev_started = 0; 1479 lio_dev->intf_open = 0; 1480 rte_mb(); 1481 1482 /* Cancel callback if still running. */ 1483 rte_eal_alarm_cancel(lio_sync_link_state_check, eth_dev); 1484 1485 lio_send_rx_ctrl_cmd(eth_dev, 0); 1486 1487 lio_wait_for_instr_fetch(lio_dev); 1488 1489 /* Clear recorded link status */ 1490 lio_dev->linfo.link.link_status64 = 0; 1491 1492 return 0; 1493 } 1494 1495 static int 1496 lio_dev_set_link_up(struct rte_eth_dev *eth_dev) 1497 { 1498 struct lio_device *lio_dev = LIO_DEV(eth_dev); 1499 1500 if (!lio_dev->intf_open) { 1501 lio_dev_info(lio_dev, "Port is stopped, Start the port first\n"); 1502 return 0; 1503 } 1504 1505 if (lio_dev->linfo.link.s.link_up) { 1506 lio_dev_info(lio_dev, "Link is already UP\n"); 1507 return 0; 1508 } 1509 1510 if (lio_send_rx_ctrl_cmd(eth_dev, 1)) { 1511 lio_dev_err(lio_dev, "Unable to set Link UP\n"); 1512 return -1; 1513 } 1514 1515 lio_dev->linfo.link.s.link_up = 1; 1516 eth_dev->data->dev_link.link_status = ETH_LINK_UP; 1517 1518 return 0; 1519 } 1520 1521 static int 1522 lio_dev_set_link_down(struct rte_eth_dev *eth_dev) 1523 { 1524 struct lio_device *lio_dev = LIO_DEV(eth_dev); 1525 1526 if (!lio_dev->intf_open) { 1527 lio_dev_info(lio_dev, "Port is stopped, Start the port first\n"); 1528 return 0; 1529 } 1530 1531 if (!lio_dev->linfo.link.s.link_up) { 1532 lio_dev_info(lio_dev, "Link is already DOWN\n"); 1533 return 0; 1534 } 1535 1536 lio_dev->linfo.link.s.link_up = 0; 1537 eth_dev->data->dev_link.link_status = ETH_LINK_DOWN; 1538 1539 if (lio_send_rx_ctrl_cmd(eth_dev, 0)) { 1540 lio_dev->linfo.link.s.link_up = 1; 1541 eth_dev->data->dev_link.link_status = ETH_LINK_UP; 1542 lio_dev_err(lio_dev, "Unable to set Link Down\n"); 1543 return -1; 1544 } 1545 1546 return 0; 1547 } 1548 1549 /** 1550 * Reset and stop the device. This occurs on the first 1551 * call to this routine. Subsequent calls will simply 1552 * return. NB: This will require the NIC to be rebooted. 
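 * (Stops traffic, resets the IOQ registers, requests an FLR when bound to
 * igb_uio, and frees the mailbox and gather-list resources.)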
1553 * 1554 * @param eth_dev 1555 * Pointer to the structure rte_eth_dev 1556 * 1557 * @return 1558 * - nothing 1559 */ 1560 static int 1561 lio_dev_close(struct rte_eth_dev *eth_dev) 1562 { 1563 struct lio_device *lio_dev = LIO_DEV(eth_dev); 1564 int ret = 0; 1565 1566 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 1567 return 0; 1568 1569 lio_dev_info(lio_dev, "closing port %d\n", eth_dev->data->port_id); 1570 1571 if (lio_dev->intf_open) 1572 ret = lio_dev_stop(eth_dev); 1573 1574 /* Reset ioq regs */ 1575 lio_dev->fn_list.setup_device_regs(lio_dev); 1576 1577 if (lio_dev->pci_dev->kdrv == RTE_PCI_KDRV_IGB_UIO) { 1578 cn23xx_vf_ask_pf_to_do_flr(lio_dev); 1579 rte_delay_ms(LIO_PCI_FLR_WAIT); 1580 } 1581 1582 /* lio_free_mbox */ 1583 lio_dev->fn_list.free_mbox(lio_dev); 1584 1585 /* Free glist resources */ 1586 rte_free(lio_dev->glist_head); 1587 rte_free(lio_dev->glist_lock); 1588 lio_dev->glist_head = NULL; 1589 lio_dev->glist_lock = NULL; 1590 1591 lio_dev->port_configured = 0; 1592 1593 /* Delete all queues */ 1594 lio_dev_clear_queues(eth_dev); 1595 1596 return ret; 1597 } 1598 1599 /** 1600 * Enable tunnel rx checksum verification from firmware. 1601 */ 1602 static void 1603 lio_enable_hw_tunnel_rx_checksum(struct rte_eth_dev *eth_dev) 1604 { 1605 struct lio_device *lio_dev = LIO_DEV(eth_dev); 1606 struct lio_dev_ctrl_cmd ctrl_cmd; 1607 struct lio_ctrl_pkt ctrl_pkt; 1608 1609 /* flush added to prevent cmd failure 1610 * incase the queue is full 1611 */ 1612 lio_flush_iq(lio_dev, lio_dev->instr_queue[0]); 1613 1614 memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt)); 1615 memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd)); 1616 1617 ctrl_cmd.eth_dev = eth_dev; 1618 ctrl_cmd.cond = 0; 1619 1620 ctrl_pkt.ncmd.s.cmd = LIO_CMD_TNL_RX_CSUM_CTL; 1621 ctrl_pkt.ncmd.s.param1 = LIO_CMD_RXCSUM_ENABLE; 1622 ctrl_pkt.ctrl_cmd = &ctrl_cmd; 1623 1624 if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) { 1625 lio_dev_err(lio_dev, "Failed to send TNL_RX_CSUM command\n"); 1626 return; 1627 } 1628 1629 if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) 1630 lio_dev_err(lio_dev, "TNL_RX_CSUM command timed out\n"); 1631 } 1632 1633 /** 1634 * Enable checksum calculation for inner packet in a tunnel. 
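 * Sends LIO_CMD_TNL_TX_CSUM_CTL with LIO_CMD_TXCSUM_ENABLE so firmware
 * computes checksums for the inner headers of tunnelled transmit packets.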
1635 */ 1636 static void 1637 lio_enable_hw_tunnel_tx_checksum(struct rte_eth_dev *eth_dev) 1638 { 1639 struct lio_device *lio_dev = LIO_DEV(eth_dev); 1640 struct lio_dev_ctrl_cmd ctrl_cmd; 1641 struct lio_ctrl_pkt ctrl_pkt; 1642 1643 /* flush added to prevent cmd failure 1644 * incase the queue is full 1645 */ 1646 lio_flush_iq(lio_dev, lio_dev->instr_queue[0]); 1647 1648 memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt)); 1649 memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd)); 1650 1651 ctrl_cmd.eth_dev = eth_dev; 1652 ctrl_cmd.cond = 0; 1653 1654 ctrl_pkt.ncmd.s.cmd = LIO_CMD_TNL_TX_CSUM_CTL; 1655 ctrl_pkt.ncmd.s.param1 = LIO_CMD_TXCSUM_ENABLE; 1656 ctrl_pkt.ctrl_cmd = &ctrl_cmd; 1657 1658 if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) { 1659 lio_dev_err(lio_dev, "Failed to send TNL_TX_CSUM command\n"); 1660 return; 1661 } 1662 1663 if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) 1664 lio_dev_err(lio_dev, "TNL_TX_CSUM command timed out\n"); 1665 } 1666 1667 static int 1668 lio_send_queue_count_update(struct rte_eth_dev *eth_dev, int num_txq, 1669 int num_rxq) 1670 { 1671 struct lio_device *lio_dev = LIO_DEV(eth_dev); 1672 struct lio_dev_ctrl_cmd ctrl_cmd; 1673 struct lio_ctrl_pkt ctrl_pkt; 1674 1675 if (strcmp(lio_dev->firmware_version, LIO_Q_RECONF_MIN_VERSION) < 0) { 1676 lio_dev_err(lio_dev, "Require firmware version >= %s\n", 1677 LIO_Q_RECONF_MIN_VERSION); 1678 return -ENOTSUP; 1679 } 1680 1681 /* flush added to prevent cmd failure 1682 * incase the queue is full 1683 */ 1684 lio_flush_iq(lio_dev, lio_dev->instr_queue[0]); 1685 1686 memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt)); 1687 memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd)); 1688 1689 ctrl_cmd.eth_dev = eth_dev; 1690 ctrl_cmd.cond = 0; 1691 1692 ctrl_pkt.ncmd.s.cmd = LIO_CMD_QUEUE_COUNT_CTL; 1693 ctrl_pkt.ncmd.s.param1 = num_txq; 1694 ctrl_pkt.ncmd.s.param2 = num_rxq; 1695 ctrl_pkt.ctrl_cmd = &ctrl_cmd; 1696 1697 if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) { 1698 lio_dev_err(lio_dev, "Failed to send queue count control command\n"); 1699 return -1; 1700 } 1701 1702 if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) { 1703 lio_dev_err(lio_dev, "Queue count control command timed out\n"); 1704 return -1; 1705 } 1706 1707 return 0; 1708 } 1709 1710 static int 1711 lio_reconf_queues(struct rte_eth_dev *eth_dev, int num_txq, int num_rxq) 1712 { 1713 struct lio_device *lio_dev = LIO_DEV(eth_dev); 1714 int ret; 1715 1716 if (lio_dev->nb_rx_queues != num_rxq || 1717 lio_dev->nb_tx_queues != num_txq) { 1718 if (lio_send_queue_count_update(eth_dev, num_txq, num_rxq)) 1719 return -1; 1720 lio_dev->nb_rx_queues = num_rxq; 1721 lio_dev->nb_tx_queues = num_txq; 1722 } 1723 1724 if (lio_dev->intf_open) { 1725 ret = lio_dev_stop(eth_dev); 1726 if (ret != 0) 1727 return ret; 1728 } 1729 1730 /* Reset ioq registers */ 1731 if (lio_dev->fn_list.setup_device_regs(lio_dev)) { 1732 lio_dev_err(lio_dev, "Failed to configure device registers\n"); 1733 return -1; 1734 } 1735 1736 return 0; 1737 } 1738 1739 static int 1740 lio_dev_configure(struct rte_eth_dev *eth_dev) 1741 { 1742 struct lio_device *lio_dev = LIO_DEV(eth_dev); 1743 uint16_t timeout = LIO_MAX_CMD_TIMEOUT; 1744 int retval, num_iqueues, num_oqueues; 1745 uint8_t mac[RTE_ETHER_ADDR_LEN], i; 1746 struct lio_if_cfg_resp *resp; 1747 struct lio_soft_command *sc; 1748 union lio_if_cfg if_cfg; 1749 uint32_t resp_size; 1750 1751 PMD_INIT_FUNC_TRACE(); 1752 1753 if (eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) 1754 eth_dev->data->dev_conf.rxmode.offloads |= 1755 
DEV_RX_OFFLOAD_RSS_HASH; 1756 1757 /* Inform firmware about change in number of queues to use. 1758 * Disable IO queues and reset registers for re-configuration. 1759 */ 1760 if (lio_dev->port_configured) 1761 return lio_reconf_queues(eth_dev, 1762 eth_dev->data->nb_tx_queues, 1763 eth_dev->data->nb_rx_queues); 1764 1765 lio_dev->nb_rx_queues = eth_dev->data->nb_rx_queues; 1766 lio_dev->nb_tx_queues = eth_dev->data->nb_tx_queues; 1767 1768 /* Set max number of queues which can be re-configured. */ 1769 lio_dev->max_rx_queues = eth_dev->data->nb_rx_queues; 1770 lio_dev->max_tx_queues = eth_dev->data->nb_tx_queues; 1771 1772 resp_size = sizeof(struct lio_if_cfg_resp); 1773 sc = lio_alloc_soft_command(lio_dev, 0, resp_size, 0); 1774 if (sc == NULL) 1775 return -ENOMEM; 1776 1777 resp = (struct lio_if_cfg_resp *)sc->virtrptr; 1778 1779 /* Firmware doesn't have capability to reconfigure the queues, 1780 * Claim all queues, and use as many required 1781 */ 1782 if_cfg.if_cfg64 = 0; 1783 if_cfg.s.num_iqueues = lio_dev->nb_tx_queues; 1784 if_cfg.s.num_oqueues = lio_dev->nb_rx_queues; 1785 if_cfg.s.base_queue = 0; 1786 1787 if_cfg.s.gmx_port_id = lio_dev->pf_num; 1788 1789 lio_prepare_soft_command(lio_dev, sc, LIO_OPCODE, 1790 LIO_OPCODE_IF_CFG, 0, 1791 if_cfg.if_cfg64, 0); 1792 1793 /* Setting wait time in seconds */ 1794 sc->wait_time = LIO_MAX_CMD_TIMEOUT / 1000; 1795 1796 retval = lio_send_soft_command(lio_dev, sc); 1797 if (retval == LIO_IQ_SEND_FAILED) { 1798 lio_dev_err(lio_dev, "iq/oq config failed status: %x\n", 1799 retval); 1800 /* Soft instr is freed by driver in case of failure. */ 1801 goto nic_config_fail; 1802 } 1803 1804 /* Sleep on a wait queue till the cond flag indicates that the 1805 * response arrived or timed-out. 1806 */ 1807 while ((*sc->status_word == LIO_COMPLETION_WORD_INIT) && --timeout) { 1808 lio_flush_iq(lio_dev, lio_dev->instr_queue[sc->iq_no]); 1809 lio_process_ordered_list(lio_dev); 1810 rte_delay_ms(1); 1811 } 1812 1813 retval = resp->status; 1814 if (retval) { 1815 lio_dev_err(lio_dev, "iq/oq config failed\n"); 1816 goto nic_config_fail; 1817 } 1818 1819 strlcpy(lio_dev->firmware_version, 1820 resp->cfg_info.lio_firmware_version, LIO_FW_VERSION_LENGTH); 1821 1822 lio_swap_8B_data((uint64_t *)(&resp->cfg_info), 1823 sizeof(struct octeon_if_cfg_info) >> 3); 1824 1825 num_iqueues = lio_hweight64(resp->cfg_info.iqmask); 1826 num_oqueues = lio_hweight64(resp->cfg_info.oqmask); 1827 1828 if (!(num_iqueues) || !(num_oqueues)) { 1829 lio_dev_err(lio_dev, 1830 "Got bad iqueues (%016lx) or oqueues (%016lx) from firmware.\n", 1831 (unsigned long)resp->cfg_info.iqmask, 1832 (unsigned long)resp->cfg_info.oqmask); 1833 goto nic_config_fail; 1834 } 1835 1836 lio_dev_dbg(lio_dev, 1837 "interface %d, iqmask %016lx, oqmask %016lx, numiqueues %d, numoqueues %d\n", 1838 eth_dev->data->port_id, 1839 (unsigned long)resp->cfg_info.iqmask, 1840 (unsigned long)resp->cfg_info.oqmask, 1841 num_iqueues, num_oqueues); 1842 1843 lio_dev->linfo.num_rxpciq = num_oqueues; 1844 lio_dev->linfo.num_txpciq = num_iqueues; 1845 1846 for (i = 0; i < num_oqueues; i++) { 1847 lio_dev->linfo.rxpciq[i].rxpciq64 = 1848 resp->cfg_info.linfo.rxpciq[i].rxpciq64; 1849 lio_dev_dbg(lio_dev, "index %d OQ %d\n", 1850 i, lio_dev->linfo.rxpciq[i].s.q_no); 1851 } 1852 1853 for (i = 0; i < num_iqueues; i++) { 1854 lio_dev->linfo.txpciq[i].txpciq64 = 1855 resp->cfg_info.linfo.txpciq[i].txpciq64; 1856 lio_dev_dbg(lio_dev, "index %d IQ %d\n", 1857 i, lio_dev->linfo.txpciq[i].s.q_no); 1858 } 1859 1860 
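	/* Cache the MAC address, GMX port and link status reported by firmware. */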
	lio_dev->linfo.hw_addr = resp->cfg_info.linfo.hw_addr;
	lio_dev->linfo.gmxport = resp->cfg_info.linfo.gmxport;
	lio_dev->linfo.link.link_status64 =
			resp->cfg_info.linfo.link.link_status64;

	/* 64-bit swap required on LE machines */
	lio_swap_8B_data(&lio_dev->linfo.hw_addr, 1);
	for (i = 0; i < RTE_ETHER_ADDR_LEN; i++)
		mac[i] = *((uint8_t *)(((uint8_t *)&lio_dev->linfo.hw_addr) +
				       2 + i));

	/* Copy the permanent MAC address */
	rte_ether_addr_copy((struct rte_ether_addr *)mac,
			    &eth_dev->data->mac_addrs[0]);

	/* enable firmware checksum support for tunnel packets */
	lio_enable_hw_tunnel_rx_checksum(eth_dev);
	lio_enable_hw_tunnel_tx_checksum(eth_dev);

	lio_dev->glist_lock =
	    rte_zmalloc(NULL, sizeof(*lio_dev->glist_lock) * num_iqueues, 0);
	if (lio_dev->glist_lock == NULL)
		return -ENOMEM;

	lio_dev->glist_head =
		rte_zmalloc(NULL, sizeof(*lio_dev->glist_head) * num_iqueues,
			    0);
	if (lio_dev->glist_head == NULL) {
		rte_free(lio_dev->glist_lock);
		lio_dev->glist_lock = NULL;
		return -ENOMEM;
	}

	lio_dev_link_update(eth_dev, 0);

	lio_dev->port_configured = 1;

	lio_free_soft_command(sc);

	/* Reset ioq regs */
	lio_dev->fn_list.setup_device_regs(lio_dev);

	/* Free iq_0 used during init */
	lio_free_instr_queue0(lio_dev);

	return 0;

nic_config_fail:
	lio_dev_err(lio_dev, "Failed retval %d\n", retval);
	lio_free_soft_command(sc);
	lio_free_instr_queue0(lio_dev);

	return -ENODEV;
}

/* Define our ethernet definitions */
static const struct eth_dev_ops liovf_eth_dev_ops = {
	.dev_configure		= lio_dev_configure,
	.dev_start		= lio_dev_start,
	.dev_stop		= lio_dev_stop,
	.dev_set_link_up	= lio_dev_set_link_up,
	.dev_set_link_down	= lio_dev_set_link_down,
	.dev_close		= lio_dev_close,
	.promiscuous_enable	= lio_dev_promiscuous_enable,
	.promiscuous_disable	= lio_dev_promiscuous_disable,
	.allmulticast_enable	= lio_dev_allmulticast_enable,
	.allmulticast_disable	= lio_dev_allmulticast_disable,
	.link_update		= lio_dev_link_update,
	.stats_get		= lio_dev_stats_get,
	.xstats_get		= lio_dev_xstats_get,
	.xstats_get_names	= lio_dev_xstats_get_names,
	.stats_reset		= lio_dev_stats_reset,
	.xstats_reset		= lio_dev_xstats_reset,
	.dev_infos_get		= lio_dev_info_get,
	.vlan_filter_set	= lio_dev_vlan_filter_set,
	.rx_queue_setup		= lio_dev_rx_queue_setup,
	.rx_queue_release	= lio_dev_rx_queue_release,
	.tx_queue_setup		= lio_dev_tx_queue_setup,
	.tx_queue_release	= lio_dev_tx_queue_release,
	.reta_update		= lio_dev_rss_reta_update,
	.reta_query		= lio_dev_rss_reta_query,
	.rss_hash_conf_get	= lio_dev_rss_hash_conf_get,
	.rss_hash_update	= lio_dev_rss_hash_update,
	.udp_tunnel_port_add	= lio_dev_udp_tunnel_add,
	.udp_tunnel_port_del	= lio_dev_udp_tunnel_del,
	.mtu_set		= lio_dev_mtu_set,
};

static void
lio_check_pf_hs_response(void *lio_dev)
{
	struct lio_device *dev = lio_dev;

	/* check till response arrives */
	if (dev->pfvf_hsword.coproc_tics_per_us)
		return;

	cn23xx_vf_handle_mbox(dev);

	rte_eal_alarm_set(1, lio_check_pf_hs_response, lio_dev);
}

/**
 * \brief Identify the LIO device and to map the BAR address space
 * @param lio_dev lio device
 */
static int
lio_chip_specific_setup(struct lio_device *lio_dev) 1968 { 1969 struct rte_pci_device *pdev = lio_dev->pci_dev; 1970 uint32_t dev_id = pdev->id.device_id; 1971 const char *s; 1972 int ret = 1; 1973 1974 switch (dev_id) { 1975 case LIO_CN23XX_VF_VID: 1976 lio_dev->chip_id = LIO_CN23XX_VF_VID; 1977 ret = cn23xx_vf_setup_device(lio_dev); 1978 s = "CN23XX VF"; 1979 break; 1980 default: 1981 s = "?"; 1982 lio_dev_err(lio_dev, "Unsupported Chip\n"); 1983 } 1984 1985 if (!ret) 1986 lio_dev_info(lio_dev, "DEVICE : %s\n", s); 1987 1988 return ret; 1989 } 1990 1991 static int 1992 lio_first_time_init(struct lio_device *lio_dev, 1993 struct rte_pci_device *pdev) 1994 { 1995 int dpdk_queues; 1996 1997 PMD_INIT_FUNC_TRACE(); 1998 1999 /* set dpdk specific pci device pointer */ 2000 lio_dev->pci_dev = pdev; 2001 2002 /* Identify the LIO type and set device ops */ 2003 if (lio_chip_specific_setup(lio_dev)) { 2004 lio_dev_err(lio_dev, "Chip specific setup failed\n"); 2005 return -1; 2006 } 2007 2008 /* Initialize soft command buffer pool */ 2009 if (lio_setup_sc_buffer_pool(lio_dev)) { 2010 lio_dev_err(lio_dev, "sc buffer pool allocation failed\n"); 2011 return -1; 2012 } 2013 2014 /* Initialize lists to manage the requests of different types that 2015 * arrive from applications for this lio device. 2016 */ 2017 lio_setup_response_list(lio_dev); 2018 2019 if (lio_dev->fn_list.setup_mbox(lio_dev)) { 2020 lio_dev_err(lio_dev, "Mailbox setup failed\n"); 2021 goto error; 2022 } 2023 2024 /* Check PF response */ 2025 lio_check_pf_hs_response((void *)lio_dev); 2026 2027 /* Do handshake and exit if incompatible PF driver */ 2028 if (cn23xx_pfvf_handshake(lio_dev)) 2029 goto error; 2030 2031 /* Request and wait for device reset. */ 2032 if (pdev->kdrv == RTE_PCI_KDRV_IGB_UIO) { 2033 cn23xx_vf_ask_pf_to_do_flr(lio_dev); 2034 /* FLR wait time doubled as a precaution. */ 2035 rte_delay_ms(LIO_PCI_FLR_WAIT * 2); 2036 } 2037 2038 if (lio_dev->fn_list.setup_device_regs(lio_dev)) { 2039 lio_dev_err(lio_dev, "Failed to configure device registers\n"); 2040 goto error; 2041 } 2042 2043 if (lio_setup_instr_queue0(lio_dev)) { 2044 lio_dev_err(lio_dev, "Failed to setup instruction queue 0\n"); 2045 goto error; 2046 } 2047 2048 dpdk_queues = (int)lio_dev->sriov_info.rings_per_vf; 2049 2050 lio_dev->max_tx_queues = dpdk_queues; 2051 lio_dev->max_rx_queues = dpdk_queues; 2052 2053 /* Enable input and output queues for this device */ 2054 if (lio_dev->fn_list.enable_io_queues(lio_dev)) 2055 goto error; 2056 2057 return 0; 2058 2059 error: 2060 lio_free_sc_buffer_pool(lio_dev); 2061 if (lio_dev->mbox[0]) 2062 lio_dev->fn_list.free_mbox(lio_dev); 2063 if (lio_dev->instr_queue[0]) 2064 lio_free_instr_queue0(lio_dev); 2065 2066 return -1; 2067 } 2068 2069 static int 2070 lio_eth_dev_uninit(struct rte_eth_dev *eth_dev) 2071 { 2072 struct lio_device *lio_dev = LIO_DEV(eth_dev); 2073 2074 PMD_INIT_FUNC_TRACE(); 2075 2076 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 2077 return 0; 2078 2079 /* lio_free_sc_buffer_pool */ 2080 lio_free_sc_buffer_pool(lio_dev); 2081 2082 return 0; 2083 } 2084 2085 static int 2086 lio_eth_dev_init(struct rte_eth_dev *eth_dev) 2087 { 2088 struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(eth_dev); 2089 struct lio_device *lio_dev = LIO_DEV(eth_dev); 2090 2091 PMD_INIT_FUNC_TRACE(); 2092 2093 eth_dev->rx_pkt_burst = &lio_dev_recv_pkts; 2094 eth_dev->tx_pkt_burst = &lio_dev_xmit_pkts; 2095 2096 /* Primary does the initialization. 
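	 * Secondary processes only attach the rx/tx burst functions set
	 * above; all device setup below runs in the primary.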
*/ 2097 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 2098 return 0; 2099 2100 rte_eth_copy_pci_info(eth_dev, pdev); 2101 eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS; 2102 2103 if (pdev->mem_resource[0].addr) { 2104 lio_dev->hw_addr = pdev->mem_resource[0].addr; 2105 } else { 2106 PMD_INIT_LOG(ERR, "ERROR: Failed to map BAR0\n"); 2107 return -ENODEV; 2108 } 2109 2110 lio_dev->eth_dev = eth_dev; 2111 /* set lio device print string */ 2112 snprintf(lio_dev->dev_string, sizeof(lio_dev->dev_string), 2113 "%s[%02x:%02x.%x]", pdev->driver->driver.name, 2114 pdev->addr.bus, pdev->addr.devid, pdev->addr.function); 2115 2116 lio_dev->port_id = eth_dev->data->port_id; 2117 2118 if (lio_first_time_init(lio_dev, pdev)) { 2119 lio_dev_err(lio_dev, "Device init failed\n"); 2120 return -EINVAL; 2121 } 2122 2123 eth_dev->dev_ops = &liovf_eth_dev_ops; 2124 eth_dev->data->mac_addrs = rte_zmalloc("lio", RTE_ETHER_ADDR_LEN, 0); 2125 if (eth_dev->data->mac_addrs == NULL) { 2126 lio_dev_err(lio_dev, 2127 "MAC addresses memory allocation failed\n"); 2128 eth_dev->dev_ops = NULL; 2129 eth_dev->rx_pkt_burst = NULL; 2130 eth_dev->tx_pkt_burst = NULL; 2131 return -ENOMEM; 2132 } 2133 2134 rte_atomic64_set(&lio_dev->status, LIO_DEV_RUNNING); 2135 rte_wmb(); 2136 2137 lio_dev->port_configured = 0; 2138 /* Always allow unicast packets */ 2139 lio_dev->ifflags |= LIO_IFFLAG_UNICAST; 2140 2141 return 0; 2142 } 2143 2144 static int 2145 lio_eth_dev_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, 2146 struct rte_pci_device *pci_dev) 2147 { 2148 return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct lio_device), 2149 lio_eth_dev_init); 2150 } 2151 2152 static int 2153 lio_eth_dev_pci_remove(struct rte_pci_device *pci_dev) 2154 { 2155 return rte_eth_dev_pci_generic_remove(pci_dev, 2156 lio_eth_dev_uninit); 2157 } 2158 2159 /* Set of PCI devices this driver supports */ 2160 static const struct rte_pci_id pci_id_liovf_map[] = { 2161 { RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, LIO_CN23XX_VF_VID) }, 2162 { .vendor_id = 0, /* sentinel */ } 2163 }; 2164 2165 static struct rte_pci_driver rte_liovf_pmd = { 2166 .id_table = pci_id_liovf_map, 2167 .drv_flags = RTE_PCI_DRV_NEED_MAPPING, 2168 .probe = lio_eth_dev_pci_probe, 2169 .remove = lio_eth_dev_pci_remove, 2170 }; 2171 2172 RTE_PMD_REGISTER_PCI(net_liovf, rte_liovf_pmd); 2173 RTE_PMD_REGISTER_PCI_TABLE(net_liovf, pci_id_liovf_map); 2174 RTE_PMD_REGISTER_KMOD_DEP(net_liovf, "* igb_uio | vfio-pci"); 2175 RTE_LOG_REGISTER_SUFFIX(lio_logtype_init, init, NOTICE); 2176 RTE_LOG_REGISTER_SUFFIX(lio_logtype_driver, driver, NOTICE); 2177