/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 */

#include <rte_string_fns.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_cycles.h>
#include <rte_malloc.h>
#include <rte_alarm.h>
#include <rte_ether.h>

#include "lio_logs.h"
#include "lio_23xx_vf.h"
#include "lio_ethdev.h"
#include "lio_rxtx.h"

/* Default RSS key in use */
static uint8_t lio_rss_key[40] = {
	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
};

static const struct rte_eth_desc_lim lio_rx_desc_lim = {
	.nb_max		= CN23XX_MAX_OQ_DESCRIPTORS,
	.nb_min		= CN23XX_MIN_OQ_DESCRIPTORS,
	.nb_align	= 1,
};

static const struct rte_eth_desc_lim lio_tx_desc_lim = {
	.nb_max		= CN23XX_MAX_IQ_DESCRIPTORS,
	.nb_min		= CN23XX_MIN_IQ_DESCRIPTORS,
	.nb_align	= 1,
};

/* Wait for the control command to reach the NIC. */
static uint16_t
lio_wait_for_ctrl_cmd(struct lio_device *lio_dev,
		      struct lio_dev_ctrl_cmd *ctrl_cmd)
{
	uint16_t timeout = LIO_MAX_CMD_TIMEOUT;

	while ((ctrl_cmd->cond == 0) && --timeout) {
		lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);
		rte_delay_ms(1);
	}

	return !timeout;
}

/**
 * \brief Send Rx control command
 * @param eth_dev Pointer to the structure rte_eth_dev
 * @param start_stop whether to start or stop
 */
static int
lio_send_rx_ctrl_cmd(struct rte_eth_dev *eth_dev, int start_stop)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	struct lio_dev_ctrl_cmd ctrl_cmd;
	struct lio_ctrl_pkt ctrl_pkt;

	/* flush added to prevent cmd failure
	 * in case the queue is full
	 */
	lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);

	memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
	memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));

	ctrl_cmd.eth_dev = eth_dev;
	ctrl_cmd.cond = 0;

	ctrl_pkt.ncmd.s.cmd = LIO_CMD_RX_CTL;
	ctrl_pkt.ncmd.s.param1 = start_stop;
	ctrl_pkt.ctrl_cmd = &ctrl_cmd;

	if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
		lio_dev_err(lio_dev, "Failed to send RX Control message\n");
		return -1;
	}

	if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) {
		lio_dev_err(lio_dev, "RX Control command timed out\n");
		return -1;
	}

	return 0;
}

/* Store statistics names and their offsets in the stats structure */
struct rte_lio_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned int offset;
};

static const struct rte_lio_xstats_name_off rte_lio_stats_strings[] = {
	{"rx_pkts", offsetof(struct octeon_rx_stats, total_rcvd)},
	{"rx_bytes", offsetof(struct octeon_rx_stats, bytes_rcvd)},
	{"rx_broadcast_pkts", offsetof(struct octeon_rx_stats, total_bcst)},
	{"rx_multicast_pkts", offsetof(struct octeon_rx_stats, total_mcst)},
	{"rx_flow_ctrl_pkts", offsetof(struct octeon_rx_stats, ctl_rcvd)},
	{"rx_fifo_err", offsetof(struct octeon_rx_stats, fifo_err)},
	{"rx_dmac_drop", offsetof(struct octeon_rx_stats, dmac_drop)},
	{"rx_fcs_err", offsetof(struct octeon_rx_stats, fcs_err)},
	{"rx_jabber_err", offsetof(struct octeon_rx_stats, jabber_err)},
	{"rx_l2_err", offsetof(struct octeon_rx_stats, l2_err)},
	{"rx_vxlan_pkts", offsetof(struct octeon_rx_stats, fw_rx_vxlan)},
	{"rx_vxlan_err", offsetof(struct octeon_rx_stats, fw_rx_vxlan_err)},
	{"rx_lro_pkts", offsetof(struct octeon_rx_stats, fw_lro_pkts)},
	{"tx_pkts", (offsetof(struct octeon_tx_stats, total_pkts_sent)) +
			sizeof(struct octeon_rx_stats)},
	{"tx_bytes", (offsetof(struct octeon_tx_stats, total_bytes_sent)) +
			sizeof(struct octeon_rx_stats)},
	{"tx_broadcast_pkts",
		(offsetof(struct octeon_tx_stats, bcast_pkts_sent)) +
			sizeof(struct octeon_rx_stats)},
	{"tx_multicast_pkts",
		(offsetof(struct octeon_tx_stats, mcast_pkts_sent)) +
			sizeof(struct octeon_rx_stats)},
	{"tx_flow_ctrl_pkts", (offsetof(struct octeon_tx_stats, ctl_sent)) +
			sizeof(struct octeon_rx_stats)},
	{"tx_fifo_err", (offsetof(struct octeon_tx_stats, fifo_err)) +
			sizeof(struct octeon_rx_stats)},
	{"tx_total_collisions", (offsetof(struct octeon_tx_stats,
					  total_collisions)) +
			sizeof(struct octeon_rx_stats)},
	{"tx_tso", (offsetof(struct octeon_tx_stats, fw_tso)) +
			sizeof(struct octeon_rx_stats)},
	{"tx_vxlan_pkts", (offsetof(struct octeon_tx_stats, fw_tx_vxlan)) +
			sizeof(struct octeon_rx_stats)},
};

#define LIO_NB_XSTATS	RTE_DIM(rte_lio_stats_strings)
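
/*
 * Illustrative only (not part of the driver): an application reads the
 * counters named above through the generic xstats API, for example
 *
 *	struct rte_eth_xstat_name names[LIO_NB_XSTATS];
 *	struct rte_eth_xstat xstats[LIO_NB_XSTATS];
 *
 *	rte_eth_xstats_get_names(port_id, names, LIO_NB_XSTATS);
 *	rte_eth_xstats_get(port_id, xstats, LIO_NB_XSTATS);
 *
 * where port_id is assumed to refer to a started liovf port.
 */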

/* Get hw stats of the port */
static int
lio_dev_xstats_get(struct rte_eth_dev *eth_dev, struct rte_eth_xstat *xstats,
		   unsigned int n)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	uint16_t timeout = LIO_MAX_CMD_TIMEOUT;
	struct octeon_link_stats *hw_stats;
	struct lio_link_stats_resp *resp;
	struct lio_soft_command *sc;
	uint32_t resp_size;
	unsigned int i;
	int retval;

	if (!lio_dev->intf_open) {
		lio_dev_err(lio_dev, "Port %d down\n",
			    lio_dev->port_id);
		return -EINVAL;
	}

	if (n < LIO_NB_XSTATS)
		return LIO_NB_XSTATS;

	resp_size = sizeof(struct lio_link_stats_resp);
	sc = lio_alloc_soft_command(lio_dev, 0, resp_size, 0);
	if (sc == NULL)
		return -ENOMEM;

	resp = (struct lio_link_stats_resp *)sc->virtrptr;
	lio_prepare_soft_command(lio_dev, sc, LIO_OPCODE,
				 LIO_OPCODE_PORT_STATS, 0, 0, 0);

	/* Setting wait time in seconds */
	sc->wait_time = LIO_MAX_CMD_TIMEOUT / 1000;

	retval = lio_send_soft_command(lio_dev, sc);
	if (retval == LIO_IQ_SEND_FAILED) {
		lio_dev_err(lio_dev, "failed to get port stats from firmware.
status: %x\n", 177 retval); 178 goto get_stats_fail; 179 } 180 181 while ((*sc->status_word == LIO_COMPLETION_WORD_INIT) && --timeout) { 182 lio_flush_iq(lio_dev, lio_dev->instr_queue[sc->iq_no]); 183 lio_process_ordered_list(lio_dev); 184 rte_delay_ms(1); 185 } 186 187 retval = resp->status; 188 if (retval) { 189 lio_dev_err(lio_dev, "failed to get port stats from firmware\n"); 190 goto get_stats_fail; 191 } 192 193 lio_swap_8B_data((uint64_t *)(&resp->link_stats), 194 sizeof(struct octeon_link_stats) >> 3); 195 196 hw_stats = &resp->link_stats; 197 198 for (i = 0; i < LIO_NB_XSTATS; i++) { 199 xstats[i].id = i; 200 xstats[i].value = 201 *(uint64_t *)(((char *)hw_stats) + 202 rte_lio_stats_strings[i].offset); 203 } 204 205 lio_free_soft_command(sc); 206 207 return LIO_NB_XSTATS; 208 209 get_stats_fail: 210 lio_free_soft_command(sc); 211 212 return -1; 213 } 214 215 static int 216 lio_dev_xstats_get_names(struct rte_eth_dev *eth_dev, 217 struct rte_eth_xstat_name *xstats_names, 218 unsigned limit __rte_unused) 219 { 220 struct lio_device *lio_dev = LIO_DEV(eth_dev); 221 unsigned int i; 222 223 if (!lio_dev->intf_open) { 224 lio_dev_err(lio_dev, "Port %d down\n", 225 lio_dev->port_id); 226 return -EINVAL; 227 } 228 229 if (xstats_names == NULL) 230 return LIO_NB_XSTATS; 231 232 /* Note: limit checked in rte_eth_xstats_names() */ 233 234 for (i = 0; i < LIO_NB_XSTATS; i++) { 235 snprintf(xstats_names[i].name, sizeof(xstats_names[i].name), 236 "%s", rte_lio_stats_strings[i].name); 237 } 238 239 return LIO_NB_XSTATS; 240 } 241 242 /* Reset hw stats for the port */ 243 static int 244 lio_dev_xstats_reset(struct rte_eth_dev *eth_dev) 245 { 246 struct lio_device *lio_dev = LIO_DEV(eth_dev); 247 struct lio_dev_ctrl_cmd ctrl_cmd; 248 struct lio_ctrl_pkt ctrl_pkt; 249 int ret; 250 251 if (!lio_dev->intf_open) { 252 lio_dev_err(lio_dev, "Port %d down\n", 253 lio_dev->port_id); 254 return -EINVAL; 255 } 256 257 /* flush added to prevent cmd failure 258 * incase the queue is full 259 */ 260 lio_flush_iq(lio_dev, lio_dev->instr_queue[0]); 261 262 memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt)); 263 memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd)); 264 265 ctrl_cmd.eth_dev = eth_dev; 266 ctrl_cmd.cond = 0; 267 268 ctrl_pkt.ncmd.s.cmd = LIO_CMD_CLEAR_STATS; 269 ctrl_pkt.ctrl_cmd = &ctrl_cmd; 270 271 ret = lio_send_ctrl_pkt(lio_dev, &ctrl_pkt); 272 if (ret != 0) { 273 lio_dev_err(lio_dev, "Failed to send clear stats command\n"); 274 return ret; 275 } 276 277 ret = lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd); 278 if (ret != 0) { 279 lio_dev_err(lio_dev, "Clear stats command timed out\n"); 280 return ret; 281 } 282 283 /* clear stored per queue stats */ 284 RTE_FUNC_PTR_OR_ERR_RET(*eth_dev->dev_ops->stats_reset, 0); 285 return (*eth_dev->dev_ops->stats_reset)(eth_dev); 286 } 287 288 /* Retrieve the device statistics (# packets in/out, # bytes in/out, etc */ 289 static int 290 lio_dev_stats_get(struct rte_eth_dev *eth_dev, 291 struct rte_eth_stats *stats) 292 { 293 struct lio_device *lio_dev = LIO_DEV(eth_dev); 294 struct lio_droq_stats *oq_stats; 295 struct lio_iq_stats *iq_stats; 296 struct lio_instr_queue *txq; 297 struct lio_droq *droq; 298 int i, iq_no, oq_no; 299 uint64_t bytes = 0; 300 uint64_t pkts = 0; 301 uint64_t drop = 0; 302 303 for (i = 0; i < eth_dev->data->nb_tx_queues; i++) { 304 iq_no = lio_dev->linfo.txpciq[i].s.q_no; 305 txq = lio_dev->instr_queue[iq_no]; 306 if (txq != NULL) { 307 iq_stats = &txq->stats; 308 pkts += iq_stats->tx_done; 309 drop += iq_stats->tx_dropped; 310 bytes += 
iq_stats->tx_tot_bytes; 311 } 312 } 313 314 stats->opackets = pkts; 315 stats->obytes = bytes; 316 stats->oerrors = drop; 317 318 pkts = 0; 319 drop = 0; 320 bytes = 0; 321 322 for (i = 0; i < eth_dev->data->nb_rx_queues; i++) { 323 oq_no = lio_dev->linfo.rxpciq[i].s.q_no; 324 droq = lio_dev->droq[oq_no]; 325 if (droq != NULL) { 326 oq_stats = &droq->stats; 327 pkts += oq_stats->rx_pkts_received; 328 drop += (oq_stats->rx_dropped + 329 oq_stats->dropped_toomany + 330 oq_stats->dropped_nomem); 331 bytes += oq_stats->rx_bytes_received; 332 } 333 } 334 stats->ibytes = bytes; 335 stats->ipackets = pkts; 336 stats->ierrors = drop; 337 338 return 0; 339 } 340 341 static int 342 lio_dev_stats_reset(struct rte_eth_dev *eth_dev) 343 { 344 struct lio_device *lio_dev = LIO_DEV(eth_dev); 345 struct lio_droq_stats *oq_stats; 346 struct lio_iq_stats *iq_stats; 347 struct lio_instr_queue *txq; 348 struct lio_droq *droq; 349 int i, iq_no, oq_no; 350 351 for (i = 0; i < eth_dev->data->nb_tx_queues; i++) { 352 iq_no = lio_dev->linfo.txpciq[i].s.q_no; 353 txq = lio_dev->instr_queue[iq_no]; 354 if (txq != NULL) { 355 iq_stats = &txq->stats; 356 memset(iq_stats, 0, sizeof(struct lio_iq_stats)); 357 } 358 } 359 360 for (i = 0; i < eth_dev->data->nb_rx_queues; i++) { 361 oq_no = lio_dev->linfo.rxpciq[i].s.q_no; 362 droq = lio_dev->droq[oq_no]; 363 if (droq != NULL) { 364 oq_stats = &droq->stats; 365 memset(oq_stats, 0, sizeof(struct lio_droq_stats)); 366 } 367 } 368 369 return 0; 370 } 371 372 static int 373 lio_dev_info_get(struct rte_eth_dev *eth_dev, 374 struct rte_eth_dev_info *devinfo) 375 { 376 struct lio_device *lio_dev = LIO_DEV(eth_dev); 377 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); 378 379 switch (pci_dev->id.subsystem_device_id) { 380 /* CN23xx 10G cards */ 381 case PCI_SUBSYS_DEV_ID_CN2350_210: 382 case PCI_SUBSYS_DEV_ID_CN2360_210: 383 case PCI_SUBSYS_DEV_ID_CN2350_210SVPN3: 384 case PCI_SUBSYS_DEV_ID_CN2360_210SVPN3: 385 case PCI_SUBSYS_DEV_ID_CN2350_210SVPT: 386 case PCI_SUBSYS_DEV_ID_CN2360_210SVPT: 387 devinfo->speed_capa = ETH_LINK_SPEED_10G; 388 break; 389 /* CN23xx 25G cards */ 390 case PCI_SUBSYS_DEV_ID_CN2350_225: 391 case PCI_SUBSYS_DEV_ID_CN2360_225: 392 devinfo->speed_capa = ETH_LINK_SPEED_25G; 393 break; 394 default: 395 devinfo->speed_capa = ETH_LINK_SPEED_10G; 396 lio_dev_err(lio_dev, 397 "Unknown CN23XX subsystem device id. 
Setting 10G as default link speed.\n"); 398 return -EINVAL; 399 } 400 401 devinfo->max_rx_queues = lio_dev->max_rx_queues; 402 devinfo->max_tx_queues = lio_dev->max_tx_queues; 403 404 devinfo->min_rx_bufsize = LIO_MIN_RX_BUF_SIZE; 405 devinfo->max_rx_pktlen = LIO_MAX_RX_PKTLEN; 406 407 devinfo->max_mac_addrs = 1; 408 409 devinfo->rx_offload_capa = (DEV_RX_OFFLOAD_IPV4_CKSUM | 410 DEV_RX_OFFLOAD_UDP_CKSUM | 411 DEV_RX_OFFLOAD_TCP_CKSUM | 412 DEV_RX_OFFLOAD_VLAN_STRIP | 413 DEV_RX_OFFLOAD_RSS_HASH); 414 devinfo->tx_offload_capa = (DEV_TX_OFFLOAD_IPV4_CKSUM | 415 DEV_TX_OFFLOAD_UDP_CKSUM | 416 DEV_TX_OFFLOAD_TCP_CKSUM | 417 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM); 418 419 devinfo->rx_desc_lim = lio_rx_desc_lim; 420 devinfo->tx_desc_lim = lio_tx_desc_lim; 421 422 devinfo->reta_size = LIO_RSS_MAX_TABLE_SZ; 423 devinfo->hash_key_size = LIO_RSS_MAX_KEY_SZ; 424 devinfo->flow_type_rss_offloads = (ETH_RSS_IPV4 | 425 ETH_RSS_NONFRAG_IPV4_TCP | 426 ETH_RSS_IPV6 | 427 ETH_RSS_NONFRAG_IPV6_TCP | 428 ETH_RSS_IPV6_EX | 429 ETH_RSS_IPV6_TCP_EX); 430 return 0; 431 } 432 433 static int 434 lio_dev_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu) 435 { 436 struct lio_device *lio_dev = LIO_DEV(eth_dev); 437 uint16_t pf_mtu = lio_dev->linfo.link.s.mtu; 438 uint32_t frame_len = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN; 439 struct lio_dev_ctrl_cmd ctrl_cmd; 440 struct lio_ctrl_pkt ctrl_pkt; 441 442 PMD_INIT_FUNC_TRACE(); 443 444 if (!lio_dev->intf_open) { 445 lio_dev_err(lio_dev, "Port %d down, can't set MTU\n", 446 lio_dev->port_id); 447 return -EINVAL; 448 } 449 450 /* check if VF MTU is within allowed range. 451 * New value should not exceed PF MTU. 452 */ 453 if (mtu < RTE_ETHER_MIN_MTU || mtu > pf_mtu) { 454 lio_dev_err(lio_dev, "VF MTU should be >= %d and <= %d\n", 455 RTE_ETHER_MIN_MTU, pf_mtu); 456 return -EINVAL; 457 } 458 459 /* flush added to prevent cmd failure 460 * incase the queue is full 461 */ 462 lio_flush_iq(lio_dev, lio_dev->instr_queue[0]); 463 464 memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt)); 465 memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd)); 466 467 ctrl_cmd.eth_dev = eth_dev; 468 ctrl_cmd.cond = 0; 469 470 ctrl_pkt.ncmd.s.cmd = LIO_CMD_CHANGE_MTU; 471 ctrl_pkt.ncmd.s.param1 = mtu; 472 ctrl_pkt.ctrl_cmd = &ctrl_cmd; 473 474 if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) { 475 lio_dev_err(lio_dev, "Failed to send command to change MTU\n"); 476 return -1; 477 } 478 479 if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) { 480 lio_dev_err(lio_dev, "Command to change MTU timed out\n"); 481 return -1; 482 } 483 484 if (frame_len > RTE_ETHER_MAX_LEN) 485 eth_dev->data->dev_conf.rxmode.offloads |= 486 DEV_RX_OFFLOAD_JUMBO_FRAME; 487 else 488 eth_dev->data->dev_conf.rxmode.offloads &= 489 ~DEV_RX_OFFLOAD_JUMBO_FRAME; 490 491 eth_dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_len; 492 eth_dev->data->mtu = mtu; 493 494 return 0; 495 } 496 497 static int 498 lio_dev_rss_reta_update(struct rte_eth_dev *eth_dev, 499 struct rte_eth_rss_reta_entry64 *reta_conf, 500 uint16_t reta_size) 501 { 502 struct lio_device *lio_dev = LIO_DEV(eth_dev); 503 struct lio_rss_ctx *rss_state = &lio_dev->rss_state; 504 struct lio_rss_set *rss_param; 505 struct lio_dev_ctrl_cmd ctrl_cmd; 506 struct lio_ctrl_pkt ctrl_pkt; 507 int i, j, index; 508 509 if (!lio_dev->intf_open) { 510 lio_dev_err(lio_dev, "Port %d down, can't update reta\n", 511 lio_dev->port_id); 512 return -EINVAL; 513 } 514 515 if (reta_size != LIO_RSS_MAX_TABLE_SZ) { 516 lio_dev_err(lio_dev, 517 "The size of hash lookup table configured (%d) doesn't 
match the number hardware can supported (%d)\n", 518 reta_size, LIO_RSS_MAX_TABLE_SZ); 519 return -EINVAL; 520 } 521 522 /* flush added to prevent cmd failure 523 * incase the queue is full 524 */ 525 lio_flush_iq(lio_dev, lio_dev->instr_queue[0]); 526 527 memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt)); 528 memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd)); 529 530 rss_param = (struct lio_rss_set *)&ctrl_pkt.udd[0]; 531 532 ctrl_cmd.eth_dev = eth_dev; 533 ctrl_cmd.cond = 0; 534 535 ctrl_pkt.ncmd.s.cmd = LIO_CMD_SET_RSS; 536 ctrl_pkt.ncmd.s.more = sizeof(struct lio_rss_set) >> 3; 537 ctrl_pkt.ctrl_cmd = &ctrl_cmd; 538 539 rss_param->param.flags = 0xF; 540 rss_param->param.flags &= ~LIO_RSS_PARAM_ITABLE_UNCHANGED; 541 rss_param->param.itablesize = LIO_RSS_MAX_TABLE_SZ; 542 543 for (i = 0; i < (reta_size / RTE_RETA_GROUP_SIZE); i++) { 544 for (j = 0; j < RTE_RETA_GROUP_SIZE; j++) { 545 if ((reta_conf[i].mask) & ((uint64_t)1 << j)) { 546 index = (i * RTE_RETA_GROUP_SIZE) + j; 547 rss_state->itable[index] = reta_conf[i].reta[j]; 548 } 549 } 550 } 551 552 rss_state->itable_size = LIO_RSS_MAX_TABLE_SZ; 553 memcpy(rss_param->itable, rss_state->itable, rss_state->itable_size); 554 555 lio_swap_8B_data((uint64_t *)rss_param, LIO_RSS_PARAM_SIZE >> 3); 556 557 if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) { 558 lio_dev_err(lio_dev, "Failed to set rss hash\n"); 559 return -1; 560 } 561 562 if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) { 563 lio_dev_err(lio_dev, "Set rss hash timed out\n"); 564 return -1; 565 } 566 567 return 0; 568 } 569 570 static int 571 lio_dev_rss_reta_query(struct rte_eth_dev *eth_dev, 572 struct rte_eth_rss_reta_entry64 *reta_conf, 573 uint16_t reta_size) 574 { 575 struct lio_device *lio_dev = LIO_DEV(eth_dev); 576 struct lio_rss_ctx *rss_state = &lio_dev->rss_state; 577 int i, num; 578 579 if (reta_size != LIO_RSS_MAX_TABLE_SZ) { 580 lio_dev_err(lio_dev, 581 "The size of hash lookup table configured (%d) doesn't match the number hardware can supported (%d)\n", 582 reta_size, LIO_RSS_MAX_TABLE_SZ); 583 return -EINVAL; 584 } 585 586 num = reta_size / RTE_RETA_GROUP_SIZE; 587 588 for (i = 0; i < num; i++) { 589 memcpy(reta_conf->reta, 590 &rss_state->itable[i * RTE_RETA_GROUP_SIZE], 591 RTE_RETA_GROUP_SIZE); 592 reta_conf++; 593 } 594 595 return 0; 596 } 597 598 static int 599 lio_dev_rss_hash_conf_get(struct rte_eth_dev *eth_dev, 600 struct rte_eth_rss_conf *rss_conf) 601 { 602 struct lio_device *lio_dev = LIO_DEV(eth_dev); 603 struct lio_rss_ctx *rss_state = &lio_dev->rss_state; 604 uint8_t *hash_key = NULL; 605 uint64_t rss_hf = 0; 606 607 if (rss_state->hash_disable) { 608 lio_dev_info(lio_dev, "RSS disabled in nic\n"); 609 rss_conf->rss_hf = 0; 610 return 0; 611 } 612 613 /* Get key value */ 614 hash_key = rss_conf->rss_key; 615 if (hash_key != NULL) 616 memcpy(hash_key, rss_state->hash_key, rss_state->hash_key_size); 617 618 if (rss_state->ip) 619 rss_hf |= ETH_RSS_IPV4; 620 if (rss_state->tcp_hash) 621 rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP; 622 if (rss_state->ipv6) 623 rss_hf |= ETH_RSS_IPV6; 624 if (rss_state->ipv6_tcp_hash) 625 rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP; 626 if (rss_state->ipv6_ex) 627 rss_hf |= ETH_RSS_IPV6_EX; 628 if (rss_state->ipv6_tcp_ex_hash) 629 rss_hf |= ETH_RSS_IPV6_TCP_EX; 630 631 rss_conf->rss_hf = rss_hf; 632 633 return 0; 634 } 635 636 static int 637 lio_dev_rss_hash_update(struct rte_eth_dev *eth_dev, 638 struct rte_eth_rss_conf *rss_conf) 639 { 640 struct lio_device *lio_dev = LIO_DEV(eth_dev); 641 struct lio_rss_ctx *rss_state = 
&lio_dev->rss_state; 642 struct lio_rss_set *rss_param; 643 struct lio_dev_ctrl_cmd ctrl_cmd; 644 struct lio_ctrl_pkt ctrl_pkt; 645 646 if (!lio_dev->intf_open) { 647 lio_dev_err(lio_dev, "Port %d down, can't update hash\n", 648 lio_dev->port_id); 649 return -EINVAL; 650 } 651 652 /* flush added to prevent cmd failure 653 * incase the queue is full 654 */ 655 lio_flush_iq(lio_dev, lio_dev->instr_queue[0]); 656 657 memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt)); 658 memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd)); 659 660 rss_param = (struct lio_rss_set *)&ctrl_pkt.udd[0]; 661 662 ctrl_cmd.eth_dev = eth_dev; 663 ctrl_cmd.cond = 0; 664 665 ctrl_pkt.ncmd.s.cmd = LIO_CMD_SET_RSS; 666 ctrl_pkt.ncmd.s.more = sizeof(struct lio_rss_set) >> 3; 667 ctrl_pkt.ctrl_cmd = &ctrl_cmd; 668 669 rss_param->param.flags = 0xF; 670 671 if (rss_conf->rss_key) { 672 rss_param->param.flags &= ~LIO_RSS_PARAM_HASH_KEY_UNCHANGED; 673 rss_state->hash_key_size = LIO_RSS_MAX_KEY_SZ; 674 rss_param->param.hashkeysize = LIO_RSS_MAX_KEY_SZ; 675 memcpy(rss_state->hash_key, rss_conf->rss_key, 676 rss_state->hash_key_size); 677 memcpy(rss_param->key, rss_state->hash_key, 678 rss_state->hash_key_size); 679 } 680 681 if ((rss_conf->rss_hf & LIO_RSS_OFFLOAD_ALL) == 0) { 682 /* Can't disable rss through hash flags, 683 * if it is enabled by default during init 684 */ 685 if (!rss_state->hash_disable) 686 return -EINVAL; 687 688 /* This is for --disable-rss during testpmd launch */ 689 rss_param->param.flags |= LIO_RSS_PARAM_DISABLE_RSS; 690 } else { 691 uint32_t hashinfo = 0; 692 693 /* Can't enable rss if disabled by default during init */ 694 if (rss_state->hash_disable) 695 return -EINVAL; 696 697 if (rss_conf->rss_hf & ETH_RSS_IPV4) { 698 hashinfo |= LIO_RSS_HASH_IPV4; 699 rss_state->ip = 1; 700 } else { 701 rss_state->ip = 0; 702 } 703 704 if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) { 705 hashinfo |= LIO_RSS_HASH_TCP_IPV4; 706 rss_state->tcp_hash = 1; 707 } else { 708 rss_state->tcp_hash = 0; 709 } 710 711 if (rss_conf->rss_hf & ETH_RSS_IPV6) { 712 hashinfo |= LIO_RSS_HASH_IPV6; 713 rss_state->ipv6 = 1; 714 } else { 715 rss_state->ipv6 = 0; 716 } 717 718 if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) { 719 hashinfo |= LIO_RSS_HASH_TCP_IPV6; 720 rss_state->ipv6_tcp_hash = 1; 721 } else { 722 rss_state->ipv6_tcp_hash = 0; 723 } 724 725 if (rss_conf->rss_hf & ETH_RSS_IPV6_EX) { 726 hashinfo |= LIO_RSS_HASH_IPV6_EX; 727 rss_state->ipv6_ex = 1; 728 } else { 729 rss_state->ipv6_ex = 0; 730 } 731 732 if (rss_conf->rss_hf & ETH_RSS_IPV6_TCP_EX) { 733 hashinfo |= LIO_RSS_HASH_TCP_IPV6_EX; 734 rss_state->ipv6_tcp_ex_hash = 1; 735 } else { 736 rss_state->ipv6_tcp_ex_hash = 0; 737 } 738 739 rss_param->param.flags &= ~LIO_RSS_PARAM_HASH_INFO_UNCHANGED; 740 rss_param->param.hashinfo = hashinfo; 741 } 742 743 lio_swap_8B_data((uint64_t *)rss_param, LIO_RSS_PARAM_SIZE >> 3); 744 745 if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) { 746 lio_dev_err(lio_dev, "Failed to set rss hash\n"); 747 return -1; 748 } 749 750 if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) { 751 lio_dev_err(lio_dev, "Set rss hash timed out\n"); 752 return -1; 753 } 754 755 return 0; 756 } 757 758 /** 759 * Add vxlan dest udp port for an interface. 
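 *
 * Illustrative only (not part of the driver): an application typically
 * reaches this handler through the generic ethdev API, for example
 *
 *	struct rte_eth_udp_tunnel tnl = {
 *		.udp_port = 4789,
 *		.prot_type = RTE_TUNNEL_TYPE_VXLAN,
 *	};
 *	rte_eth_dev_udp_tunnel_port_add(port_id, &tnl);
 *
 * where 4789 is the IANA VXLAN port and port_id is assumed to identify a
 * started liovf port.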
760 * 761 * @param eth_dev 762 * Pointer to the structure rte_eth_dev 763 * @param udp_tnl 764 * udp tunnel conf 765 * 766 * @return 767 * On success return 0 768 * On failure return -1 769 */ 770 static int 771 lio_dev_udp_tunnel_add(struct rte_eth_dev *eth_dev, 772 struct rte_eth_udp_tunnel *udp_tnl) 773 { 774 struct lio_device *lio_dev = LIO_DEV(eth_dev); 775 struct lio_dev_ctrl_cmd ctrl_cmd; 776 struct lio_ctrl_pkt ctrl_pkt; 777 778 if (udp_tnl == NULL) 779 return -EINVAL; 780 781 if (udp_tnl->prot_type != RTE_TUNNEL_TYPE_VXLAN) { 782 lio_dev_err(lio_dev, "Unsupported tunnel type\n"); 783 return -1; 784 } 785 786 /* flush added to prevent cmd failure 787 * incase the queue is full 788 */ 789 lio_flush_iq(lio_dev, lio_dev->instr_queue[0]); 790 791 memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt)); 792 memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd)); 793 794 ctrl_cmd.eth_dev = eth_dev; 795 ctrl_cmd.cond = 0; 796 797 ctrl_pkt.ncmd.s.cmd = LIO_CMD_VXLAN_PORT_CONFIG; 798 ctrl_pkt.ncmd.s.param1 = udp_tnl->udp_port; 799 ctrl_pkt.ncmd.s.more = LIO_CMD_VXLAN_PORT_ADD; 800 ctrl_pkt.ctrl_cmd = &ctrl_cmd; 801 802 if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) { 803 lio_dev_err(lio_dev, "Failed to send VXLAN_PORT_ADD command\n"); 804 return -1; 805 } 806 807 if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) { 808 lio_dev_err(lio_dev, "VXLAN_PORT_ADD command timed out\n"); 809 return -1; 810 } 811 812 return 0; 813 } 814 815 /** 816 * Remove vxlan dest udp port for an interface. 817 * 818 * @param eth_dev 819 * Pointer to the structure rte_eth_dev 820 * @param udp_tnl 821 * udp tunnel conf 822 * 823 * @return 824 * On success return 0 825 * On failure return -1 826 */ 827 static int 828 lio_dev_udp_tunnel_del(struct rte_eth_dev *eth_dev, 829 struct rte_eth_udp_tunnel *udp_tnl) 830 { 831 struct lio_device *lio_dev = LIO_DEV(eth_dev); 832 struct lio_dev_ctrl_cmd ctrl_cmd; 833 struct lio_ctrl_pkt ctrl_pkt; 834 835 if (udp_tnl == NULL) 836 return -EINVAL; 837 838 if (udp_tnl->prot_type != RTE_TUNNEL_TYPE_VXLAN) { 839 lio_dev_err(lio_dev, "Unsupported tunnel type\n"); 840 return -1; 841 } 842 843 /* flush added to prevent cmd failure 844 * incase the queue is full 845 */ 846 lio_flush_iq(lio_dev, lio_dev->instr_queue[0]); 847 848 memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt)); 849 memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd)); 850 851 ctrl_cmd.eth_dev = eth_dev; 852 ctrl_cmd.cond = 0; 853 854 ctrl_pkt.ncmd.s.cmd = LIO_CMD_VXLAN_PORT_CONFIG; 855 ctrl_pkt.ncmd.s.param1 = udp_tnl->udp_port; 856 ctrl_pkt.ncmd.s.more = LIO_CMD_VXLAN_PORT_DEL; 857 ctrl_pkt.ctrl_cmd = &ctrl_cmd; 858 859 if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) { 860 lio_dev_err(lio_dev, "Failed to send VXLAN_PORT_DEL command\n"); 861 return -1; 862 } 863 864 if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) { 865 lio_dev_err(lio_dev, "VXLAN_PORT_DEL command timed out\n"); 866 return -1; 867 } 868 869 return 0; 870 } 871 872 static int 873 lio_dev_vlan_filter_set(struct rte_eth_dev *eth_dev, uint16_t vlan_id, int on) 874 { 875 struct lio_device *lio_dev = LIO_DEV(eth_dev); 876 struct lio_dev_ctrl_cmd ctrl_cmd; 877 struct lio_ctrl_pkt ctrl_pkt; 878 879 if (lio_dev->linfo.vlan_is_admin_assigned) 880 return -EPERM; 881 882 /* flush added to prevent cmd failure 883 * incase the queue is full 884 */ 885 lio_flush_iq(lio_dev, lio_dev->instr_queue[0]); 886 887 memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt)); 888 memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd)); 889 890 ctrl_cmd.eth_dev = eth_dev; 891 ctrl_cmd.cond = 0; 892 893 
ctrl_pkt.ncmd.s.cmd = on ? 894 LIO_CMD_ADD_VLAN_FILTER : LIO_CMD_DEL_VLAN_FILTER; 895 ctrl_pkt.ncmd.s.param1 = vlan_id; 896 ctrl_pkt.ctrl_cmd = &ctrl_cmd; 897 898 if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) { 899 lio_dev_err(lio_dev, "Failed to %s VLAN port\n", 900 on ? "add" : "remove"); 901 return -1; 902 } 903 904 if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) { 905 lio_dev_err(lio_dev, "Command to %s VLAN port timed out\n", 906 on ? "add" : "remove"); 907 return -1; 908 } 909 910 return 0; 911 } 912 913 static uint64_t 914 lio_hweight64(uint64_t w) 915 { 916 uint64_t res = w - ((w >> 1) & 0x5555555555555555ul); 917 918 res = 919 (res & 0x3333333333333333ul) + ((res >> 2) & 0x3333333333333333ul); 920 res = (res + (res >> 4)) & 0x0F0F0F0F0F0F0F0Ful; 921 res = res + (res >> 8); 922 res = res + (res >> 16); 923 924 return (res + (res >> 32)) & 0x00000000000000FFul; 925 } 926 927 static int 928 lio_dev_link_update(struct rte_eth_dev *eth_dev, 929 int wait_to_complete __rte_unused) 930 { 931 struct lio_device *lio_dev = LIO_DEV(eth_dev); 932 struct rte_eth_link link; 933 934 /* Initialize */ 935 memset(&link, 0, sizeof(link)); 936 link.link_status = ETH_LINK_DOWN; 937 link.link_speed = ETH_SPEED_NUM_NONE; 938 link.link_duplex = ETH_LINK_HALF_DUPLEX; 939 link.link_autoneg = ETH_LINK_AUTONEG; 940 941 /* Return what we found */ 942 if (lio_dev->linfo.link.s.link_up == 0) { 943 /* Interface is down */ 944 return rte_eth_linkstatus_set(eth_dev, &link); 945 } 946 947 link.link_status = ETH_LINK_UP; /* Interface is up */ 948 link.link_duplex = ETH_LINK_FULL_DUPLEX; 949 switch (lio_dev->linfo.link.s.speed) { 950 case LIO_LINK_SPEED_10000: 951 link.link_speed = ETH_SPEED_NUM_10G; 952 break; 953 case LIO_LINK_SPEED_25000: 954 link.link_speed = ETH_SPEED_NUM_25G; 955 break; 956 default: 957 link.link_speed = ETH_SPEED_NUM_NONE; 958 link.link_duplex = ETH_LINK_HALF_DUPLEX; 959 } 960 961 return rte_eth_linkstatus_set(eth_dev, &link); 962 } 963 964 /** 965 * \brief Net device enable, disable allmulticast 966 * @param eth_dev Pointer to the structure rte_eth_dev 967 * 968 * @return 969 * On success return 0 970 * On failure return negative errno 971 */ 972 static int 973 lio_change_dev_flag(struct rte_eth_dev *eth_dev) 974 { 975 struct lio_device *lio_dev = LIO_DEV(eth_dev); 976 struct lio_dev_ctrl_cmd ctrl_cmd; 977 struct lio_ctrl_pkt ctrl_pkt; 978 979 /* flush added to prevent cmd failure 980 * incase the queue is full 981 */ 982 lio_flush_iq(lio_dev, lio_dev->instr_queue[0]); 983 984 memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt)); 985 memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd)); 986 987 ctrl_cmd.eth_dev = eth_dev; 988 ctrl_cmd.cond = 0; 989 990 /* Create a ctrl pkt command to be sent to core app. 
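	 * LIO_CMD_CHANGE_DEVFLAGS carries the complete lio_dev->ifflags
	 * bitmap (LIO_IFFLAG_UNICAST / LIO_IFFLAG_PROMISC /
	 * LIO_IFFLAG_ALLMULTI), so callers only update the bitmap and then
	 * invoke this helper.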
*/ 991 ctrl_pkt.ncmd.s.cmd = LIO_CMD_CHANGE_DEVFLAGS; 992 ctrl_pkt.ncmd.s.param1 = lio_dev->ifflags; 993 ctrl_pkt.ctrl_cmd = &ctrl_cmd; 994 995 if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) { 996 lio_dev_err(lio_dev, "Failed to send change flag message\n"); 997 return -EAGAIN; 998 } 999 1000 if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) { 1001 lio_dev_err(lio_dev, "Change dev flag command timed out\n"); 1002 return -ETIMEDOUT; 1003 } 1004 1005 return 0; 1006 } 1007 1008 static int 1009 lio_dev_promiscuous_enable(struct rte_eth_dev *eth_dev) 1010 { 1011 struct lio_device *lio_dev = LIO_DEV(eth_dev); 1012 1013 if (strcmp(lio_dev->firmware_version, LIO_VF_TRUST_MIN_VERSION) < 0) { 1014 lio_dev_err(lio_dev, "Require firmware version >= %s\n", 1015 LIO_VF_TRUST_MIN_VERSION); 1016 return -EAGAIN; 1017 } 1018 1019 if (!lio_dev->intf_open) { 1020 lio_dev_err(lio_dev, "Port %d down, can't enable promiscuous\n", 1021 lio_dev->port_id); 1022 return -EAGAIN; 1023 } 1024 1025 lio_dev->ifflags |= LIO_IFFLAG_PROMISC; 1026 return lio_change_dev_flag(eth_dev); 1027 } 1028 1029 static int 1030 lio_dev_promiscuous_disable(struct rte_eth_dev *eth_dev) 1031 { 1032 struct lio_device *lio_dev = LIO_DEV(eth_dev); 1033 1034 if (strcmp(lio_dev->firmware_version, LIO_VF_TRUST_MIN_VERSION) < 0) { 1035 lio_dev_err(lio_dev, "Require firmware version >= %s\n", 1036 LIO_VF_TRUST_MIN_VERSION); 1037 return -EAGAIN; 1038 } 1039 1040 if (!lio_dev->intf_open) { 1041 lio_dev_err(lio_dev, "Port %d down, can't disable promiscuous\n", 1042 lio_dev->port_id); 1043 return -EAGAIN; 1044 } 1045 1046 lio_dev->ifflags &= ~LIO_IFFLAG_PROMISC; 1047 return lio_change_dev_flag(eth_dev); 1048 } 1049 1050 static int 1051 lio_dev_allmulticast_enable(struct rte_eth_dev *eth_dev) 1052 { 1053 struct lio_device *lio_dev = LIO_DEV(eth_dev); 1054 1055 if (!lio_dev->intf_open) { 1056 lio_dev_err(lio_dev, "Port %d down, can't enable multicast\n", 1057 lio_dev->port_id); 1058 return -EAGAIN; 1059 } 1060 1061 lio_dev->ifflags |= LIO_IFFLAG_ALLMULTI; 1062 return lio_change_dev_flag(eth_dev); 1063 } 1064 1065 static int 1066 lio_dev_allmulticast_disable(struct rte_eth_dev *eth_dev) 1067 { 1068 struct lio_device *lio_dev = LIO_DEV(eth_dev); 1069 1070 if (!lio_dev->intf_open) { 1071 lio_dev_err(lio_dev, "Port %d down, can't disable multicast\n", 1072 lio_dev->port_id); 1073 return -EAGAIN; 1074 } 1075 1076 lio_dev->ifflags &= ~LIO_IFFLAG_ALLMULTI; 1077 return lio_change_dev_flag(eth_dev); 1078 } 1079 1080 static void 1081 lio_dev_rss_configure(struct rte_eth_dev *eth_dev) 1082 { 1083 struct lio_device *lio_dev = LIO_DEV(eth_dev); 1084 struct lio_rss_ctx *rss_state = &lio_dev->rss_state; 1085 struct rte_eth_rss_reta_entry64 reta_conf[8]; 1086 struct rte_eth_rss_conf rss_conf; 1087 uint16_t i; 1088 1089 /* Configure the RSS key and the RSS protocols used to compute 1090 * the RSS hash of input packets. 1091 */ 1092 rss_conf = eth_dev->data->dev_conf.rx_adv_conf.rss_conf; 1093 if ((rss_conf.rss_hf & LIO_RSS_OFFLOAD_ALL) == 0) { 1094 rss_state->hash_disable = 1; 1095 lio_dev_rss_hash_update(eth_dev, &rss_conf); 1096 return; 1097 } 1098 1099 if (rss_conf.rss_key == NULL) 1100 rss_conf.rss_key = lio_rss_key; /* Default hash key */ 1101 1102 lio_dev_rss_hash_update(eth_dev, &rss_conf); 1103 1104 memset(reta_conf, 0, sizeof(reta_conf)); 1105 for (i = 0; i < LIO_RSS_MAX_TABLE_SZ; i++) { 1106 uint8_t q_idx, conf_idx, reta_idx; 1107 1108 q_idx = (uint8_t)((eth_dev->data->nb_rx_queues > 1) ? 
1109 i % eth_dev->data->nb_rx_queues : 0); 1110 conf_idx = i / RTE_RETA_GROUP_SIZE; 1111 reta_idx = i % RTE_RETA_GROUP_SIZE; 1112 reta_conf[conf_idx].reta[reta_idx] = q_idx; 1113 reta_conf[conf_idx].mask |= ((uint64_t)1 << reta_idx); 1114 } 1115 1116 lio_dev_rss_reta_update(eth_dev, reta_conf, LIO_RSS_MAX_TABLE_SZ); 1117 } 1118 1119 static void 1120 lio_dev_mq_rx_configure(struct rte_eth_dev *eth_dev) 1121 { 1122 struct lio_device *lio_dev = LIO_DEV(eth_dev); 1123 struct lio_rss_ctx *rss_state = &lio_dev->rss_state; 1124 struct rte_eth_rss_conf rss_conf; 1125 1126 switch (eth_dev->data->dev_conf.rxmode.mq_mode) { 1127 case ETH_MQ_RX_RSS: 1128 lio_dev_rss_configure(eth_dev); 1129 break; 1130 case ETH_MQ_RX_NONE: 1131 /* if mq_mode is none, disable rss mode. */ 1132 default: 1133 memset(&rss_conf, 0, sizeof(rss_conf)); 1134 rss_state->hash_disable = 1; 1135 lio_dev_rss_hash_update(eth_dev, &rss_conf); 1136 } 1137 } 1138 1139 /** 1140 * Setup our receive queue/ringbuffer. This is the 1141 * queue the Octeon uses to send us packets and 1142 * responses. We are given a memory pool for our 1143 * packet buffers that are used to populate the receive 1144 * queue. 1145 * 1146 * @param eth_dev 1147 * Pointer to the structure rte_eth_dev 1148 * @param q_no 1149 * Queue number 1150 * @param num_rx_descs 1151 * Number of entries in the queue 1152 * @param socket_id 1153 * Where to allocate memory 1154 * @param rx_conf 1155 * Pointer to the struction rte_eth_rxconf 1156 * @param mp 1157 * Pointer to the packet pool 1158 * 1159 * @return 1160 * - On success, return 0 1161 * - On failure, return -1 1162 */ 1163 static int 1164 lio_dev_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t q_no, 1165 uint16_t num_rx_descs, unsigned int socket_id, 1166 const struct rte_eth_rxconf *rx_conf __rte_unused, 1167 struct rte_mempool *mp) 1168 { 1169 struct lio_device *lio_dev = LIO_DEV(eth_dev); 1170 struct rte_pktmbuf_pool_private *mbp_priv; 1171 uint32_t fw_mapped_oq; 1172 uint16_t buf_size; 1173 1174 if (q_no >= lio_dev->nb_rx_queues) { 1175 lio_dev_err(lio_dev, "Invalid rx queue number %u\n", q_no); 1176 return -EINVAL; 1177 } 1178 1179 lio_dev_dbg(lio_dev, "setting up rx queue %u\n", q_no); 1180 1181 fw_mapped_oq = lio_dev->linfo.rxpciq[q_no].s.q_no; 1182 1183 /* Free previous allocation if any */ 1184 if (eth_dev->data->rx_queues[q_no] != NULL) { 1185 lio_dev_rx_queue_release(eth_dev->data->rx_queues[q_no]); 1186 eth_dev->data->rx_queues[q_no] = NULL; 1187 } 1188 1189 mbp_priv = rte_mempool_get_priv(mp); 1190 buf_size = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM; 1191 1192 if (lio_setup_droq(lio_dev, fw_mapped_oq, num_rx_descs, buf_size, mp, 1193 socket_id)) { 1194 lio_dev_err(lio_dev, "droq allocation failed\n"); 1195 return -1; 1196 } 1197 1198 eth_dev->data->rx_queues[q_no] = lio_dev->droq[fw_mapped_oq]; 1199 1200 return 0; 1201 } 1202 1203 /** 1204 * Release the receive queue/ringbuffer. Called by 1205 * the upper layers. 1206 * 1207 * @param rxq 1208 * Opaque pointer to the receive queue to release 1209 * 1210 * @return 1211 * - nothing 1212 */ 1213 void 1214 lio_dev_rx_queue_release(void *rxq) 1215 { 1216 struct lio_droq *droq = rxq; 1217 int oq_no; 1218 1219 if (droq) { 1220 oq_no = droq->q_no; 1221 lio_delete_droq_queue(droq->lio_dev, oq_no); 1222 } 1223 } 1224 1225 /** 1226 * Allocate and initialize SW ring. Initialize associated HW registers. 
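 *
 * Illustrative only: the matching application-level call would be, for
 * example,
 *
 *	rte_eth_tx_queue_setup(port_id, 0, 512, rte_socket_id(), NULL);
 *
 * with port_id assumed to identify this device and 512 chosen only for
 * illustration.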
1227 * 1228 * @param eth_dev 1229 * Pointer to structure rte_eth_dev 1230 * 1231 * @param q_no 1232 * Queue number 1233 * 1234 * @param num_tx_descs 1235 * Number of ringbuffer descriptors 1236 * 1237 * @param socket_id 1238 * NUMA socket id, used for memory allocations 1239 * 1240 * @param tx_conf 1241 * Pointer to the structure rte_eth_txconf 1242 * 1243 * @return 1244 * - On success, return 0 1245 * - On failure, return -errno value 1246 */ 1247 static int 1248 lio_dev_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t q_no, 1249 uint16_t num_tx_descs, unsigned int socket_id, 1250 const struct rte_eth_txconf *tx_conf __rte_unused) 1251 { 1252 struct lio_device *lio_dev = LIO_DEV(eth_dev); 1253 int fw_mapped_iq = lio_dev->linfo.txpciq[q_no].s.q_no; 1254 int retval; 1255 1256 if (q_no >= lio_dev->nb_tx_queues) { 1257 lio_dev_err(lio_dev, "Invalid tx queue number %u\n", q_no); 1258 return -EINVAL; 1259 } 1260 1261 lio_dev_dbg(lio_dev, "setting up tx queue %u\n", q_no); 1262 1263 /* Free previous allocation if any */ 1264 if (eth_dev->data->tx_queues[q_no] != NULL) { 1265 lio_dev_tx_queue_release(eth_dev->data->tx_queues[q_no]); 1266 eth_dev->data->tx_queues[q_no] = NULL; 1267 } 1268 1269 retval = lio_setup_iq(lio_dev, q_no, lio_dev->linfo.txpciq[q_no], 1270 num_tx_descs, lio_dev, socket_id); 1271 1272 if (retval) { 1273 lio_dev_err(lio_dev, "Runtime IQ(TxQ) creation failed.\n"); 1274 return retval; 1275 } 1276 1277 retval = lio_setup_sglists(lio_dev, q_no, fw_mapped_iq, 1278 lio_dev->instr_queue[fw_mapped_iq]->nb_desc, 1279 socket_id); 1280 1281 if (retval) { 1282 lio_delete_instruction_queue(lio_dev, fw_mapped_iq); 1283 return retval; 1284 } 1285 1286 eth_dev->data->tx_queues[q_no] = lio_dev->instr_queue[fw_mapped_iq]; 1287 1288 return 0; 1289 } 1290 1291 /** 1292 * Release the transmit queue/ringbuffer. Called by 1293 * the upper layers. 1294 * 1295 * @param txq 1296 * Opaque pointer to the transmit queue to release 1297 * 1298 * @return 1299 * - nothing 1300 */ 1301 void 1302 lio_dev_tx_queue_release(void *txq) 1303 { 1304 struct lio_instr_queue *tq = txq; 1305 uint32_t fw_mapped_iq_no; 1306 1307 1308 if (tq) { 1309 /* Free sg_list */ 1310 lio_delete_sglist(tq); 1311 1312 fw_mapped_iq_no = tq->txpciq.s.q_no; 1313 lio_delete_instruction_queue(tq->lio_dev, fw_mapped_iq_no); 1314 } 1315 } 1316 1317 /** 1318 * Api to check link state. 
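 *
 * Sends an LIO_OPCODE_INFO soft command to the firmware, polls for the
 * response and, if the returned link word differs from the cached copy,
 * updates lio_dev->linfo (lowering the VF MTU if the PF MTU dropped) and
 * refreshes the ethdev link via lio_dev_link_update().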
1319 */ 1320 static void 1321 lio_dev_get_link_status(struct rte_eth_dev *eth_dev) 1322 { 1323 struct lio_device *lio_dev = LIO_DEV(eth_dev); 1324 uint16_t timeout = LIO_MAX_CMD_TIMEOUT; 1325 struct lio_link_status_resp *resp; 1326 union octeon_link_status *ls; 1327 struct lio_soft_command *sc; 1328 uint32_t resp_size; 1329 1330 if (!lio_dev->intf_open) 1331 return; 1332 1333 resp_size = sizeof(struct lio_link_status_resp); 1334 sc = lio_alloc_soft_command(lio_dev, 0, resp_size, 0); 1335 if (sc == NULL) 1336 return; 1337 1338 resp = (struct lio_link_status_resp *)sc->virtrptr; 1339 lio_prepare_soft_command(lio_dev, sc, LIO_OPCODE, 1340 LIO_OPCODE_INFO, 0, 0, 0); 1341 1342 /* Setting wait time in seconds */ 1343 sc->wait_time = LIO_MAX_CMD_TIMEOUT / 1000; 1344 1345 if (lio_send_soft_command(lio_dev, sc) == LIO_IQ_SEND_FAILED) 1346 goto get_status_fail; 1347 1348 while ((*sc->status_word == LIO_COMPLETION_WORD_INIT) && --timeout) { 1349 lio_flush_iq(lio_dev, lio_dev->instr_queue[sc->iq_no]); 1350 rte_delay_ms(1); 1351 } 1352 1353 if (resp->status) 1354 goto get_status_fail; 1355 1356 ls = &resp->link_info.link; 1357 1358 lio_swap_8B_data((uint64_t *)ls, sizeof(union octeon_link_status) >> 3); 1359 1360 if (lio_dev->linfo.link.link_status64 != ls->link_status64) { 1361 if (ls->s.mtu < eth_dev->data->mtu) { 1362 lio_dev_info(lio_dev, "Lowered VF MTU to %d as PF MTU dropped\n", 1363 ls->s.mtu); 1364 eth_dev->data->mtu = ls->s.mtu; 1365 } 1366 lio_dev->linfo.link.link_status64 = ls->link_status64; 1367 lio_dev_link_update(eth_dev, 0); 1368 } 1369 1370 lio_free_soft_command(sc); 1371 1372 return; 1373 1374 get_status_fail: 1375 lio_free_soft_command(sc); 1376 } 1377 1378 /* This function will be invoked every LSC_TIMEOUT ns (100ms) 1379 * and will update link state if it changes. 1380 */ 1381 static void 1382 lio_sync_link_state_check(void *eth_dev) 1383 { 1384 struct lio_device *lio_dev = 1385 (((struct rte_eth_dev *)eth_dev)->data->dev_private); 1386 1387 if (lio_dev->port_configured) 1388 lio_dev_get_link_status(eth_dev); 1389 1390 /* Schedule periodic link status check. 1391 * Stop check if interface is close and start again while opening. 1392 */ 1393 if (lio_dev->intf_open) 1394 rte_eal_alarm_set(LIO_LSC_TIMEOUT, lio_sync_link_state_check, 1395 eth_dev); 1396 } 1397 1398 static int 1399 lio_dev_start(struct rte_eth_dev *eth_dev) 1400 { 1401 uint16_t mtu; 1402 uint32_t frame_len = eth_dev->data->dev_conf.rxmode.max_rx_pkt_len; 1403 struct lio_device *lio_dev = LIO_DEV(eth_dev); 1404 uint16_t timeout = LIO_MAX_CMD_TIMEOUT; 1405 int ret = 0; 1406 1407 lio_dev_info(lio_dev, "Starting port %d\n", eth_dev->data->port_id); 1408 1409 if (lio_dev->fn_list.enable_io_queues(lio_dev)) 1410 return -1; 1411 1412 if (lio_send_rx_ctrl_cmd(eth_dev, 1)) 1413 return -1; 1414 1415 /* Ready for link status updates */ 1416 lio_dev->intf_open = 1; 1417 rte_mb(); 1418 1419 /* Configure RSS if device configured with multiple RX queues. */ 1420 lio_dev_mq_rx_configure(eth_dev); 1421 1422 /* Before update the link info, 1423 * must set linfo.link.link_status64 to 0. 
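	 * The wait loop below treats a non-zero link_status64 as the signal
	 * that lio_sync_link_state_check() has fetched fresh link info from
	 * the firmware.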
1424 */ 1425 lio_dev->linfo.link.link_status64 = 0; 1426 1427 /* start polling for lsc */ 1428 ret = rte_eal_alarm_set(LIO_LSC_TIMEOUT, 1429 lio_sync_link_state_check, 1430 eth_dev); 1431 if (ret) { 1432 lio_dev_err(lio_dev, 1433 "link state check handler creation failed\n"); 1434 goto dev_lsc_handle_error; 1435 } 1436 1437 while ((lio_dev->linfo.link.link_status64 == 0) && (--timeout)) 1438 rte_delay_ms(1); 1439 1440 if (lio_dev->linfo.link.link_status64 == 0) { 1441 ret = -1; 1442 goto dev_mtu_set_error; 1443 } 1444 1445 mtu = (uint16_t)(frame_len - RTE_ETHER_HDR_LEN - RTE_ETHER_CRC_LEN); 1446 if (mtu < RTE_ETHER_MIN_MTU) 1447 mtu = RTE_ETHER_MIN_MTU; 1448 1449 if (eth_dev->data->mtu != mtu) { 1450 ret = lio_dev_mtu_set(eth_dev, mtu); 1451 if (ret) 1452 goto dev_mtu_set_error; 1453 } 1454 1455 return 0; 1456 1457 dev_mtu_set_error: 1458 rte_eal_alarm_cancel(lio_sync_link_state_check, eth_dev); 1459 1460 dev_lsc_handle_error: 1461 lio_dev->intf_open = 0; 1462 lio_send_rx_ctrl_cmd(eth_dev, 0); 1463 1464 return ret; 1465 } 1466 1467 /* Stop device and disable input/output functions */ 1468 static int 1469 lio_dev_stop(struct rte_eth_dev *eth_dev) 1470 { 1471 struct lio_device *lio_dev = LIO_DEV(eth_dev); 1472 1473 lio_dev_info(lio_dev, "Stopping port %d\n", eth_dev->data->port_id); 1474 eth_dev->data->dev_started = 0; 1475 lio_dev->intf_open = 0; 1476 rte_mb(); 1477 1478 /* Cancel callback if still running. */ 1479 rte_eal_alarm_cancel(lio_sync_link_state_check, eth_dev); 1480 1481 lio_send_rx_ctrl_cmd(eth_dev, 0); 1482 1483 lio_wait_for_instr_fetch(lio_dev); 1484 1485 /* Clear recorded link status */ 1486 lio_dev->linfo.link.link_status64 = 0; 1487 1488 return 0; 1489 } 1490 1491 static int 1492 lio_dev_set_link_up(struct rte_eth_dev *eth_dev) 1493 { 1494 struct lio_device *lio_dev = LIO_DEV(eth_dev); 1495 1496 if (!lio_dev->intf_open) { 1497 lio_dev_info(lio_dev, "Port is stopped, Start the port first\n"); 1498 return 0; 1499 } 1500 1501 if (lio_dev->linfo.link.s.link_up) { 1502 lio_dev_info(lio_dev, "Link is already UP\n"); 1503 return 0; 1504 } 1505 1506 if (lio_send_rx_ctrl_cmd(eth_dev, 1)) { 1507 lio_dev_err(lio_dev, "Unable to set Link UP\n"); 1508 return -1; 1509 } 1510 1511 lio_dev->linfo.link.s.link_up = 1; 1512 eth_dev->data->dev_link.link_status = ETH_LINK_UP; 1513 1514 return 0; 1515 } 1516 1517 static int 1518 lio_dev_set_link_down(struct rte_eth_dev *eth_dev) 1519 { 1520 struct lio_device *lio_dev = LIO_DEV(eth_dev); 1521 1522 if (!lio_dev->intf_open) { 1523 lio_dev_info(lio_dev, "Port is stopped, Start the port first\n"); 1524 return 0; 1525 } 1526 1527 if (!lio_dev->linfo.link.s.link_up) { 1528 lio_dev_info(lio_dev, "Link is already DOWN\n"); 1529 return 0; 1530 } 1531 1532 lio_dev->linfo.link.s.link_up = 0; 1533 eth_dev->data->dev_link.link_status = ETH_LINK_DOWN; 1534 1535 if (lio_send_rx_ctrl_cmd(eth_dev, 0)) { 1536 lio_dev->linfo.link.s.link_up = 1; 1537 eth_dev->data->dev_link.link_status = ETH_LINK_UP; 1538 lio_dev_err(lio_dev, "Unable to set Link Down\n"); 1539 return -1; 1540 } 1541 1542 return 0; 1543 } 1544 1545 /** 1546 * Reset and stop the device. This occurs on the first 1547 * call to this routine. Subsequent calls will simply 1548 * return. NB: This will require the NIC to be rebooted. 
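 *
 * Illustrative only: applications normally reach this handler by calling
 * rte_eth_dev_close(port_id) after stopping the port.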
1549 * 1550 * @param eth_dev 1551 * Pointer to the structure rte_eth_dev 1552 * 1553 * @return 1554 * - nothing 1555 */ 1556 static int 1557 lio_dev_close(struct rte_eth_dev *eth_dev) 1558 { 1559 struct lio_device *lio_dev = LIO_DEV(eth_dev); 1560 int ret = 0; 1561 1562 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 1563 return 0; 1564 1565 lio_dev_info(lio_dev, "closing port %d\n", eth_dev->data->port_id); 1566 1567 if (lio_dev->intf_open) 1568 ret = lio_dev_stop(eth_dev); 1569 1570 /* Reset ioq regs */ 1571 lio_dev->fn_list.setup_device_regs(lio_dev); 1572 1573 if (lio_dev->pci_dev->kdrv == RTE_PCI_KDRV_IGB_UIO) { 1574 cn23xx_vf_ask_pf_to_do_flr(lio_dev); 1575 rte_delay_ms(LIO_PCI_FLR_WAIT); 1576 } 1577 1578 /* lio_free_mbox */ 1579 lio_dev->fn_list.free_mbox(lio_dev); 1580 1581 /* Free glist resources */ 1582 rte_free(lio_dev->glist_head); 1583 rte_free(lio_dev->glist_lock); 1584 lio_dev->glist_head = NULL; 1585 lio_dev->glist_lock = NULL; 1586 1587 lio_dev->port_configured = 0; 1588 1589 /* Delete all queues */ 1590 lio_dev_clear_queues(eth_dev); 1591 1592 return ret; 1593 } 1594 1595 /** 1596 * Enable tunnel rx checksum verification from firmware. 1597 */ 1598 static void 1599 lio_enable_hw_tunnel_rx_checksum(struct rte_eth_dev *eth_dev) 1600 { 1601 struct lio_device *lio_dev = LIO_DEV(eth_dev); 1602 struct lio_dev_ctrl_cmd ctrl_cmd; 1603 struct lio_ctrl_pkt ctrl_pkt; 1604 1605 /* flush added to prevent cmd failure 1606 * incase the queue is full 1607 */ 1608 lio_flush_iq(lio_dev, lio_dev->instr_queue[0]); 1609 1610 memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt)); 1611 memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd)); 1612 1613 ctrl_cmd.eth_dev = eth_dev; 1614 ctrl_cmd.cond = 0; 1615 1616 ctrl_pkt.ncmd.s.cmd = LIO_CMD_TNL_RX_CSUM_CTL; 1617 ctrl_pkt.ncmd.s.param1 = LIO_CMD_RXCSUM_ENABLE; 1618 ctrl_pkt.ctrl_cmd = &ctrl_cmd; 1619 1620 if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) { 1621 lio_dev_err(lio_dev, "Failed to send TNL_RX_CSUM command\n"); 1622 return; 1623 } 1624 1625 if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) 1626 lio_dev_err(lio_dev, "TNL_RX_CSUM command timed out\n"); 1627 } 1628 1629 /** 1630 * Enable checksum calculation for inner packet in a tunnel. 
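 *
 * This complements lio_enable_hw_tunnel_rx_checksum() above; both are
 * issued unconditionally from lio_dev_configure(). See also the checksum
 * offload capabilities reported by lio_dev_info_get().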
1631 */ 1632 static void 1633 lio_enable_hw_tunnel_tx_checksum(struct rte_eth_dev *eth_dev) 1634 { 1635 struct lio_device *lio_dev = LIO_DEV(eth_dev); 1636 struct lio_dev_ctrl_cmd ctrl_cmd; 1637 struct lio_ctrl_pkt ctrl_pkt; 1638 1639 /* flush added to prevent cmd failure 1640 * incase the queue is full 1641 */ 1642 lio_flush_iq(lio_dev, lio_dev->instr_queue[0]); 1643 1644 memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt)); 1645 memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd)); 1646 1647 ctrl_cmd.eth_dev = eth_dev; 1648 ctrl_cmd.cond = 0; 1649 1650 ctrl_pkt.ncmd.s.cmd = LIO_CMD_TNL_TX_CSUM_CTL; 1651 ctrl_pkt.ncmd.s.param1 = LIO_CMD_TXCSUM_ENABLE; 1652 ctrl_pkt.ctrl_cmd = &ctrl_cmd; 1653 1654 if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) { 1655 lio_dev_err(lio_dev, "Failed to send TNL_TX_CSUM command\n"); 1656 return; 1657 } 1658 1659 if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) 1660 lio_dev_err(lio_dev, "TNL_TX_CSUM command timed out\n"); 1661 } 1662 1663 static int 1664 lio_send_queue_count_update(struct rte_eth_dev *eth_dev, int num_txq, 1665 int num_rxq) 1666 { 1667 struct lio_device *lio_dev = LIO_DEV(eth_dev); 1668 struct lio_dev_ctrl_cmd ctrl_cmd; 1669 struct lio_ctrl_pkt ctrl_pkt; 1670 1671 if (strcmp(lio_dev->firmware_version, LIO_Q_RECONF_MIN_VERSION) < 0) { 1672 lio_dev_err(lio_dev, "Require firmware version >= %s\n", 1673 LIO_Q_RECONF_MIN_VERSION); 1674 return -ENOTSUP; 1675 } 1676 1677 /* flush added to prevent cmd failure 1678 * incase the queue is full 1679 */ 1680 lio_flush_iq(lio_dev, lio_dev->instr_queue[0]); 1681 1682 memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt)); 1683 memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd)); 1684 1685 ctrl_cmd.eth_dev = eth_dev; 1686 ctrl_cmd.cond = 0; 1687 1688 ctrl_pkt.ncmd.s.cmd = LIO_CMD_QUEUE_COUNT_CTL; 1689 ctrl_pkt.ncmd.s.param1 = num_txq; 1690 ctrl_pkt.ncmd.s.param2 = num_rxq; 1691 ctrl_pkt.ctrl_cmd = &ctrl_cmd; 1692 1693 if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) { 1694 lio_dev_err(lio_dev, "Failed to send queue count control command\n"); 1695 return -1; 1696 } 1697 1698 if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) { 1699 lio_dev_err(lio_dev, "Queue count control command timed out\n"); 1700 return -1; 1701 } 1702 1703 return 0; 1704 } 1705 1706 static int 1707 lio_reconf_queues(struct rte_eth_dev *eth_dev, int num_txq, int num_rxq) 1708 { 1709 struct lio_device *lio_dev = LIO_DEV(eth_dev); 1710 int ret; 1711 1712 if (lio_dev->nb_rx_queues != num_rxq || 1713 lio_dev->nb_tx_queues != num_txq) { 1714 if (lio_send_queue_count_update(eth_dev, num_txq, num_rxq)) 1715 return -1; 1716 lio_dev->nb_rx_queues = num_rxq; 1717 lio_dev->nb_tx_queues = num_txq; 1718 } 1719 1720 if (lio_dev->intf_open) { 1721 ret = lio_dev_stop(eth_dev); 1722 if (ret != 0) 1723 return ret; 1724 } 1725 1726 /* Reset ioq registers */ 1727 if (lio_dev->fn_list.setup_device_regs(lio_dev)) { 1728 lio_dev_err(lio_dev, "Failed to configure device registers\n"); 1729 return -1; 1730 } 1731 1732 return 0; 1733 } 1734 1735 static int 1736 lio_dev_configure(struct rte_eth_dev *eth_dev) 1737 { 1738 struct lio_device *lio_dev = LIO_DEV(eth_dev); 1739 uint16_t timeout = LIO_MAX_CMD_TIMEOUT; 1740 int retval, num_iqueues, num_oqueues; 1741 uint8_t mac[RTE_ETHER_ADDR_LEN], i; 1742 struct lio_if_cfg_resp *resp; 1743 struct lio_soft_command *sc; 1744 union lio_if_cfg if_cfg; 1745 uint32_t resp_size; 1746 1747 PMD_INIT_FUNC_TRACE(); 1748 1749 if (eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) 1750 eth_dev->data->dev_conf.rxmode.offloads |= 1751 
DEV_RX_OFFLOAD_RSS_HASH; 1752 1753 /* Inform firmware about change in number of queues to use. 1754 * Disable IO queues and reset registers for re-configuration. 1755 */ 1756 if (lio_dev->port_configured) 1757 return lio_reconf_queues(eth_dev, 1758 eth_dev->data->nb_tx_queues, 1759 eth_dev->data->nb_rx_queues); 1760 1761 lio_dev->nb_rx_queues = eth_dev->data->nb_rx_queues; 1762 lio_dev->nb_tx_queues = eth_dev->data->nb_tx_queues; 1763 1764 /* Set max number of queues which can be re-configured. */ 1765 lio_dev->max_rx_queues = eth_dev->data->nb_rx_queues; 1766 lio_dev->max_tx_queues = eth_dev->data->nb_tx_queues; 1767 1768 resp_size = sizeof(struct lio_if_cfg_resp); 1769 sc = lio_alloc_soft_command(lio_dev, 0, resp_size, 0); 1770 if (sc == NULL) 1771 return -ENOMEM; 1772 1773 resp = (struct lio_if_cfg_resp *)sc->virtrptr; 1774 1775 /* Firmware doesn't have capability to reconfigure the queues, 1776 * Claim all queues, and use as many required 1777 */ 1778 if_cfg.if_cfg64 = 0; 1779 if_cfg.s.num_iqueues = lio_dev->nb_tx_queues; 1780 if_cfg.s.num_oqueues = lio_dev->nb_rx_queues; 1781 if_cfg.s.base_queue = 0; 1782 1783 if_cfg.s.gmx_port_id = lio_dev->pf_num; 1784 1785 lio_prepare_soft_command(lio_dev, sc, LIO_OPCODE, 1786 LIO_OPCODE_IF_CFG, 0, 1787 if_cfg.if_cfg64, 0); 1788 1789 /* Setting wait time in seconds */ 1790 sc->wait_time = LIO_MAX_CMD_TIMEOUT / 1000; 1791 1792 retval = lio_send_soft_command(lio_dev, sc); 1793 if (retval == LIO_IQ_SEND_FAILED) { 1794 lio_dev_err(lio_dev, "iq/oq config failed status: %x\n", 1795 retval); 1796 /* Soft instr is freed by driver in case of failure. */ 1797 goto nic_config_fail; 1798 } 1799 1800 /* Sleep on a wait queue till the cond flag indicates that the 1801 * response arrived or timed-out. 1802 */ 1803 while ((*sc->status_word == LIO_COMPLETION_WORD_INIT) && --timeout) { 1804 lio_flush_iq(lio_dev, lio_dev->instr_queue[sc->iq_no]); 1805 lio_process_ordered_list(lio_dev); 1806 rte_delay_ms(1); 1807 } 1808 1809 retval = resp->status; 1810 if (retval) { 1811 lio_dev_err(lio_dev, "iq/oq config failed\n"); 1812 goto nic_config_fail; 1813 } 1814 1815 strlcpy(lio_dev->firmware_version, 1816 resp->cfg_info.lio_firmware_version, LIO_FW_VERSION_LENGTH); 1817 1818 lio_swap_8B_data((uint64_t *)(&resp->cfg_info), 1819 sizeof(struct octeon_if_cfg_info) >> 3); 1820 1821 num_iqueues = lio_hweight64(resp->cfg_info.iqmask); 1822 num_oqueues = lio_hweight64(resp->cfg_info.oqmask); 1823 1824 if (!(num_iqueues) || !(num_oqueues)) { 1825 lio_dev_err(lio_dev, 1826 "Got bad iqueues (%016lx) or oqueues (%016lx) from firmware.\n", 1827 (unsigned long)resp->cfg_info.iqmask, 1828 (unsigned long)resp->cfg_info.oqmask); 1829 goto nic_config_fail; 1830 } 1831 1832 lio_dev_dbg(lio_dev, 1833 "interface %d, iqmask %016lx, oqmask %016lx, numiqueues %d, numoqueues %d\n", 1834 eth_dev->data->port_id, 1835 (unsigned long)resp->cfg_info.iqmask, 1836 (unsigned long)resp->cfg_info.oqmask, 1837 num_iqueues, num_oqueues); 1838 1839 lio_dev->linfo.num_rxpciq = num_oqueues; 1840 lio_dev->linfo.num_txpciq = num_iqueues; 1841 1842 for (i = 0; i < num_oqueues; i++) { 1843 lio_dev->linfo.rxpciq[i].rxpciq64 = 1844 resp->cfg_info.linfo.rxpciq[i].rxpciq64; 1845 lio_dev_dbg(lio_dev, "index %d OQ %d\n", 1846 i, lio_dev->linfo.rxpciq[i].s.q_no); 1847 } 1848 1849 for (i = 0; i < num_iqueues; i++) { 1850 lio_dev->linfo.txpciq[i].txpciq64 = 1851 resp->cfg_info.linfo.txpciq[i].txpciq64; 1852 lio_dev_dbg(lio_dev, "index %d IQ %d\n", 1853 i, lio_dev->linfo.txpciq[i].s.q_no); 1854 } 1855 1856 
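	/*
	 * The firmware reports the permanent MAC inside the 64-bit hw_addr
	 * field; after the 8-byte swap below, the six address bytes start at
	 * byte offset 2, which is why the copy loop skips two bytes.
	 */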
	lio_dev->linfo.hw_addr = resp->cfg_info.linfo.hw_addr;
	lio_dev->linfo.gmxport = resp->cfg_info.linfo.gmxport;
	lio_dev->linfo.link.link_status64 =
			resp->cfg_info.linfo.link.link_status64;

	/* 64-bit swap required on LE machines */
	lio_swap_8B_data(&lio_dev->linfo.hw_addr, 1);
	for (i = 0; i < RTE_ETHER_ADDR_LEN; i++)
		mac[i] = *((uint8_t *)(((uint8_t *)&lio_dev->linfo.hw_addr) +
				       2 + i));

	/* Copy the permanent MAC address */
	rte_ether_addr_copy((struct rte_ether_addr *)mac,
			    &eth_dev->data->mac_addrs[0]);

	/* enable firmware checksum support for tunnel packets */
	lio_enable_hw_tunnel_rx_checksum(eth_dev);
	lio_enable_hw_tunnel_tx_checksum(eth_dev);

	lio_dev->glist_lock =
	    rte_zmalloc(NULL, sizeof(*lio_dev->glist_lock) * num_iqueues, 0);
	if (lio_dev->glist_lock == NULL)
		return -ENOMEM;

	lio_dev->glist_head =
		rte_zmalloc(NULL, sizeof(*lio_dev->glist_head) * num_iqueues,
			    0);
	if (lio_dev->glist_head == NULL) {
		rte_free(lio_dev->glist_lock);
		lio_dev->glist_lock = NULL;
		return -ENOMEM;
	}

	lio_dev_link_update(eth_dev, 0);

	lio_dev->port_configured = 1;

	lio_free_soft_command(sc);

	/* Reset ioq regs */
	lio_dev->fn_list.setup_device_regs(lio_dev);

	/* Free iq_0 used during init */
	lio_free_instr_queue0(lio_dev);

	return 0;

nic_config_fail:
	lio_dev_err(lio_dev, "Failed retval %d\n", retval);
	lio_free_soft_command(sc);
	lio_free_instr_queue0(lio_dev);

	return -ENODEV;
}

/* Define our ethernet device operations */
static const struct eth_dev_ops liovf_eth_dev_ops = {
	.dev_configure		= lio_dev_configure,
	.dev_start		= lio_dev_start,
	.dev_stop		= lio_dev_stop,
	.dev_set_link_up	= lio_dev_set_link_up,
	.dev_set_link_down	= lio_dev_set_link_down,
	.dev_close		= lio_dev_close,
	.promiscuous_enable	= lio_dev_promiscuous_enable,
	.promiscuous_disable	= lio_dev_promiscuous_disable,
	.allmulticast_enable	= lio_dev_allmulticast_enable,
	.allmulticast_disable	= lio_dev_allmulticast_disable,
	.link_update		= lio_dev_link_update,
	.stats_get		= lio_dev_stats_get,
	.xstats_get		= lio_dev_xstats_get,
	.xstats_get_names	= lio_dev_xstats_get_names,
	.stats_reset		= lio_dev_stats_reset,
	.xstats_reset		= lio_dev_xstats_reset,
	.dev_infos_get		= lio_dev_info_get,
	.vlan_filter_set	= lio_dev_vlan_filter_set,
	.rx_queue_setup		= lio_dev_rx_queue_setup,
	.rx_queue_release	= lio_dev_rx_queue_release,
	.tx_queue_setup		= lio_dev_tx_queue_setup,
	.tx_queue_release	= lio_dev_tx_queue_release,
	.reta_update		= lio_dev_rss_reta_update,
	.reta_query		= lio_dev_rss_reta_query,
	.rss_hash_conf_get	= lio_dev_rss_hash_conf_get,
	.rss_hash_update	= lio_dev_rss_hash_update,
	.udp_tunnel_port_add	= lio_dev_udp_tunnel_add,
	.udp_tunnel_port_del	= lio_dev_udp_tunnel_del,
	.mtu_set		= lio_dev_mtu_set,
};

static void
lio_check_pf_hs_response(void *lio_dev)
{
	struct lio_device *dev = lio_dev;

	/* check until the response arrives */
	if (dev->pfvf_hsword.coproc_tics_per_us)
		return;

	cn23xx_vf_handle_mbox(dev);

	rte_eal_alarm_set(1, lio_check_pf_hs_response, lio_dev);
}

/**
 * \brief Identify the LIO device and map the BAR address space
 * @param lio_dev lio device
 */
static int
lio_chip_specific_setup(struct lio_device *lio_dev) 1964 { 1965 struct rte_pci_device *pdev = lio_dev->pci_dev; 1966 uint32_t dev_id = pdev->id.device_id; 1967 const char *s; 1968 int ret = 1; 1969 1970 switch (dev_id) { 1971 case LIO_CN23XX_VF_VID: 1972 lio_dev->chip_id = LIO_CN23XX_VF_VID; 1973 ret = cn23xx_vf_setup_device(lio_dev); 1974 s = "CN23XX VF"; 1975 break; 1976 default: 1977 s = "?"; 1978 lio_dev_err(lio_dev, "Unsupported Chip\n"); 1979 } 1980 1981 if (!ret) 1982 lio_dev_info(lio_dev, "DEVICE : %s\n", s); 1983 1984 return ret; 1985 } 1986 1987 static int 1988 lio_first_time_init(struct lio_device *lio_dev, 1989 struct rte_pci_device *pdev) 1990 { 1991 int dpdk_queues; 1992 1993 PMD_INIT_FUNC_TRACE(); 1994 1995 /* set dpdk specific pci device pointer */ 1996 lio_dev->pci_dev = pdev; 1997 1998 /* Identify the LIO type and set device ops */ 1999 if (lio_chip_specific_setup(lio_dev)) { 2000 lio_dev_err(lio_dev, "Chip specific setup failed\n"); 2001 return -1; 2002 } 2003 2004 /* Initialize soft command buffer pool */ 2005 if (lio_setup_sc_buffer_pool(lio_dev)) { 2006 lio_dev_err(lio_dev, "sc buffer pool allocation failed\n"); 2007 return -1; 2008 } 2009 2010 /* Initialize lists to manage the requests of different types that 2011 * arrive from applications for this lio device. 2012 */ 2013 lio_setup_response_list(lio_dev); 2014 2015 if (lio_dev->fn_list.setup_mbox(lio_dev)) { 2016 lio_dev_err(lio_dev, "Mailbox setup failed\n"); 2017 goto error; 2018 } 2019 2020 /* Check PF response */ 2021 lio_check_pf_hs_response((void *)lio_dev); 2022 2023 /* Do handshake and exit if incompatible PF driver */ 2024 if (cn23xx_pfvf_handshake(lio_dev)) 2025 goto error; 2026 2027 /* Request and wait for device reset. */ 2028 if (pdev->kdrv == RTE_PCI_KDRV_IGB_UIO) { 2029 cn23xx_vf_ask_pf_to_do_flr(lio_dev); 2030 /* FLR wait time doubled as a precaution. */ 2031 rte_delay_ms(LIO_PCI_FLR_WAIT * 2); 2032 } 2033 2034 if (lio_dev->fn_list.setup_device_regs(lio_dev)) { 2035 lio_dev_err(lio_dev, "Failed to configure device registers\n"); 2036 goto error; 2037 } 2038 2039 if (lio_setup_instr_queue0(lio_dev)) { 2040 lio_dev_err(lio_dev, "Failed to setup instruction queue 0\n"); 2041 goto error; 2042 } 2043 2044 dpdk_queues = (int)lio_dev->sriov_info.rings_per_vf; 2045 2046 lio_dev->max_tx_queues = dpdk_queues; 2047 lio_dev->max_rx_queues = dpdk_queues; 2048 2049 /* Enable input and output queues for this device */ 2050 if (lio_dev->fn_list.enable_io_queues(lio_dev)) 2051 goto error; 2052 2053 return 0; 2054 2055 error: 2056 lio_free_sc_buffer_pool(lio_dev); 2057 if (lio_dev->mbox[0]) 2058 lio_dev->fn_list.free_mbox(lio_dev); 2059 if (lio_dev->instr_queue[0]) 2060 lio_free_instr_queue0(lio_dev); 2061 2062 return -1; 2063 } 2064 2065 static int 2066 lio_eth_dev_uninit(struct rte_eth_dev *eth_dev) 2067 { 2068 struct lio_device *lio_dev = LIO_DEV(eth_dev); 2069 2070 PMD_INIT_FUNC_TRACE(); 2071 2072 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 2073 return 0; 2074 2075 /* lio_free_sc_buffer_pool */ 2076 lio_free_sc_buffer_pool(lio_dev); 2077 2078 return 0; 2079 } 2080 2081 static int 2082 lio_eth_dev_init(struct rte_eth_dev *eth_dev) 2083 { 2084 struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(eth_dev); 2085 struct lio_device *lio_dev = LIO_DEV(eth_dev); 2086 2087 PMD_INIT_FUNC_TRACE(); 2088 2089 eth_dev->rx_pkt_burst = &lio_dev_recv_pkts; 2090 eth_dev->tx_pkt_burst = &lio_dev_xmit_pkts; 2091 2092 /* Primary does the initialization. 
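	 * Secondary processes return early with only the rx/tx burst
	 * handlers set above; all remaining device state is owned by the
	 * primary process.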
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	rte_eth_copy_pci_info(eth_dev, pdev);
	eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;

	if (pdev->mem_resource[0].addr) {
		lio_dev->hw_addr = pdev->mem_resource[0].addr;
	} else {
		PMD_INIT_LOG(ERR, "ERROR: Failed to map BAR0\n");
		return -ENODEV;
	}

	lio_dev->eth_dev = eth_dev;
	/* set lio device print string */
	snprintf(lio_dev->dev_string, sizeof(lio_dev->dev_string),
		 "%s[%02x:%02x.%x]", pdev->driver->driver.name,
		 pdev->addr.bus, pdev->addr.devid, pdev->addr.function);

	lio_dev->port_id = eth_dev->data->port_id;

	if (lio_first_time_init(lio_dev, pdev)) {
		lio_dev_err(lio_dev, "Device init failed\n");
		return -EINVAL;
	}

	eth_dev->dev_ops = &liovf_eth_dev_ops;
	eth_dev->data->mac_addrs = rte_zmalloc("lio", RTE_ETHER_ADDR_LEN, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		lio_dev_err(lio_dev,
			    "MAC addresses memory allocation failed\n");
		eth_dev->dev_ops = NULL;
		eth_dev->rx_pkt_burst = NULL;
		eth_dev->tx_pkt_burst = NULL;
		return -ENOMEM;
	}

	rte_atomic64_set(&lio_dev->status, LIO_DEV_RUNNING);
	rte_wmb();

	lio_dev->port_configured = 0;
	/* Always allow unicast packets */
	lio_dev->ifflags |= LIO_IFFLAG_UNICAST;

	return 0;
}

static int
lio_eth_dev_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
		      struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct lio_device),
					     lio_eth_dev_init);
}

static int
lio_eth_dev_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev,
					      lio_eth_dev_uninit);
}

/* Set of PCI devices this driver supports */
static const struct rte_pci_id pci_id_liovf_map[] = {
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, LIO_CN23XX_VF_VID) },
	{ .vendor_id = 0, /* sentinel */ }
};

static struct rte_pci_driver rte_liovf_pmd = {
	.id_table	= pci_id_liovf_map,
	.drv_flags	= RTE_PCI_DRV_NEED_MAPPING,
	.probe		= lio_eth_dev_pci_probe,
	.remove		= lio_eth_dev_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_liovf, rte_liovf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_liovf, pci_id_liovf_map);
RTE_PMD_REGISTER_KMOD_DEP(net_liovf, "* igb_uio | vfio-pci");
RTE_LOG_REGISTER(lio_logtype_init, pmd.net.liquidio.init, NOTICE);
RTE_LOG_REGISTER(lio_logtype_driver, pmd.net.liquidio.driver, NOTICE);
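
/*
 * Illustrative usage sketch (not part of the driver): a minimal application
 * bringing up a liovf port might do the following, assuming port_id refers
 * to this device and "mb_pool" is a previously created mbuf pool:
 *
 *	struct rte_eth_conf conf = {
 *		.rxmode = { .mq_mode = ETH_MQ_RX_NONE },
 *	};
 *
 *	rte_eth_dev_configure(port_id, 1, 1, &conf);
 *	rte_eth_rx_queue_setup(port_id, 0, 512, rte_socket_id(), NULL, mb_pool);
 *	rte_eth_tx_queue_setup(port_id, 0, 512, rte_socket_id(), NULL);
 *	rte_eth_dev_start(port_id);
 *
 * This path exercises lio_dev_configure(), the queue setup handlers and
 * lio_dev_start() defined above.
 */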