1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright(c) 2017 Cavium, Inc 3 */ 4 5 #include <rte_string_fns.h> 6 #include <ethdev_driver.h> 7 #include <ethdev_pci.h> 8 #include <rte_cycles.h> 9 #include <rte_malloc.h> 10 #include <rte_alarm.h> 11 #include <rte_ether.h> 12 13 #include "lio_logs.h" 14 #include "lio_23xx_vf.h" 15 #include "lio_ethdev.h" 16 #include "lio_rxtx.h" 17 18 /* Default RSS key in use */ 19 static uint8_t lio_rss_key[40] = { 20 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2, 21 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0, 22 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4, 23 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C, 24 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA, 25 }; 26 27 static const struct rte_eth_desc_lim lio_rx_desc_lim = { 28 .nb_max = CN23XX_MAX_OQ_DESCRIPTORS, 29 .nb_min = CN23XX_MIN_OQ_DESCRIPTORS, 30 .nb_align = 1, 31 }; 32 33 static const struct rte_eth_desc_lim lio_tx_desc_lim = { 34 .nb_max = CN23XX_MAX_IQ_DESCRIPTORS, 35 .nb_min = CN23XX_MIN_IQ_DESCRIPTORS, 36 .nb_align = 1, 37 }; 38 39 /* Wait for control command to reach nic. */ 40 static uint16_t 41 lio_wait_for_ctrl_cmd(struct lio_device *lio_dev, 42 struct lio_dev_ctrl_cmd *ctrl_cmd) 43 { 44 uint16_t timeout = LIO_MAX_CMD_TIMEOUT; 45 46 while ((ctrl_cmd->cond == 0) && --timeout) { 47 lio_flush_iq(lio_dev, lio_dev->instr_queue[0]); 48 rte_delay_ms(1); 49 } 50 51 return !timeout; 52 } 53 54 /** 55 * \brief Send Rx control command 56 * @param eth_dev Pointer to the structure rte_eth_dev 57 * @param start_stop whether to start or stop 58 */ 59 static int 60 lio_send_rx_ctrl_cmd(struct rte_eth_dev *eth_dev, int start_stop) 61 { 62 struct lio_device *lio_dev = LIO_DEV(eth_dev); 63 struct lio_dev_ctrl_cmd ctrl_cmd; 64 struct lio_ctrl_pkt ctrl_pkt; 65 66 /* flush added to prevent cmd failure 67 * incase the queue is full 68 */ 69 lio_flush_iq(lio_dev, lio_dev->instr_queue[0]); 70 71 memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt)); 72 memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd)); 73 74 ctrl_cmd.eth_dev = eth_dev; 75 ctrl_cmd.cond = 0; 76 77 ctrl_pkt.ncmd.s.cmd = LIO_CMD_RX_CTL; 78 ctrl_pkt.ncmd.s.param1 = start_stop; 79 ctrl_pkt.ctrl_cmd = &ctrl_cmd; 80 81 if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) { 82 lio_dev_err(lio_dev, "Failed to send RX Control message\n"); 83 return -1; 84 } 85 86 if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) { 87 lio_dev_err(lio_dev, "RX Control command timed out\n"); 88 return -1; 89 } 90 91 return 0; 92 } 93 94 /* store statistics names and its offset in stats structure */ 95 struct rte_lio_xstats_name_off { 96 char name[RTE_ETH_XSTATS_NAME_SIZE]; 97 unsigned int offset; 98 }; 99 100 static const struct rte_lio_xstats_name_off rte_lio_stats_strings[] = { 101 {"rx_pkts", offsetof(struct octeon_rx_stats, total_rcvd)}, 102 {"rx_bytes", offsetof(struct octeon_rx_stats, bytes_rcvd)}, 103 {"rx_broadcast_pkts", offsetof(struct octeon_rx_stats, total_bcst)}, 104 {"rx_multicast_pkts", offsetof(struct octeon_rx_stats, total_mcst)}, 105 {"rx_flow_ctrl_pkts", offsetof(struct octeon_rx_stats, ctl_rcvd)}, 106 {"rx_fifo_err", offsetof(struct octeon_rx_stats, fifo_err)}, 107 {"rx_dmac_drop", offsetof(struct octeon_rx_stats, dmac_drop)}, 108 {"rx_fcs_err", offsetof(struct octeon_rx_stats, fcs_err)}, 109 {"rx_jabber_err", offsetof(struct octeon_rx_stats, jabber_err)}, 110 {"rx_l2_err", offsetof(struct octeon_rx_stats, l2_err)}, 111 {"rx_vxlan_pkts", offsetof(struct octeon_rx_stats, fw_rx_vxlan)}, 112 {"rx_vxlan_err", offsetof(struct octeon_rx_stats, fw_rx_vxlan_err)}, 
113 {"rx_lro_pkts", offsetof(struct octeon_rx_stats, fw_lro_pkts)}, 114 {"tx_pkts", (offsetof(struct octeon_tx_stats, total_pkts_sent)) + 115 sizeof(struct octeon_rx_stats)}, 116 {"tx_bytes", (offsetof(struct octeon_tx_stats, total_bytes_sent)) + 117 sizeof(struct octeon_rx_stats)}, 118 {"tx_broadcast_pkts", 119 (offsetof(struct octeon_tx_stats, bcast_pkts_sent)) + 120 sizeof(struct octeon_rx_stats)}, 121 {"tx_multicast_pkts", 122 (offsetof(struct octeon_tx_stats, mcast_pkts_sent)) + 123 sizeof(struct octeon_rx_stats)}, 124 {"tx_flow_ctrl_pkts", (offsetof(struct octeon_tx_stats, ctl_sent)) + 125 sizeof(struct octeon_rx_stats)}, 126 {"tx_fifo_err", (offsetof(struct octeon_tx_stats, fifo_err)) + 127 sizeof(struct octeon_rx_stats)}, 128 {"tx_total_collisions", (offsetof(struct octeon_tx_stats, 129 total_collisions)) + 130 sizeof(struct octeon_rx_stats)}, 131 {"tx_tso", (offsetof(struct octeon_tx_stats, fw_tso)) + 132 sizeof(struct octeon_rx_stats)}, 133 {"tx_vxlan_pkts", (offsetof(struct octeon_tx_stats, fw_tx_vxlan)) + 134 sizeof(struct octeon_rx_stats)}, 135 }; 136 137 #define LIO_NB_XSTATS RTE_DIM(rte_lio_stats_strings) 138 139 /* Get hw stats of the port */ 140 static int 141 lio_dev_xstats_get(struct rte_eth_dev *eth_dev, struct rte_eth_xstat *xstats, 142 unsigned int n) 143 { 144 struct lio_device *lio_dev = LIO_DEV(eth_dev); 145 uint16_t timeout = LIO_MAX_CMD_TIMEOUT; 146 struct octeon_link_stats *hw_stats; 147 struct lio_link_stats_resp *resp; 148 struct lio_soft_command *sc; 149 uint32_t resp_size; 150 unsigned int i; 151 int retval; 152 153 if (!lio_dev->intf_open) { 154 lio_dev_err(lio_dev, "Port %d down\n", 155 lio_dev->port_id); 156 return -EINVAL; 157 } 158 159 if (n < LIO_NB_XSTATS) 160 return LIO_NB_XSTATS; 161 162 resp_size = sizeof(struct lio_link_stats_resp); 163 sc = lio_alloc_soft_command(lio_dev, 0, resp_size, 0); 164 if (sc == NULL) 165 return -ENOMEM; 166 167 resp = (struct lio_link_stats_resp *)sc->virtrptr; 168 lio_prepare_soft_command(lio_dev, sc, LIO_OPCODE, 169 LIO_OPCODE_PORT_STATS, 0, 0, 0); 170 171 /* Setting wait time in seconds */ 172 sc->wait_time = LIO_MAX_CMD_TIMEOUT / 1000; 173 174 retval = lio_send_soft_command(lio_dev, sc); 175 if (retval == LIO_IQ_SEND_FAILED) { 176 lio_dev_err(lio_dev, "failed to get port stats from firmware. 
status: %x\n", 177 retval); 178 goto get_stats_fail; 179 } 180 181 while ((*sc->status_word == LIO_COMPLETION_WORD_INIT) && --timeout) { 182 lio_flush_iq(lio_dev, lio_dev->instr_queue[sc->iq_no]); 183 lio_process_ordered_list(lio_dev); 184 rte_delay_ms(1); 185 } 186 187 retval = resp->status; 188 if (retval) { 189 lio_dev_err(lio_dev, "failed to get port stats from firmware\n"); 190 goto get_stats_fail; 191 } 192 193 lio_swap_8B_data((uint64_t *)(&resp->link_stats), 194 sizeof(struct octeon_link_stats) >> 3); 195 196 hw_stats = &resp->link_stats; 197 198 for (i = 0; i < LIO_NB_XSTATS; i++) { 199 xstats[i].id = i; 200 xstats[i].value = 201 *(uint64_t *)(((char *)hw_stats) + 202 rte_lio_stats_strings[i].offset); 203 } 204 205 lio_free_soft_command(sc); 206 207 return LIO_NB_XSTATS; 208 209 get_stats_fail: 210 lio_free_soft_command(sc); 211 212 return -1; 213 } 214 215 static int 216 lio_dev_xstats_get_names(struct rte_eth_dev *eth_dev, 217 struct rte_eth_xstat_name *xstats_names, 218 unsigned limit __rte_unused) 219 { 220 struct lio_device *lio_dev = LIO_DEV(eth_dev); 221 unsigned int i; 222 223 if (!lio_dev->intf_open) { 224 lio_dev_err(lio_dev, "Port %d down\n", 225 lio_dev->port_id); 226 return -EINVAL; 227 } 228 229 if (xstats_names == NULL) 230 return LIO_NB_XSTATS; 231 232 /* Note: limit checked in rte_eth_xstats_names() */ 233 234 for (i = 0; i < LIO_NB_XSTATS; i++) { 235 snprintf(xstats_names[i].name, sizeof(xstats_names[i].name), 236 "%s", rte_lio_stats_strings[i].name); 237 } 238 239 return LIO_NB_XSTATS; 240 } 241 242 /* Reset hw stats for the port */ 243 static int 244 lio_dev_xstats_reset(struct rte_eth_dev *eth_dev) 245 { 246 struct lio_device *lio_dev = LIO_DEV(eth_dev); 247 struct lio_dev_ctrl_cmd ctrl_cmd; 248 struct lio_ctrl_pkt ctrl_pkt; 249 int ret; 250 251 if (!lio_dev->intf_open) { 252 lio_dev_err(lio_dev, "Port %d down\n", 253 lio_dev->port_id); 254 return -EINVAL; 255 } 256 257 /* flush added to prevent cmd failure 258 * incase the queue is full 259 */ 260 lio_flush_iq(lio_dev, lio_dev->instr_queue[0]); 261 262 memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt)); 263 memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd)); 264 265 ctrl_cmd.eth_dev = eth_dev; 266 ctrl_cmd.cond = 0; 267 268 ctrl_pkt.ncmd.s.cmd = LIO_CMD_CLEAR_STATS; 269 ctrl_pkt.ctrl_cmd = &ctrl_cmd; 270 271 ret = lio_send_ctrl_pkt(lio_dev, &ctrl_pkt); 272 if (ret != 0) { 273 lio_dev_err(lio_dev, "Failed to send clear stats command\n"); 274 return ret; 275 } 276 277 ret = lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd); 278 if (ret != 0) { 279 lio_dev_err(lio_dev, "Clear stats command timed out\n"); 280 return ret; 281 } 282 283 /* clear stored per queue stats */ 284 RTE_FUNC_PTR_OR_ERR_RET(*eth_dev->dev_ops->stats_reset, 0); 285 return (*eth_dev->dev_ops->stats_reset)(eth_dev); 286 } 287 288 /* Retrieve the device statistics (# packets in/out, # bytes in/out, etc */ 289 static int 290 lio_dev_stats_get(struct rte_eth_dev *eth_dev, 291 struct rte_eth_stats *stats) 292 { 293 struct lio_device *lio_dev = LIO_DEV(eth_dev); 294 struct lio_droq_stats *oq_stats; 295 struct lio_iq_stats *iq_stats; 296 struct lio_instr_queue *txq; 297 struct lio_droq *droq; 298 int i, iq_no, oq_no; 299 uint64_t bytes = 0; 300 uint64_t pkts = 0; 301 uint64_t drop = 0; 302 303 for (i = 0; i < eth_dev->data->nb_tx_queues; i++) { 304 iq_no = lio_dev->linfo.txpciq[i].s.q_no; 305 txq = lio_dev->instr_queue[iq_no]; 306 if (txq != NULL) { 307 iq_stats = &txq->stats; 308 pkts += iq_stats->tx_done; 309 drop += iq_stats->tx_dropped; 310 bytes += 
iq_stats->tx_tot_bytes; 311 } 312 } 313 314 stats->opackets = pkts; 315 stats->obytes = bytes; 316 stats->oerrors = drop; 317 318 pkts = 0; 319 drop = 0; 320 bytes = 0; 321 322 for (i = 0; i < eth_dev->data->nb_rx_queues; i++) { 323 oq_no = lio_dev->linfo.rxpciq[i].s.q_no; 324 droq = lio_dev->droq[oq_no]; 325 if (droq != NULL) { 326 oq_stats = &droq->stats; 327 pkts += oq_stats->rx_pkts_received; 328 drop += (oq_stats->rx_dropped + 329 oq_stats->dropped_toomany + 330 oq_stats->dropped_nomem); 331 bytes += oq_stats->rx_bytes_received; 332 } 333 } 334 stats->ibytes = bytes; 335 stats->ipackets = pkts; 336 stats->ierrors = drop; 337 338 return 0; 339 } 340 341 static int 342 lio_dev_stats_reset(struct rte_eth_dev *eth_dev) 343 { 344 struct lio_device *lio_dev = LIO_DEV(eth_dev); 345 struct lio_droq_stats *oq_stats; 346 struct lio_iq_stats *iq_stats; 347 struct lio_instr_queue *txq; 348 struct lio_droq *droq; 349 int i, iq_no, oq_no; 350 351 for (i = 0; i < eth_dev->data->nb_tx_queues; i++) { 352 iq_no = lio_dev->linfo.txpciq[i].s.q_no; 353 txq = lio_dev->instr_queue[iq_no]; 354 if (txq != NULL) { 355 iq_stats = &txq->stats; 356 memset(iq_stats, 0, sizeof(struct lio_iq_stats)); 357 } 358 } 359 360 for (i = 0; i < eth_dev->data->nb_rx_queues; i++) { 361 oq_no = lio_dev->linfo.rxpciq[i].s.q_no; 362 droq = lio_dev->droq[oq_no]; 363 if (droq != NULL) { 364 oq_stats = &droq->stats; 365 memset(oq_stats, 0, sizeof(struct lio_droq_stats)); 366 } 367 } 368 369 return 0; 370 } 371 372 static int 373 lio_dev_info_get(struct rte_eth_dev *eth_dev, 374 struct rte_eth_dev_info *devinfo) 375 { 376 struct lio_device *lio_dev = LIO_DEV(eth_dev); 377 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); 378 379 switch (pci_dev->id.subsystem_device_id) { 380 /* CN23xx 10G cards */ 381 case PCI_SUBSYS_DEV_ID_CN2350_210: 382 case PCI_SUBSYS_DEV_ID_CN2360_210: 383 case PCI_SUBSYS_DEV_ID_CN2350_210SVPN3: 384 case PCI_SUBSYS_DEV_ID_CN2360_210SVPN3: 385 case PCI_SUBSYS_DEV_ID_CN2350_210SVPT: 386 case PCI_SUBSYS_DEV_ID_CN2360_210SVPT: 387 devinfo->speed_capa = RTE_ETH_LINK_SPEED_10G; 388 break; 389 /* CN23xx 25G cards */ 390 case PCI_SUBSYS_DEV_ID_CN2350_225: 391 case PCI_SUBSYS_DEV_ID_CN2360_225: 392 devinfo->speed_capa = RTE_ETH_LINK_SPEED_25G; 393 break; 394 default: 395 devinfo->speed_capa = RTE_ETH_LINK_SPEED_10G; 396 lio_dev_err(lio_dev, 397 "Unknown CN23XX subsystem device id. 
Setting 10G as default link speed.\n"); 398 return -EINVAL; 399 } 400 401 devinfo->max_rx_queues = lio_dev->max_rx_queues; 402 devinfo->max_tx_queues = lio_dev->max_tx_queues; 403 404 devinfo->min_rx_bufsize = LIO_MIN_RX_BUF_SIZE; 405 devinfo->max_rx_pktlen = LIO_MAX_RX_PKTLEN; 406 407 devinfo->max_mac_addrs = 1; 408 409 devinfo->rx_offload_capa = (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM | 410 RTE_ETH_RX_OFFLOAD_UDP_CKSUM | 411 RTE_ETH_RX_OFFLOAD_TCP_CKSUM | 412 RTE_ETH_RX_OFFLOAD_VLAN_STRIP | 413 RTE_ETH_RX_OFFLOAD_RSS_HASH); 414 devinfo->tx_offload_capa = (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | 415 RTE_ETH_TX_OFFLOAD_UDP_CKSUM | 416 RTE_ETH_TX_OFFLOAD_TCP_CKSUM | 417 RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM); 418 419 devinfo->rx_desc_lim = lio_rx_desc_lim; 420 devinfo->tx_desc_lim = lio_tx_desc_lim; 421 422 devinfo->reta_size = LIO_RSS_MAX_TABLE_SZ; 423 devinfo->hash_key_size = LIO_RSS_MAX_KEY_SZ; 424 devinfo->flow_type_rss_offloads = (RTE_ETH_RSS_IPV4 | 425 RTE_ETH_RSS_NONFRAG_IPV4_TCP | 426 RTE_ETH_RSS_IPV6 | 427 RTE_ETH_RSS_NONFRAG_IPV6_TCP | 428 RTE_ETH_RSS_IPV6_EX | 429 RTE_ETH_RSS_IPV6_TCP_EX); 430 return 0; 431 } 432 433 static int 434 lio_dev_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu) 435 { 436 struct lio_device *lio_dev = LIO_DEV(eth_dev); 437 struct lio_dev_ctrl_cmd ctrl_cmd; 438 struct lio_ctrl_pkt ctrl_pkt; 439 440 PMD_INIT_FUNC_TRACE(); 441 442 if (!lio_dev->intf_open) { 443 lio_dev_err(lio_dev, "Port %d down, can't set MTU\n", 444 lio_dev->port_id); 445 return -EINVAL; 446 } 447 448 /* flush added to prevent cmd failure 449 * incase the queue is full 450 */ 451 lio_flush_iq(lio_dev, lio_dev->instr_queue[0]); 452 453 memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt)); 454 memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd)); 455 456 ctrl_cmd.eth_dev = eth_dev; 457 ctrl_cmd.cond = 0; 458 459 ctrl_pkt.ncmd.s.cmd = LIO_CMD_CHANGE_MTU; 460 ctrl_pkt.ncmd.s.param1 = mtu; 461 ctrl_pkt.ctrl_cmd = &ctrl_cmd; 462 463 if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) { 464 lio_dev_err(lio_dev, "Failed to send command to change MTU\n"); 465 return -1; 466 } 467 468 if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) { 469 lio_dev_err(lio_dev, "Command to change MTU timed out\n"); 470 return -1; 471 } 472 473 return 0; 474 } 475 476 static int 477 lio_dev_rss_reta_update(struct rte_eth_dev *eth_dev, 478 struct rte_eth_rss_reta_entry64 *reta_conf, 479 uint16_t reta_size) 480 { 481 struct lio_device *lio_dev = LIO_DEV(eth_dev); 482 struct lio_rss_ctx *rss_state = &lio_dev->rss_state; 483 struct lio_rss_set *rss_param; 484 struct lio_dev_ctrl_cmd ctrl_cmd; 485 struct lio_ctrl_pkt ctrl_pkt; 486 int i, j, index; 487 488 if (!lio_dev->intf_open) { 489 lio_dev_err(lio_dev, "Port %d down, can't update reta\n", 490 lio_dev->port_id); 491 return -EINVAL; 492 } 493 494 if (reta_size != LIO_RSS_MAX_TABLE_SZ) { 495 lio_dev_err(lio_dev, 496 "The size of hash lookup table configured (%d) doesn't match the number hardware can supported (%d)\n", 497 reta_size, LIO_RSS_MAX_TABLE_SZ); 498 return -EINVAL; 499 } 500 501 /* flush added to prevent cmd failure 502 * incase the queue is full 503 */ 504 lio_flush_iq(lio_dev, lio_dev->instr_queue[0]); 505 506 memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt)); 507 memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd)); 508 509 rss_param = (struct lio_rss_set *)&ctrl_pkt.udd[0]; 510 511 ctrl_cmd.eth_dev = eth_dev; 512 ctrl_cmd.cond = 0; 513 514 ctrl_pkt.ncmd.s.cmd = LIO_CMD_SET_RSS; 515 ctrl_pkt.ncmd.s.more = sizeof(struct lio_rss_set) >> 3; 516 ctrl_pkt.ctrl_cmd = &ctrl_cmd; 517 518 
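	/* Only the RSS indirection table is rewritten by this request; the
	 * hash key and hash-info configuration are left unchanged.
	 */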
rss_param->param.flags = 0xF; 519 rss_param->param.flags &= ~LIO_RSS_PARAM_ITABLE_UNCHANGED; 520 rss_param->param.itablesize = LIO_RSS_MAX_TABLE_SZ; 521 522 for (i = 0; i < (reta_size / RTE_ETH_RETA_GROUP_SIZE); i++) { 523 for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++) { 524 if ((reta_conf[i].mask) & ((uint64_t)1 << j)) { 525 index = (i * RTE_ETH_RETA_GROUP_SIZE) + j; 526 rss_state->itable[index] = reta_conf[i].reta[j]; 527 } 528 } 529 } 530 531 rss_state->itable_size = LIO_RSS_MAX_TABLE_SZ; 532 memcpy(rss_param->itable, rss_state->itable, rss_state->itable_size); 533 534 lio_swap_8B_data((uint64_t *)rss_param, LIO_RSS_PARAM_SIZE >> 3); 535 536 if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) { 537 lio_dev_err(lio_dev, "Failed to set rss hash\n"); 538 return -1; 539 } 540 541 if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) { 542 lio_dev_err(lio_dev, "Set rss hash timed out\n"); 543 return -1; 544 } 545 546 return 0; 547 } 548 549 static int 550 lio_dev_rss_reta_query(struct rte_eth_dev *eth_dev, 551 struct rte_eth_rss_reta_entry64 *reta_conf, 552 uint16_t reta_size) 553 { 554 struct lio_device *lio_dev = LIO_DEV(eth_dev); 555 struct lio_rss_ctx *rss_state = &lio_dev->rss_state; 556 int i, num; 557 558 if (reta_size != LIO_RSS_MAX_TABLE_SZ) { 559 lio_dev_err(lio_dev, 560 "The size of hash lookup table configured (%d) doesn't match the number hardware can supported (%d)\n", 561 reta_size, LIO_RSS_MAX_TABLE_SZ); 562 return -EINVAL; 563 } 564 565 num = reta_size / RTE_ETH_RETA_GROUP_SIZE; 566 567 for (i = 0; i < num; i++) { 568 memcpy(reta_conf->reta, 569 &rss_state->itable[i * RTE_ETH_RETA_GROUP_SIZE], 570 RTE_ETH_RETA_GROUP_SIZE); 571 reta_conf++; 572 } 573 574 return 0; 575 } 576 577 static int 578 lio_dev_rss_hash_conf_get(struct rte_eth_dev *eth_dev, 579 struct rte_eth_rss_conf *rss_conf) 580 { 581 struct lio_device *lio_dev = LIO_DEV(eth_dev); 582 struct lio_rss_ctx *rss_state = &lio_dev->rss_state; 583 uint8_t *hash_key = NULL; 584 uint64_t rss_hf = 0; 585 586 if (rss_state->hash_disable) { 587 lio_dev_info(lio_dev, "RSS disabled in nic\n"); 588 rss_conf->rss_hf = 0; 589 return 0; 590 } 591 592 /* Get key value */ 593 hash_key = rss_conf->rss_key; 594 if (hash_key != NULL) 595 memcpy(hash_key, rss_state->hash_key, rss_state->hash_key_size); 596 597 if (rss_state->ip) 598 rss_hf |= RTE_ETH_RSS_IPV4; 599 if (rss_state->tcp_hash) 600 rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP; 601 if (rss_state->ipv6) 602 rss_hf |= RTE_ETH_RSS_IPV6; 603 if (rss_state->ipv6_tcp_hash) 604 rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_TCP; 605 if (rss_state->ipv6_ex) 606 rss_hf |= RTE_ETH_RSS_IPV6_EX; 607 if (rss_state->ipv6_tcp_ex_hash) 608 rss_hf |= RTE_ETH_RSS_IPV6_TCP_EX; 609 610 rss_conf->rss_hf = rss_hf; 611 612 return 0; 613 } 614 615 static int 616 lio_dev_rss_hash_update(struct rte_eth_dev *eth_dev, 617 struct rte_eth_rss_conf *rss_conf) 618 { 619 struct lio_device *lio_dev = LIO_DEV(eth_dev); 620 struct lio_rss_ctx *rss_state = &lio_dev->rss_state; 621 struct lio_rss_set *rss_param; 622 struct lio_dev_ctrl_cmd ctrl_cmd; 623 struct lio_ctrl_pkt ctrl_pkt; 624 625 if (!lio_dev->intf_open) { 626 lio_dev_err(lio_dev, "Port %d down, can't update hash\n", 627 lio_dev->port_id); 628 return -EINVAL; 629 } 630 631 /* flush added to prevent cmd failure 632 * incase the queue is full 633 */ 634 lio_flush_iq(lio_dev, lio_dev->instr_queue[0]); 635 636 memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt)); 637 memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd)); 638 639 rss_param = (struct lio_rss_set *)&ctrl_pkt.udd[0]; 640 641 
ctrl_cmd.eth_dev = eth_dev; 642 ctrl_cmd.cond = 0; 643 644 ctrl_pkt.ncmd.s.cmd = LIO_CMD_SET_RSS; 645 ctrl_pkt.ncmd.s.more = sizeof(struct lio_rss_set) >> 3; 646 ctrl_pkt.ctrl_cmd = &ctrl_cmd; 647 648 rss_param->param.flags = 0xF; 649 650 if (rss_conf->rss_key) { 651 rss_param->param.flags &= ~LIO_RSS_PARAM_HASH_KEY_UNCHANGED; 652 rss_state->hash_key_size = LIO_RSS_MAX_KEY_SZ; 653 rss_param->param.hashkeysize = LIO_RSS_MAX_KEY_SZ; 654 memcpy(rss_state->hash_key, rss_conf->rss_key, 655 rss_state->hash_key_size); 656 memcpy(rss_param->key, rss_state->hash_key, 657 rss_state->hash_key_size); 658 } 659 660 if ((rss_conf->rss_hf & LIO_RSS_OFFLOAD_ALL) == 0) { 661 /* Can't disable rss through hash flags, 662 * if it is enabled by default during init 663 */ 664 if (!rss_state->hash_disable) 665 return -EINVAL; 666 667 /* This is for --disable-rss during testpmd launch */ 668 rss_param->param.flags |= LIO_RSS_PARAM_DISABLE_RSS; 669 } else { 670 uint32_t hashinfo = 0; 671 672 /* Can't enable rss if disabled by default during init */ 673 if (rss_state->hash_disable) 674 return -EINVAL; 675 676 if (rss_conf->rss_hf & RTE_ETH_RSS_IPV4) { 677 hashinfo |= LIO_RSS_HASH_IPV4; 678 rss_state->ip = 1; 679 } else { 680 rss_state->ip = 0; 681 } 682 683 if (rss_conf->rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP) { 684 hashinfo |= LIO_RSS_HASH_TCP_IPV4; 685 rss_state->tcp_hash = 1; 686 } else { 687 rss_state->tcp_hash = 0; 688 } 689 690 if (rss_conf->rss_hf & RTE_ETH_RSS_IPV6) { 691 hashinfo |= LIO_RSS_HASH_IPV6; 692 rss_state->ipv6 = 1; 693 } else { 694 rss_state->ipv6 = 0; 695 } 696 697 if (rss_conf->rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP) { 698 hashinfo |= LIO_RSS_HASH_TCP_IPV6; 699 rss_state->ipv6_tcp_hash = 1; 700 } else { 701 rss_state->ipv6_tcp_hash = 0; 702 } 703 704 if (rss_conf->rss_hf & RTE_ETH_RSS_IPV6_EX) { 705 hashinfo |= LIO_RSS_HASH_IPV6_EX; 706 rss_state->ipv6_ex = 1; 707 } else { 708 rss_state->ipv6_ex = 0; 709 } 710 711 if (rss_conf->rss_hf & RTE_ETH_RSS_IPV6_TCP_EX) { 712 hashinfo |= LIO_RSS_HASH_TCP_IPV6_EX; 713 rss_state->ipv6_tcp_ex_hash = 1; 714 } else { 715 rss_state->ipv6_tcp_ex_hash = 0; 716 } 717 718 rss_param->param.flags &= ~LIO_RSS_PARAM_HASH_INFO_UNCHANGED; 719 rss_param->param.hashinfo = hashinfo; 720 } 721 722 lio_swap_8B_data((uint64_t *)rss_param, LIO_RSS_PARAM_SIZE >> 3); 723 724 if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) { 725 lio_dev_err(lio_dev, "Failed to set rss hash\n"); 726 return -1; 727 } 728 729 if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) { 730 lio_dev_err(lio_dev, "Set rss hash timed out\n"); 731 return -1; 732 } 733 734 return 0; 735 } 736 737 /** 738 * Add vxlan dest udp port for an interface. 
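 * The port number is programmed in firmware via LIO_CMD_VXLAN_PORT_CONFIG.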
739 * 740 * @param eth_dev 741 * Pointer to the structure rte_eth_dev 742 * @param udp_tnl 743 * udp tunnel conf 744 * 745 * @return 746 * On success return 0 747 * On failure return -1 748 */ 749 static int 750 lio_dev_udp_tunnel_add(struct rte_eth_dev *eth_dev, 751 struct rte_eth_udp_tunnel *udp_tnl) 752 { 753 struct lio_device *lio_dev = LIO_DEV(eth_dev); 754 struct lio_dev_ctrl_cmd ctrl_cmd; 755 struct lio_ctrl_pkt ctrl_pkt; 756 757 if (udp_tnl == NULL) 758 return -EINVAL; 759 760 if (udp_tnl->prot_type != RTE_ETH_TUNNEL_TYPE_VXLAN) { 761 lio_dev_err(lio_dev, "Unsupported tunnel type\n"); 762 return -1; 763 } 764 765 /* flush added to prevent cmd failure 766 * incase the queue is full 767 */ 768 lio_flush_iq(lio_dev, lio_dev->instr_queue[0]); 769 770 memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt)); 771 memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd)); 772 773 ctrl_cmd.eth_dev = eth_dev; 774 ctrl_cmd.cond = 0; 775 776 ctrl_pkt.ncmd.s.cmd = LIO_CMD_VXLAN_PORT_CONFIG; 777 ctrl_pkt.ncmd.s.param1 = udp_tnl->udp_port; 778 ctrl_pkt.ncmd.s.more = LIO_CMD_VXLAN_PORT_ADD; 779 ctrl_pkt.ctrl_cmd = &ctrl_cmd; 780 781 if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) { 782 lio_dev_err(lio_dev, "Failed to send VXLAN_PORT_ADD command\n"); 783 return -1; 784 } 785 786 if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) { 787 lio_dev_err(lio_dev, "VXLAN_PORT_ADD command timed out\n"); 788 return -1; 789 } 790 791 return 0; 792 } 793 794 /** 795 * Remove vxlan dest udp port for an interface. 796 * 797 * @param eth_dev 798 * Pointer to the structure rte_eth_dev 799 * @param udp_tnl 800 * udp tunnel conf 801 * 802 * @return 803 * On success return 0 804 * On failure return -1 805 */ 806 static int 807 lio_dev_udp_tunnel_del(struct rte_eth_dev *eth_dev, 808 struct rte_eth_udp_tunnel *udp_tnl) 809 { 810 struct lio_device *lio_dev = LIO_DEV(eth_dev); 811 struct lio_dev_ctrl_cmd ctrl_cmd; 812 struct lio_ctrl_pkt ctrl_pkt; 813 814 if (udp_tnl == NULL) 815 return -EINVAL; 816 817 if (udp_tnl->prot_type != RTE_ETH_TUNNEL_TYPE_VXLAN) { 818 lio_dev_err(lio_dev, "Unsupported tunnel type\n"); 819 return -1; 820 } 821 822 /* flush added to prevent cmd failure 823 * incase the queue is full 824 */ 825 lio_flush_iq(lio_dev, lio_dev->instr_queue[0]); 826 827 memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt)); 828 memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd)); 829 830 ctrl_cmd.eth_dev = eth_dev; 831 ctrl_cmd.cond = 0; 832 833 ctrl_pkt.ncmd.s.cmd = LIO_CMD_VXLAN_PORT_CONFIG; 834 ctrl_pkt.ncmd.s.param1 = udp_tnl->udp_port; 835 ctrl_pkt.ncmd.s.more = LIO_CMD_VXLAN_PORT_DEL; 836 ctrl_pkt.ctrl_cmd = &ctrl_cmd; 837 838 if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) { 839 lio_dev_err(lio_dev, "Failed to send VXLAN_PORT_DEL command\n"); 840 return -1; 841 } 842 843 if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) { 844 lio_dev_err(lio_dev, "VXLAN_PORT_DEL command timed out\n"); 845 return -1; 846 } 847 848 return 0; 849 } 850 851 static int 852 lio_dev_vlan_filter_set(struct rte_eth_dev *eth_dev, uint16_t vlan_id, int on) 853 { 854 struct lio_device *lio_dev = LIO_DEV(eth_dev); 855 struct lio_dev_ctrl_cmd ctrl_cmd; 856 struct lio_ctrl_pkt ctrl_pkt; 857 858 if (lio_dev->linfo.vlan_is_admin_assigned) 859 return -EPERM; 860 861 /* flush added to prevent cmd failure 862 * incase the queue is full 863 */ 864 lio_flush_iq(lio_dev, lio_dev->instr_queue[0]); 865 866 memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt)); 867 memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd)); 868 869 ctrl_cmd.eth_dev = eth_dev; 870 ctrl_cmd.cond = 0; 871 872 
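	/* Non-zero "on" adds the VLAN id to the firmware filter; zero removes it. */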
ctrl_pkt.ncmd.s.cmd = on ? 873 LIO_CMD_ADD_VLAN_FILTER : LIO_CMD_DEL_VLAN_FILTER; 874 ctrl_pkt.ncmd.s.param1 = vlan_id; 875 ctrl_pkt.ctrl_cmd = &ctrl_cmd; 876 877 if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) { 878 lio_dev_err(lio_dev, "Failed to %s VLAN port\n", 879 on ? "add" : "remove"); 880 return -1; 881 } 882 883 if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) { 884 lio_dev_err(lio_dev, "Command to %s VLAN port timed out\n", 885 on ? "add" : "remove"); 886 return -1; 887 } 888 889 return 0; 890 } 891 892 static uint64_t 893 lio_hweight64(uint64_t w) 894 { 895 uint64_t res = w - ((w >> 1) & 0x5555555555555555ul); 896 897 res = 898 (res & 0x3333333333333333ul) + ((res >> 2) & 0x3333333333333333ul); 899 res = (res + (res >> 4)) & 0x0F0F0F0F0F0F0F0Ful; 900 res = res + (res >> 8); 901 res = res + (res >> 16); 902 903 return (res + (res >> 32)) & 0x00000000000000FFul; 904 } 905 906 static int 907 lio_dev_link_update(struct rte_eth_dev *eth_dev, 908 int wait_to_complete __rte_unused) 909 { 910 struct lio_device *lio_dev = LIO_DEV(eth_dev); 911 struct rte_eth_link link; 912 913 /* Initialize */ 914 memset(&link, 0, sizeof(link)); 915 link.link_status = RTE_ETH_LINK_DOWN; 916 link.link_speed = RTE_ETH_SPEED_NUM_NONE; 917 link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX; 918 link.link_autoneg = RTE_ETH_LINK_AUTONEG; 919 920 /* Return what we found */ 921 if (lio_dev->linfo.link.s.link_up == 0) { 922 /* Interface is down */ 923 return rte_eth_linkstatus_set(eth_dev, &link); 924 } 925 926 link.link_status = RTE_ETH_LINK_UP; /* Interface is up */ 927 link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX; 928 switch (lio_dev->linfo.link.s.speed) { 929 case LIO_LINK_SPEED_10000: 930 link.link_speed = RTE_ETH_SPEED_NUM_10G; 931 break; 932 case LIO_LINK_SPEED_25000: 933 link.link_speed = RTE_ETH_SPEED_NUM_25G; 934 break; 935 default: 936 link.link_speed = RTE_ETH_SPEED_NUM_NONE; 937 link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX; 938 } 939 940 return rte_eth_linkstatus_set(eth_dev, &link); 941 } 942 943 /** 944 * \brief Net device enable, disable allmulticast 945 * @param eth_dev Pointer to the structure rte_eth_dev 946 * 947 * @return 948 * On success return 0 949 * On failure return negative errno 950 */ 951 static int 952 lio_change_dev_flag(struct rte_eth_dev *eth_dev) 953 { 954 struct lio_device *lio_dev = LIO_DEV(eth_dev); 955 struct lio_dev_ctrl_cmd ctrl_cmd; 956 struct lio_ctrl_pkt ctrl_pkt; 957 958 /* flush added to prevent cmd failure 959 * incase the queue is full 960 */ 961 lio_flush_iq(lio_dev, lio_dev->instr_queue[0]); 962 963 memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt)); 964 memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd)); 965 966 ctrl_cmd.eth_dev = eth_dev; 967 ctrl_cmd.cond = 0; 968 969 /* Create a ctrl pkt command to be sent to core app. 
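	 * carrying the updated interface flags (unicast/promiscuous/allmulti).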
*/ 970 ctrl_pkt.ncmd.s.cmd = LIO_CMD_CHANGE_DEVFLAGS; 971 ctrl_pkt.ncmd.s.param1 = lio_dev->ifflags; 972 ctrl_pkt.ctrl_cmd = &ctrl_cmd; 973 974 if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) { 975 lio_dev_err(lio_dev, "Failed to send change flag message\n"); 976 return -EAGAIN; 977 } 978 979 if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) { 980 lio_dev_err(lio_dev, "Change dev flag command timed out\n"); 981 return -ETIMEDOUT; 982 } 983 984 return 0; 985 } 986 987 static int 988 lio_dev_promiscuous_enable(struct rte_eth_dev *eth_dev) 989 { 990 struct lio_device *lio_dev = LIO_DEV(eth_dev); 991 992 if (strcmp(lio_dev->firmware_version, LIO_VF_TRUST_MIN_VERSION) < 0) { 993 lio_dev_err(lio_dev, "Require firmware version >= %s\n", 994 LIO_VF_TRUST_MIN_VERSION); 995 return -EAGAIN; 996 } 997 998 if (!lio_dev->intf_open) { 999 lio_dev_err(lio_dev, "Port %d down, can't enable promiscuous\n", 1000 lio_dev->port_id); 1001 return -EAGAIN; 1002 } 1003 1004 lio_dev->ifflags |= LIO_IFFLAG_PROMISC; 1005 return lio_change_dev_flag(eth_dev); 1006 } 1007 1008 static int 1009 lio_dev_promiscuous_disable(struct rte_eth_dev *eth_dev) 1010 { 1011 struct lio_device *lio_dev = LIO_DEV(eth_dev); 1012 1013 if (strcmp(lio_dev->firmware_version, LIO_VF_TRUST_MIN_VERSION) < 0) { 1014 lio_dev_err(lio_dev, "Require firmware version >= %s\n", 1015 LIO_VF_TRUST_MIN_VERSION); 1016 return -EAGAIN; 1017 } 1018 1019 if (!lio_dev->intf_open) { 1020 lio_dev_err(lio_dev, "Port %d down, can't disable promiscuous\n", 1021 lio_dev->port_id); 1022 return -EAGAIN; 1023 } 1024 1025 lio_dev->ifflags &= ~LIO_IFFLAG_PROMISC; 1026 return lio_change_dev_flag(eth_dev); 1027 } 1028 1029 static int 1030 lio_dev_allmulticast_enable(struct rte_eth_dev *eth_dev) 1031 { 1032 struct lio_device *lio_dev = LIO_DEV(eth_dev); 1033 1034 if (!lio_dev->intf_open) { 1035 lio_dev_err(lio_dev, "Port %d down, can't enable multicast\n", 1036 lio_dev->port_id); 1037 return -EAGAIN; 1038 } 1039 1040 lio_dev->ifflags |= LIO_IFFLAG_ALLMULTI; 1041 return lio_change_dev_flag(eth_dev); 1042 } 1043 1044 static int 1045 lio_dev_allmulticast_disable(struct rte_eth_dev *eth_dev) 1046 { 1047 struct lio_device *lio_dev = LIO_DEV(eth_dev); 1048 1049 if (!lio_dev->intf_open) { 1050 lio_dev_err(lio_dev, "Port %d down, can't disable multicast\n", 1051 lio_dev->port_id); 1052 return -EAGAIN; 1053 } 1054 1055 lio_dev->ifflags &= ~LIO_IFFLAG_ALLMULTI; 1056 return lio_change_dev_flag(eth_dev); 1057 } 1058 1059 static void 1060 lio_dev_rss_configure(struct rte_eth_dev *eth_dev) 1061 { 1062 struct lio_device *lio_dev = LIO_DEV(eth_dev); 1063 struct lio_rss_ctx *rss_state = &lio_dev->rss_state; 1064 struct rte_eth_rss_reta_entry64 reta_conf[8]; 1065 struct rte_eth_rss_conf rss_conf; 1066 uint16_t i; 1067 1068 /* Configure the RSS key and the RSS protocols used to compute 1069 * the RSS hash of input packets. 1070 */ 1071 rss_conf = eth_dev->data->dev_conf.rx_adv_conf.rss_conf; 1072 if ((rss_conf.rss_hf & LIO_RSS_OFFLOAD_ALL) == 0) { 1073 rss_state->hash_disable = 1; 1074 lio_dev_rss_hash_update(eth_dev, &rss_conf); 1075 return; 1076 } 1077 1078 if (rss_conf.rss_key == NULL) 1079 rss_conf.rss_key = lio_rss_key; /* Default hash key */ 1080 1081 lio_dev_rss_hash_update(eth_dev, &rss_conf); 1082 1083 memset(reta_conf, 0, sizeof(reta_conf)); 1084 for (i = 0; i < LIO_RSS_MAX_TABLE_SZ; i++) { 1085 uint8_t q_idx, conf_idx, reta_idx; 1086 1087 q_idx = (uint8_t)((eth_dev->data->nb_rx_queues > 1) ? 
1088 i % eth_dev->data->nb_rx_queues : 0); 1089 conf_idx = i / RTE_ETH_RETA_GROUP_SIZE; 1090 reta_idx = i % RTE_ETH_RETA_GROUP_SIZE; 1091 reta_conf[conf_idx].reta[reta_idx] = q_idx; 1092 reta_conf[conf_idx].mask |= ((uint64_t)1 << reta_idx); 1093 } 1094 1095 lio_dev_rss_reta_update(eth_dev, reta_conf, LIO_RSS_MAX_TABLE_SZ); 1096 } 1097 1098 static void 1099 lio_dev_mq_rx_configure(struct rte_eth_dev *eth_dev) 1100 { 1101 struct lio_device *lio_dev = LIO_DEV(eth_dev); 1102 struct lio_rss_ctx *rss_state = &lio_dev->rss_state; 1103 struct rte_eth_rss_conf rss_conf; 1104 1105 switch (eth_dev->data->dev_conf.rxmode.mq_mode) { 1106 case RTE_ETH_MQ_RX_RSS: 1107 lio_dev_rss_configure(eth_dev); 1108 break; 1109 case RTE_ETH_MQ_RX_NONE: 1110 /* if mq_mode is none, disable rss mode. */ 1111 default: 1112 memset(&rss_conf, 0, sizeof(rss_conf)); 1113 rss_state->hash_disable = 1; 1114 lio_dev_rss_hash_update(eth_dev, &rss_conf); 1115 } 1116 } 1117 1118 /** 1119 * Setup our receive queue/ringbuffer. This is the 1120 * queue the Octeon uses to send us packets and 1121 * responses. We are given a memory pool for our 1122 * packet buffers that are used to populate the receive 1123 * queue. 1124 * 1125 * @param eth_dev 1126 * Pointer to the structure rte_eth_dev 1127 * @param q_no 1128 * Queue number 1129 * @param num_rx_descs 1130 * Number of entries in the queue 1131 * @param socket_id 1132 * Where to allocate memory 1133 * @param rx_conf 1134 * Pointer to the structure rte_eth_rxconf 1135 * @param mp 1136 * Pointer to the packet pool 1137 * 1138 * @return 1139 * - On success, return 0 1140 * - On failure, return -1 1141 */ 1142 static int 1143 lio_dev_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t q_no, 1144 uint16_t num_rx_descs, unsigned int socket_id, 1145 const struct rte_eth_rxconf *rx_conf __rte_unused, 1146 struct rte_mempool *mp) 1147 { 1148 struct lio_device *lio_dev = LIO_DEV(eth_dev); 1149 struct rte_pktmbuf_pool_private *mbp_priv; 1150 uint32_t fw_mapped_oq; 1151 uint16_t buf_size; 1152 1153 if (q_no >= lio_dev->nb_rx_queues) { 1154 lio_dev_err(lio_dev, "Invalid rx queue number %u\n", q_no); 1155 return -EINVAL; 1156 } 1157 1158 lio_dev_dbg(lio_dev, "setting up rx queue %u\n", q_no); 1159 1160 fw_mapped_oq = lio_dev->linfo.rxpciq[q_no].s.q_no; 1161 1162 /* Free previous allocation if any */ 1163 if (eth_dev->data->rx_queues[q_no] != NULL) { 1164 lio_dev_rx_queue_release(eth_dev, q_no); 1165 eth_dev->data->rx_queues[q_no] = NULL; 1166 } 1167 1168 mbp_priv = rte_mempool_get_priv(mp); 1169 buf_size = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM; 1170 1171 if (lio_setup_droq(lio_dev, fw_mapped_oq, num_rx_descs, buf_size, mp, 1172 socket_id)) { 1173 lio_dev_err(lio_dev, "droq allocation failed\n"); 1174 return -1; 1175 } 1176 1177 eth_dev->data->rx_queues[q_no] = lio_dev->droq[fw_mapped_oq]; 1178 1179 return 0; 1180 } 1181 1182 /** 1183 * Release the receive queue/ringbuffer. Called by 1184 * the upper layers. 1185 * 1186 * @param eth_dev 1187 * Pointer to Ethernet device structure. 1188 * @param q_no 1189 * Receive queue index. 1190 * 1191 * @return 1192 * - nothing 1193 */ 1194 void 1195 lio_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t q_no) 1196 { 1197 struct lio_droq *droq = dev->data->rx_queues[q_no]; 1198 int oq_no; 1199 1200 if (droq) { 1201 oq_no = droq->q_no; 1202 lio_delete_droq_queue(droq->lio_dev, oq_no); 1203 } 1204 } 1205 1206 /** 1207 * Allocate and initialize SW ring. Initialize associated HW registers. 
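 * Also allocates the scatter/gather lists used to transmit from this queue.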
1208 * 1209 * @param eth_dev 1210 * Pointer to structure rte_eth_dev 1211 * 1212 * @param q_no 1213 * Queue number 1214 * 1215 * @param num_tx_descs 1216 * Number of ringbuffer descriptors 1217 * 1218 * @param socket_id 1219 * NUMA socket id, used for memory allocations 1220 * 1221 * @param tx_conf 1222 * Pointer to the structure rte_eth_txconf 1223 * 1224 * @return 1225 * - On success, return 0 1226 * - On failure, return -errno value 1227 */ 1228 static int 1229 lio_dev_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t q_no, 1230 uint16_t num_tx_descs, unsigned int socket_id, 1231 const struct rte_eth_txconf *tx_conf __rte_unused) 1232 { 1233 struct lio_device *lio_dev = LIO_DEV(eth_dev); 1234 int fw_mapped_iq = lio_dev->linfo.txpciq[q_no].s.q_no; 1235 int retval; 1236 1237 if (q_no >= lio_dev->nb_tx_queues) { 1238 lio_dev_err(lio_dev, "Invalid tx queue number %u\n", q_no); 1239 return -EINVAL; 1240 } 1241 1242 lio_dev_dbg(lio_dev, "setting up tx queue %u\n", q_no); 1243 1244 /* Free previous allocation if any */ 1245 if (eth_dev->data->tx_queues[q_no] != NULL) { 1246 lio_dev_tx_queue_release(eth_dev, q_no); 1247 eth_dev->data->tx_queues[q_no] = NULL; 1248 } 1249 1250 retval = lio_setup_iq(lio_dev, q_no, lio_dev->linfo.txpciq[q_no], 1251 num_tx_descs, lio_dev, socket_id); 1252 1253 if (retval) { 1254 lio_dev_err(lio_dev, "Runtime IQ(TxQ) creation failed.\n"); 1255 return retval; 1256 } 1257 1258 retval = lio_setup_sglists(lio_dev, q_no, fw_mapped_iq, 1259 lio_dev->instr_queue[fw_mapped_iq]->nb_desc, 1260 socket_id); 1261 1262 if (retval) { 1263 lio_delete_instruction_queue(lio_dev, fw_mapped_iq); 1264 return retval; 1265 } 1266 1267 eth_dev->data->tx_queues[q_no] = lio_dev->instr_queue[fw_mapped_iq]; 1268 1269 return 0; 1270 } 1271 1272 /** 1273 * Release the transmit queue/ringbuffer. Called by 1274 * the upper layers. 1275 * 1276 * @param eth_dev 1277 * Pointer to Ethernet device structure. 1278 * @param q_no 1279 * Transmit queue index. 1280 * 1281 * @return 1282 * - nothing 1283 */ 1284 void 1285 lio_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t q_no) 1286 { 1287 struct lio_instr_queue *tq = dev->data->tx_queues[q_no]; 1288 uint32_t fw_mapped_iq_no; 1289 1290 1291 if (tq) { 1292 /* Free sg_list */ 1293 lio_delete_sglist(tq); 1294 1295 fw_mapped_iq_no = tq->txpciq.s.q_no; 1296 lio_delete_instruction_queue(tq->lio_dev, fw_mapped_iq_no); 1297 } 1298 } 1299 1300 /** 1301 * Api to check link state. 
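 * Queries firmware with a LIO_OPCODE_INFO soft command and refreshes the
 * cached link info when it changes.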
1302 */ 1303 static void 1304 lio_dev_get_link_status(struct rte_eth_dev *eth_dev) 1305 { 1306 struct lio_device *lio_dev = LIO_DEV(eth_dev); 1307 uint16_t timeout = LIO_MAX_CMD_TIMEOUT; 1308 struct lio_link_status_resp *resp; 1309 union octeon_link_status *ls; 1310 struct lio_soft_command *sc; 1311 uint32_t resp_size; 1312 1313 if (!lio_dev->intf_open) 1314 return; 1315 1316 resp_size = sizeof(struct lio_link_status_resp); 1317 sc = lio_alloc_soft_command(lio_dev, 0, resp_size, 0); 1318 if (sc == NULL) 1319 return; 1320 1321 resp = (struct lio_link_status_resp *)sc->virtrptr; 1322 lio_prepare_soft_command(lio_dev, sc, LIO_OPCODE, 1323 LIO_OPCODE_INFO, 0, 0, 0); 1324 1325 /* Setting wait time in seconds */ 1326 sc->wait_time = LIO_MAX_CMD_TIMEOUT / 1000; 1327 1328 if (lio_send_soft_command(lio_dev, sc) == LIO_IQ_SEND_FAILED) 1329 goto get_status_fail; 1330 1331 while ((*sc->status_word == LIO_COMPLETION_WORD_INIT) && --timeout) { 1332 lio_flush_iq(lio_dev, lio_dev->instr_queue[sc->iq_no]); 1333 rte_delay_ms(1); 1334 } 1335 1336 if (resp->status) 1337 goto get_status_fail; 1338 1339 ls = &resp->link_info.link; 1340 1341 lio_swap_8B_data((uint64_t *)ls, sizeof(union octeon_link_status) >> 3); 1342 1343 if (lio_dev->linfo.link.link_status64 != ls->link_status64) { 1344 if (ls->s.mtu < eth_dev->data->mtu) { 1345 lio_dev_info(lio_dev, "Lowered VF MTU to %d as PF MTU dropped\n", 1346 ls->s.mtu); 1347 eth_dev->data->mtu = ls->s.mtu; 1348 } 1349 lio_dev->linfo.link.link_status64 = ls->link_status64; 1350 lio_dev_link_update(eth_dev, 0); 1351 } 1352 1353 lio_free_soft_command(sc); 1354 1355 return; 1356 1357 get_status_fail: 1358 lio_free_soft_command(sc); 1359 } 1360 1361 /* This function will be invoked every LSC_TIMEOUT ns (100ms) 1362 * and will update link state if it changes. 1363 */ 1364 static void 1365 lio_sync_link_state_check(void *eth_dev) 1366 { 1367 struct lio_device *lio_dev = 1368 (((struct rte_eth_dev *)eth_dev)->data->dev_private); 1369 1370 if (lio_dev->port_configured) 1371 lio_dev_get_link_status(eth_dev); 1372 1373 /* Schedule periodic link status check. 1374 * Stop check if interface is close and start again while opening. 1375 */ 1376 if (lio_dev->intf_open) 1377 rte_eal_alarm_set(LIO_LSC_TIMEOUT, lio_sync_link_state_check, 1378 eth_dev); 1379 } 1380 1381 static int 1382 lio_dev_start(struct rte_eth_dev *eth_dev) 1383 { 1384 struct lio_device *lio_dev = LIO_DEV(eth_dev); 1385 uint16_t timeout = LIO_MAX_CMD_TIMEOUT; 1386 int ret = 0; 1387 1388 lio_dev_info(lio_dev, "Starting port %d\n", eth_dev->data->port_id); 1389 1390 if (lio_dev->fn_list.enable_io_queues(lio_dev)) 1391 return -1; 1392 1393 if (lio_send_rx_ctrl_cmd(eth_dev, 1)) 1394 return -1; 1395 1396 /* Ready for link status updates */ 1397 lio_dev->intf_open = 1; 1398 rte_mb(); 1399 1400 /* Configure RSS if device configured with multiple RX queues. */ 1401 lio_dev_mq_rx_configure(eth_dev); 1402 1403 /* Before update the link info, 1404 * must set linfo.link.link_status64 to 0. 
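	 * The polling loop below waits on this field until firmware reports
	 * the link state.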
1405 */ 1406 lio_dev->linfo.link.link_status64 = 0; 1407 1408 /* start polling for lsc */ 1409 ret = rte_eal_alarm_set(LIO_LSC_TIMEOUT, 1410 lio_sync_link_state_check, 1411 eth_dev); 1412 if (ret) { 1413 lio_dev_err(lio_dev, 1414 "link state check handler creation failed\n"); 1415 goto dev_lsc_handle_error; 1416 } 1417 1418 while ((lio_dev->linfo.link.link_status64 == 0) && (--timeout)) 1419 rte_delay_ms(1); 1420 1421 if (lio_dev->linfo.link.link_status64 == 0) { 1422 ret = -1; 1423 goto dev_mtu_set_error; 1424 } 1425 1426 ret = lio_dev_mtu_set(eth_dev, eth_dev->data->mtu); 1427 if (ret != 0) 1428 goto dev_mtu_set_error; 1429 1430 return 0; 1431 1432 dev_mtu_set_error: 1433 rte_eal_alarm_cancel(lio_sync_link_state_check, eth_dev); 1434 1435 dev_lsc_handle_error: 1436 lio_dev->intf_open = 0; 1437 lio_send_rx_ctrl_cmd(eth_dev, 0); 1438 1439 return ret; 1440 } 1441 1442 /* Stop device and disable input/output functions */ 1443 static int 1444 lio_dev_stop(struct rte_eth_dev *eth_dev) 1445 { 1446 struct lio_device *lio_dev = LIO_DEV(eth_dev); 1447 1448 lio_dev_info(lio_dev, "Stopping port %d\n", eth_dev->data->port_id); 1449 eth_dev->data->dev_started = 0; 1450 lio_dev->intf_open = 0; 1451 rte_mb(); 1452 1453 /* Cancel callback if still running. */ 1454 rte_eal_alarm_cancel(lio_sync_link_state_check, eth_dev); 1455 1456 lio_send_rx_ctrl_cmd(eth_dev, 0); 1457 1458 lio_wait_for_instr_fetch(lio_dev); 1459 1460 /* Clear recorded link status */ 1461 lio_dev->linfo.link.link_status64 = 0; 1462 1463 return 0; 1464 } 1465 1466 static int 1467 lio_dev_set_link_up(struct rte_eth_dev *eth_dev) 1468 { 1469 struct lio_device *lio_dev = LIO_DEV(eth_dev); 1470 1471 if (!lio_dev->intf_open) { 1472 lio_dev_info(lio_dev, "Port is stopped, Start the port first\n"); 1473 return 0; 1474 } 1475 1476 if (lio_dev->linfo.link.s.link_up) { 1477 lio_dev_info(lio_dev, "Link is already UP\n"); 1478 return 0; 1479 } 1480 1481 if (lio_send_rx_ctrl_cmd(eth_dev, 1)) { 1482 lio_dev_err(lio_dev, "Unable to set Link UP\n"); 1483 return -1; 1484 } 1485 1486 lio_dev->linfo.link.s.link_up = 1; 1487 eth_dev->data->dev_link.link_status = RTE_ETH_LINK_UP; 1488 1489 return 0; 1490 } 1491 1492 static int 1493 lio_dev_set_link_down(struct rte_eth_dev *eth_dev) 1494 { 1495 struct lio_device *lio_dev = LIO_DEV(eth_dev); 1496 1497 if (!lio_dev->intf_open) { 1498 lio_dev_info(lio_dev, "Port is stopped, Start the port first\n"); 1499 return 0; 1500 } 1501 1502 if (!lio_dev->linfo.link.s.link_up) { 1503 lio_dev_info(lio_dev, "Link is already DOWN\n"); 1504 return 0; 1505 } 1506 1507 lio_dev->linfo.link.s.link_up = 0; 1508 eth_dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN; 1509 1510 if (lio_send_rx_ctrl_cmd(eth_dev, 0)) { 1511 lio_dev->linfo.link.s.link_up = 1; 1512 eth_dev->data->dev_link.link_status = RTE_ETH_LINK_UP; 1513 lio_dev_err(lio_dev, "Unable to set Link Down\n"); 1514 return -1; 1515 } 1516 1517 return 0; 1518 } 1519 1520 /** 1521 * Reset and stop the device. This occurs on the first 1522 * call to this routine. Subsequent calls will simply 1523 * return. NB: This will require the NIC to be rebooted. 
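 * Mailbox and gather-list resources are released here as well.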
1524 * 1525 * @param eth_dev 1526 * Pointer to the structure rte_eth_dev 1527 * 1528 * @return 1529 * - nothing 1530 */ 1531 static int 1532 lio_dev_close(struct rte_eth_dev *eth_dev) 1533 { 1534 struct lio_device *lio_dev = LIO_DEV(eth_dev); 1535 int ret = 0; 1536 1537 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 1538 return 0; 1539 1540 lio_dev_info(lio_dev, "closing port %d\n", eth_dev->data->port_id); 1541 1542 if (lio_dev->intf_open) 1543 ret = lio_dev_stop(eth_dev); 1544 1545 /* Reset ioq regs */ 1546 lio_dev->fn_list.setup_device_regs(lio_dev); 1547 1548 if (lio_dev->pci_dev->kdrv == RTE_PCI_KDRV_IGB_UIO) { 1549 cn23xx_vf_ask_pf_to_do_flr(lio_dev); 1550 rte_delay_ms(LIO_PCI_FLR_WAIT); 1551 } 1552 1553 /* lio_free_mbox */ 1554 lio_dev->fn_list.free_mbox(lio_dev); 1555 1556 /* Free glist resources */ 1557 rte_free(lio_dev->glist_head); 1558 rte_free(lio_dev->glist_lock); 1559 lio_dev->glist_head = NULL; 1560 lio_dev->glist_lock = NULL; 1561 1562 lio_dev->port_configured = 0; 1563 1564 /* Delete all queues */ 1565 lio_dev_clear_queues(eth_dev); 1566 1567 return ret; 1568 } 1569 1570 /** 1571 * Enable tunnel rx checksum verification from firmware. 1572 */ 1573 static void 1574 lio_enable_hw_tunnel_rx_checksum(struct rte_eth_dev *eth_dev) 1575 { 1576 struct lio_device *lio_dev = LIO_DEV(eth_dev); 1577 struct lio_dev_ctrl_cmd ctrl_cmd; 1578 struct lio_ctrl_pkt ctrl_pkt; 1579 1580 /* flush added to prevent cmd failure 1581 * incase the queue is full 1582 */ 1583 lio_flush_iq(lio_dev, lio_dev->instr_queue[0]); 1584 1585 memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt)); 1586 memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd)); 1587 1588 ctrl_cmd.eth_dev = eth_dev; 1589 ctrl_cmd.cond = 0; 1590 1591 ctrl_pkt.ncmd.s.cmd = LIO_CMD_TNL_RX_CSUM_CTL; 1592 ctrl_pkt.ncmd.s.param1 = LIO_CMD_RXCSUM_ENABLE; 1593 ctrl_pkt.ctrl_cmd = &ctrl_cmd; 1594 1595 if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) { 1596 lio_dev_err(lio_dev, "Failed to send TNL_RX_CSUM command\n"); 1597 return; 1598 } 1599 1600 if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) 1601 lio_dev_err(lio_dev, "TNL_RX_CSUM command timed out\n"); 1602 } 1603 1604 /** 1605 * Enable checksum calculation for inner packet in a tunnel. 
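 * Sends LIO_CMD_TNL_TX_CSUM_CTL with LIO_CMD_TXCSUM_ENABLE to the firmware.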
1606 */ 1607 static void 1608 lio_enable_hw_tunnel_tx_checksum(struct rte_eth_dev *eth_dev) 1609 { 1610 struct lio_device *lio_dev = LIO_DEV(eth_dev); 1611 struct lio_dev_ctrl_cmd ctrl_cmd; 1612 struct lio_ctrl_pkt ctrl_pkt; 1613 1614 /* flush added to prevent cmd failure 1615 * incase the queue is full 1616 */ 1617 lio_flush_iq(lio_dev, lio_dev->instr_queue[0]); 1618 1619 memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt)); 1620 memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd)); 1621 1622 ctrl_cmd.eth_dev = eth_dev; 1623 ctrl_cmd.cond = 0; 1624 1625 ctrl_pkt.ncmd.s.cmd = LIO_CMD_TNL_TX_CSUM_CTL; 1626 ctrl_pkt.ncmd.s.param1 = LIO_CMD_TXCSUM_ENABLE; 1627 ctrl_pkt.ctrl_cmd = &ctrl_cmd; 1628 1629 if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) { 1630 lio_dev_err(lio_dev, "Failed to send TNL_TX_CSUM command\n"); 1631 return; 1632 } 1633 1634 if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) 1635 lio_dev_err(lio_dev, "TNL_TX_CSUM command timed out\n"); 1636 } 1637 1638 static int 1639 lio_send_queue_count_update(struct rte_eth_dev *eth_dev, int num_txq, 1640 int num_rxq) 1641 { 1642 struct lio_device *lio_dev = LIO_DEV(eth_dev); 1643 struct lio_dev_ctrl_cmd ctrl_cmd; 1644 struct lio_ctrl_pkt ctrl_pkt; 1645 1646 if (strcmp(lio_dev->firmware_version, LIO_Q_RECONF_MIN_VERSION) < 0) { 1647 lio_dev_err(lio_dev, "Require firmware version >= %s\n", 1648 LIO_Q_RECONF_MIN_VERSION); 1649 return -ENOTSUP; 1650 } 1651 1652 /* flush added to prevent cmd failure 1653 * incase the queue is full 1654 */ 1655 lio_flush_iq(lio_dev, lio_dev->instr_queue[0]); 1656 1657 memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt)); 1658 memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd)); 1659 1660 ctrl_cmd.eth_dev = eth_dev; 1661 ctrl_cmd.cond = 0; 1662 1663 ctrl_pkt.ncmd.s.cmd = LIO_CMD_QUEUE_COUNT_CTL; 1664 ctrl_pkt.ncmd.s.param1 = num_txq; 1665 ctrl_pkt.ncmd.s.param2 = num_rxq; 1666 ctrl_pkt.ctrl_cmd = &ctrl_cmd; 1667 1668 if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) { 1669 lio_dev_err(lio_dev, "Failed to send queue count control command\n"); 1670 return -1; 1671 } 1672 1673 if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) { 1674 lio_dev_err(lio_dev, "Queue count control command timed out\n"); 1675 return -1; 1676 } 1677 1678 return 0; 1679 } 1680 1681 static int 1682 lio_reconf_queues(struct rte_eth_dev *eth_dev, int num_txq, int num_rxq) 1683 { 1684 struct lio_device *lio_dev = LIO_DEV(eth_dev); 1685 int ret; 1686 1687 if (lio_dev->nb_rx_queues != num_rxq || 1688 lio_dev->nb_tx_queues != num_txq) { 1689 if (lio_send_queue_count_update(eth_dev, num_txq, num_rxq)) 1690 return -1; 1691 lio_dev->nb_rx_queues = num_rxq; 1692 lio_dev->nb_tx_queues = num_txq; 1693 } 1694 1695 if (lio_dev->intf_open) { 1696 ret = lio_dev_stop(eth_dev); 1697 if (ret != 0) 1698 return ret; 1699 } 1700 1701 /* Reset ioq registers */ 1702 if (lio_dev->fn_list.setup_device_regs(lio_dev)) { 1703 lio_dev_err(lio_dev, "Failed to configure device registers\n"); 1704 return -1; 1705 } 1706 1707 return 0; 1708 } 1709 1710 static int 1711 lio_dev_configure(struct rte_eth_dev *eth_dev) 1712 { 1713 struct lio_device *lio_dev = LIO_DEV(eth_dev); 1714 uint16_t timeout = LIO_MAX_CMD_TIMEOUT; 1715 int retval, num_iqueues, num_oqueues; 1716 uint8_t mac[RTE_ETHER_ADDR_LEN], i; 1717 struct lio_if_cfg_resp *resp; 1718 struct lio_soft_command *sc; 1719 union lio_if_cfg if_cfg; 1720 uint32_t resp_size; 1721 1722 PMD_INIT_FUNC_TRACE(); 1723 1724 if (eth_dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) 1725 eth_dev->data->dev_conf.rxmode.offloads |= 1726 
RTE_ETH_RX_OFFLOAD_RSS_HASH; 1727 1728 /* Inform firmware about change in number of queues to use. 1729 * Disable IO queues and reset registers for re-configuration. 1730 */ 1731 if (lio_dev->port_configured) 1732 return lio_reconf_queues(eth_dev, 1733 eth_dev->data->nb_tx_queues, 1734 eth_dev->data->nb_rx_queues); 1735 1736 lio_dev->nb_rx_queues = eth_dev->data->nb_rx_queues; 1737 lio_dev->nb_tx_queues = eth_dev->data->nb_tx_queues; 1738 1739 /* Set max number of queues which can be re-configured. */ 1740 lio_dev->max_rx_queues = eth_dev->data->nb_rx_queues; 1741 lio_dev->max_tx_queues = eth_dev->data->nb_tx_queues; 1742 1743 resp_size = sizeof(struct lio_if_cfg_resp); 1744 sc = lio_alloc_soft_command(lio_dev, 0, resp_size, 0); 1745 if (sc == NULL) 1746 return -ENOMEM; 1747 1748 resp = (struct lio_if_cfg_resp *)sc->virtrptr; 1749 1750 /* Firmware doesn't have capability to reconfigure the queues, 1751 * Claim all queues, and use as many required 1752 */ 1753 if_cfg.if_cfg64 = 0; 1754 if_cfg.s.num_iqueues = lio_dev->nb_tx_queues; 1755 if_cfg.s.num_oqueues = lio_dev->nb_rx_queues; 1756 if_cfg.s.base_queue = 0; 1757 1758 if_cfg.s.gmx_port_id = lio_dev->pf_num; 1759 1760 lio_prepare_soft_command(lio_dev, sc, LIO_OPCODE, 1761 LIO_OPCODE_IF_CFG, 0, 1762 if_cfg.if_cfg64, 0); 1763 1764 /* Setting wait time in seconds */ 1765 sc->wait_time = LIO_MAX_CMD_TIMEOUT / 1000; 1766 1767 retval = lio_send_soft_command(lio_dev, sc); 1768 if (retval == LIO_IQ_SEND_FAILED) { 1769 lio_dev_err(lio_dev, "iq/oq config failed status: %x\n", 1770 retval); 1771 /* Soft instr is freed by driver in case of failure. */ 1772 goto nic_config_fail; 1773 } 1774 1775 /* Sleep on a wait queue till the cond flag indicates that the 1776 * response arrived or timed-out. 1777 */ 1778 while ((*sc->status_word == LIO_COMPLETION_WORD_INIT) && --timeout) { 1779 lio_flush_iq(lio_dev, lio_dev->instr_queue[sc->iq_no]); 1780 lio_process_ordered_list(lio_dev); 1781 rte_delay_ms(1); 1782 } 1783 1784 retval = resp->status; 1785 if (retval) { 1786 lio_dev_err(lio_dev, "iq/oq config failed\n"); 1787 goto nic_config_fail; 1788 } 1789 1790 strlcpy(lio_dev->firmware_version, 1791 resp->cfg_info.lio_firmware_version, LIO_FW_VERSION_LENGTH); 1792 1793 lio_swap_8B_data((uint64_t *)(&resp->cfg_info), 1794 sizeof(struct octeon_if_cfg_info) >> 3); 1795 1796 num_iqueues = lio_hweight64(resp->cfg_info.iqmask); 1797 num_oqueues = lio_hweight64(resp->cfg_info.oqmask); 1798 1799 if (!(num_iqueues) || !(num_oqueues)) { 1800 lio_dev_err(lio_dev, 1801 "Got bad iqueues (%016lx) or oqueues (%016lx) from firmware.\n", 1802 (unsigned long)resp->cfg_info.iqmask, 1803 (unsigned long)resp->cfg_info.oqmask); 1804 goto nic_config_fail; 1805 } 1806 1807 lio_dev_dbg(lio_dev, 1808 "interface %d, iqmask %016lx, oqmask %016lx, numiqueues %d, numoqueues %d\n", 1809 eth_dev->data->port_id, 1810 (unsigned long)resp->cfg_info.iqmask, 1811 (unsigned long)resp->cfg_info.oqmask, 1812 num_iqueues, num_oqueues); 1813 1814 lio_dev->linfo.num_rxpciq = num_oqueues; 1815 lio_dev->linfo.num_txpciq = num_iqueues; 1816 1817 for (i = 0; i < num_oqueues; i++) { 1818 lio_dev->linfo.rxpciq[i].rxpciq64 = 1819 resp->cfg_info.linfo.rxpciq[i].rxpciq64; 1820 lio_dev_dbg(lio_dev, "index %d OQ %d\n", 1821 i, lio_dev->linfo.rxpciq[i].s.q_no); 1822 } 1823 1824 for (i = 0; i < num_iqueues; i++) { 1825 lio_dev->linfo.txpciq[i].txpciq64 = 1826 resp->cfg_info.linfo.txpciq[i].txpciq64; 1827 lio_dev_dbg(lio_dev, "index %d IQ %d\n", 1828 i, lio_dev->linfo.txpciq[i].s.q_no); 1829 } 1830 1831 
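	/* Cache the link info reported by firmware; the permanent MAC address
	 * is extracted from hw_addr after the 64-bit swap below.
	 */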
lio_dev->linfo.hw_addr = resp->cfg_info.linfo.hw_addr; 1832 lio_dev->linfo.gmxport = resp->cfg_info.linfo.gmxport; 1833 lio_dev->linfo.link.link_status64 = 1834 resp->cfg_info.linfo.link.link_status64; 1835 1836 /* 64-bit swap required on LE machines */ 1837 lio_swap_8B_data(&lio_dev->linfo.hw_addr, 1); 1838 for (i = 0; i < RTE_ETHER_ADDR_LEN; i++) 1839 mac[i] = *((uint8_t *)(((uint8_t *)&lio_dev->linfo.hw_addr) + 1840 2 + i)); 1841 1842 /* Copy the permanent MAC address */ 1843 rte_ether_addr_copy((struct rte_ether_addr *)mac, &eth_dev->data->mac_addrs[0]); 1845 1846 /* enable firmware checksum support for tunnel packets */ 1847 lio_enable_hw_tunnel_rx_checksum(eth_dev); 1848 lio_enable_hw_tunnel_tx_checksum(eth_dev); 1849 1850 lio_dev->glist_lock = 1851 rte_zmalloc(NULL, sizeof(*lio_dev->glist_lock) * num_iqueues, 0); 1852 if (lio_dev->glist_lock == NULL) 1853 return -ENOMEM; 1854 1855 lio_dev->glist_head = 1856 rte_zmalloc(NULL, sizeof(*lio_dev->glist_head) * num_iqueues, 1857 0); 1858 if (lio_dev->glist_head == NULL) { 1859 rte_free(lio_dev->glist_lock); 1860 lio_dev->glist_lock = NULL; 1861 return -ENOMEM; 1862 } 1863 1864 lio_dev_link_update(eth_dev, 0); 1865 1866 lio_dev->port_configured = 1; 1867 1868 lio_free_soft_command(sc); 1869 1870 /* Reset ioq regs */ 1871 lio_dev->fn_list.setup_device_regs(lio_dev); 1872 1873 /* Free iq_0 used during init */ 1874 lio_free_instr_queue0(lio_dev); 1875 1876 return 0; 1877 1878 nic_config_fail: 1879 lio_dev_err(lio_dev, "Failed retval %d\n", retval); 1880 lio_free_soft_command(sc); 1881 lio_free_instr_queue0(lio_dev); 1882 1883 return -ENODEV; 1884 } 1885 1886 /* Define our ethernet device operations */ 1887 static const struct eth_dev_ops liovf_eth_dev_ops = { 1888 .dev_configure = lio_dev_configure, 1889 .dev_start = lio_dev_start, 1890 .dev_stop = lio_dev_stop, 1891 .dev_set_link_up = lio_dev_set_link_up, 1892 .dev_set_link_down = lio_dev_set_link_down, 1893 .dev_close = lio_dev_close, 1894 .promiscuous_enable = lio_dev_promiscuous_enable, 1895 .promiscuous_disable = lio_dev_promiscuous_disable, 1896 .allmulticast_enable = lio_dev_allmulticast_enable, 1897 .allmulticast_disable = lio_dev_allmulticast_disable, 1898 .link_update = lio_dev_link_update, 1899 .stats_get = lio_dev_stats_get, 1900 .xstats_get = lio_dev_xstats_get, 1901 .xstats_get_names = lio_dev_xstats_get_names, 1902 .stats_reset = lio_dev_stats_reset, 1903 .xstats_reset = lio_dev_xstats_reset, 1904 .dev_infos_get = lio_dev_info_get, 1905 .vlan_filter_set = lio_dev_vlan_filter_set, 1906 .rx_queue_setup = lio_dev_rx_queue_setup, 1907 .rx_queue_release = lio_dev_rx_queue_release, 1908 .tx_queue_setup = lio_dev_tx_queue_setup, 1909 .tx_queue_release = lio_dev_tx_queue_release, 1910 .reta_update = lio_dev_rss_reta_update, 1911 .reta_query = lio_dev_rss_reta_query, 1912 .rss_hash_conf_get = lio_dev_rss_hash_conf_get, 1913 .rss_hash_update = lio_dev_rss_hash_update, 1914 .udp_tunnel_port_add = lio_dev_udp_tunnel_add, 1915 .udp_tunnel_port_del = lio_dev_udp_tunnel_del, 1916 .mtu_set = lio_dev_mtu_set, 1917 }; 1918 1919 static void 1920 lio_check_pf_hs_response(void *lio_dev) 1921 { 1922 struct lio_device *dev = lio_dev; 1923 1924 /* check till response arrives */ 1925 if (dev->pfvf_hsword.coproc_tics_per_us) 1926 return; 1927 1928 cn23xx_vf_handle_mbox(dev); 1929 1930 rte_eal_alarm_set(1, lio_check_pf_hs_response, lio_dev); 1931 } 1932 1933 /** 1934 * \brief Identify the LIO device and map the BAR address space 1935 * @param lio_dev lio device 1936 */ 1937 static int 1938
lio_chip_specific_setup(struct lio_device *lio_dev) 1939 { 1940 struct rte_pci_device *pdev = lio_dev->pci_dev; 1941 uint32_t dev_id = pdev->id.device_id; 1942 const char *s; 1943 int ret = 1; 1944 1945 switch (dev_id) { 1946 case LIO_CN23XX_VF_VID: 1947 lio_dev->chip_id = LIO_CN23XX_VF_VID; 1948 ret = cn23xx_vf_setup_device(lio_dev); 1949 s = "CN23XX VF"; 1950 break; 1951 default: 1952 s = "?"; 1953 lio_dev_err(lio_dev, "Unsupported Chip\n"); 1954 } 1955 1956 if (!ret) 1957 lio_dev_info(lio_dev, "DEVICE : %s\n", s); 1958 1959 return ret; 1960 } 1961 1962 static int 1963 lio_first_time_init(struct lio_device *lio_dev, 1964 struct rte_pci_device *pdev) 1965 { 1966 int dpdk_queues; 1967 1968 PMD_INIT_FUNC_TRACE(); 1969 1970 /* set dpdk specific pci device pointer */ 1971 lio_dev->pci_dev = pdev; 1972 1973 /* Identify the LIO type and set device ops */ 1974 if (lio_chip_specific_setup(lio_dev)) { 1975 lio_dev_err(lio_dev, "Chip specific setup failed\n"); 1976 return -1; 1977 } 1978 1979 /* Initialize soft command buffer pool */ 1980 if (lio_setup_sc_buffer_pool(lio_dev)) { 1981 lio_dev_err(lio_dev, "sc buffer pool allocation failed\n"); 1982 return -1; 1983 } 1984 1985 /* Initialize lists to manage the requests of different types that 1986 * arrive from applications for this lio device. 1987 */ 1988 lio_setup_response_list(lio_dev); 1989 1990 if (lio_dev->fn_list.setup_mbox(lio_dev)) { 1991 lio_dev_err(lio_dev, "Mailbox setup failed\n"); 1992 goto error; 1993 } 1994 1995 /* Check PF response */ 1996 lio_check_pf_hs_response((void *)lio_dev); 1997 1998 /* Do handshake and exit if incompatible PF driver */ 1999 if (cn23xx_pfvf_handshake(lio_dev)) 2000 goto error; 2001 2002 /* Request and wait for device reset. */ 2003 if (pdev->kdrv == RTE_PCI_KDRV_IGB_UIO) { 2004 cn23xx_vf_ask_pf_to_do_flr(lio_dev); 2005 /* FLR wait time doubled as a precaution. */ 2006 rte_delay_ms(LIO_PCI_FLR_WAIT * 2); 2007 } 2008 2009 if (lio_dev->fn_list.setup_device_regs(lio_dev)) { 2010 lio_dev_err(lio_dev, "Failed to configure device registers\n"); 2011 goto error; 2012 } 2013 2014 if (lio_setup_instr_queue0(lio_dev)) { 2015 lio_dev_err(lio_dev, "Failed to setup instruction queue 0\n"); 2016 goto error; 2017 } 2018 2019 dpdk_queues = (int)lio_dev->sriov_info.rings_per_vf; 2020 2021 lio_dev->max_tx_queues = dpdk_queues; 2022 lio_dev->max_rx_queues = dpdk_queues; 2023 2024 /* Enable input and output queues for this device */ 2025 if (lio_dev->fn_list.enable_io_queues(lio_dev)) 2026 goto error; 2027 2028 return 0; 2029 2030 error: 2031 lio_free_sc_buffer_pool(lio_dev); 2032 if (lio_dev->mbox[0]) 2033 lio_dev->fn_list.free_mbox(lio_dev); 2034 if (lio_dev->instr_queue[0]) 2035 lio_free_instr_queue0(lio_dev); 2036 2037 return -1; 2038 } 2039 2040 static int 2041 lio_eth_dev_uninit(struct rte_eth_dev *eth_dev) 2042 { 2043 struct lio_device *lio_dev = LIO_DEV(eth_dev); 2044 2045 PMD_INIT_FUNC_TRACE(); 2046 2047 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 2048 return 0; 2049 2050 /* lio_free_sc_buffer_pool */ 2051 lio_free_sc_buffer_pool(lio_dev); 2052 2053 return 0; 2054 } 2055 2056 static int 2057 lio_eth_dev_init(struct rte_eth_dev *eth_dev) 2058 { 2059 struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(eth_dev); 2060 struct lio_device *lio_dev = LIO_DEV(eth_dev); 2061 2062 PMD_INIT_FUNC_TRACE(); 2063 2064 eth_dev->rx_pkt_burst = &lio_dev_recv_pkts; 2065 eth_dev->tx_pkt_burst = &lio_dev_xmit_pkts; 2066 2067 /* Primary does the initialization. 
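	 * Secondary processes only need the rx/tx burst handlers set above.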
*/ 2068 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 2069 return 0; 2070 2071 rte_eth_copy_pci_info(eth_dev, pdev); 2072 2073 if (pdev->mem_resource[0].addr) { 2074 lio_dev->hw_addr = pdev->mem_resource[0].addr; 2075 } else { 2076 PMD_INIT_LOG(ERR, "ERROR: Failed to map BAR0\n"); 2077 return -ENODEV; 2078 } 2079 2080 lio_dev->eth_dev = eth_dev; 2081 /* set lio device print string */ 2082 snprintf(lio_dev->dev_string, sizeof(lio_dev->dev_string), 2083 "%s[%02x:%02x.%x]", pdev->driver->driver.name, 2084 pdev->addr.bus, pdev->addr.devid, pdev->addr.function); 2085 2086 lio_dev->port_id = eth_dev->data->port_id; 2087 2088 if (lio_first_time_init(lio_dev, pdev)) { 2089 lio_dev_err(lio_dev, "Device init failed\n"); 2090 return -EINVAL; 2091 } 2092 2093 eth_dev->dev_ops = &liovf_eth_dev_ops; 2094 eth_dev->data->mac_addrs = rte_zmalloc("lio", RTE_ETHER_ADDR_LEN, 0); 2095 if (eth_dev->data->mac_addrs == NULL) { 2096 lio_dev_err(lio_dev, 2097 "MAC addresses memory allocation failed\n"); 2098 eth_dev->dev_ops = NULL; 2099 eth_dev->rx_pkt_burst = NULL; 2100 eth_dev->tx_pkt_burst = NULL; 2101 return -ENOMEM; 2102 } 2103 2104 rte_atomic64_set(&lio_dev->status, LIO_DEV_RUNNING); 2105 rte_wmb(); 2106 2107 lio_dev->port_configured = 0; 2108 /* Always allow unicast packets */ 2109 lio_dev->ifflags |= LIO_IFFLAG_UNICAST; 2110 2111 return 0; 2112 } 2113 2114 static int 2115 lio_eth_dev_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, 2116 struct rte_pci_device *pci_dev) 2117 { 2118 return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct lio_device), 2119 lio_eth_dev_init); 2120 } 2121 2122 static int 2123 lio_eth_dev_pci_remove(struct rte_pci_device *pci_dev) 2124 { 2125 return rte_eth_dev_pci_generic_remove(pci_dev, 2126 lio_eth_dev_uninit); 2127 } 2128 2129 /* Set of PCI devices this driver supports */ 2130 static const struct rte_pci_id pci_id_liovf_map[] = { 2131 { RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, LIO_CN23XX_VF_VID) }, 2132 { .vendor_id = 0, /* sentinel */ } 2133 }; 2134 2135 static struct rte_pci_driver rte_liovf_pmd = { 2136 .id_table = pci_id_liovf_map, 2137 .drv_flags = RTE_PCI_DRV_NEED_MAPPING, 2138 .probe = lio_eth_dev_pci_probe, 2139 .remove = lio_eth_dev_pci_remove, 2140 }; 2141 2142 RTE_PMD_REGISTER_PCI(net_liovf, rte_liovf_pmd); 2143 RTE_PMD_REGISTER_PCI_TABLE(net_liovf, pci_id_liovf_map); 2144 RTE_PMD_REGISTER_KMOD_DEP(net_liovf, "* igb_uio | vfio-pci"); 2145 RTE_LOG_REGISTER_SUFFIX(lio_logtype_init, init, NOTICE); 2146 RTE_LOG_REGISTER_SUFFIX(lio_logtype_driver, driver, NOTICE); 2147