/*
 *   BSD LICENSE
 *
 *   Copyright(c) 2017 Cavium, Inc. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Cavium, Inc. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_ethdev.h>
#include <rte_ethdev_pci.h>
#include <rte_cycles.h>
#include <rte_malloc.h>
#include <rte_alarm.h>
#include <rte_ether.h>

#include "lio_logs.h"
#include "lio_23xx_vf.h"
#include "lio_ethdev.h"
#include "lio_rxtx.h"

/* Default RSS key in use */
static uint8_t lio_rss_key[40] = {
	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
};

static const struct rte_eth_desc_lim lio_rx_desc_lim = {
	.nb_max		= CN23XX_MAX_OQ_DESCRIPTORS,
	.nb_min		= CN23XX_MIN_OQ_DESCRIPTORS,
	.nb_align	= 1,
};

static const struct rte_eth_desc_lim lio_tx_desc_lim = {
	.nb_max		= CN23XX_MAX_IQ_DESCRIPTORS,
	.nb_min		= CN23XX_MIN_IQ_DESCRIPTORS,
	.nb_align	= 1,
};
/* Wait for control command to reach the NIC. */
static uint16_t
lio_wait_for_ctrl_cmd(struct lio_device *lio_dev,
		      struct lio_dev_ctrl_cmd *ctrl_cmd)
{
	uint16_t timeout = LIO_MAX_CMD_TIMEOUT;

	while ((ctrl_cmd->cond == 0) && --timeout) {
		lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);
		rte_delay_ms(1);
	}

	return !timeout;
}

/**
 * \brief Send Rx control command
 * @param eth_dev Pointer to the structure rte_eth_dev
 * @param start_stop whether to start or stop
 */
static int
lio_send_rx_ctrl_cmd(struct rte_eth_dev *eth_dev, int start_stop)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	struct lio_dev_ctrl_cmd ctrl_cmd;
	struct lio_ctrl_pkt ctrl_pkt;

	/* flush added to prevent cmd failure
	 * in case the queue is full
	 */
	lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);

	memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
	memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));

	ctrl_cmd.eth_dev = eth_dev;
	ctrl_cmd.cond = 0;

	ctrl_pkt.ncmd.s.cmd = LIO_CMD_RX_CTL;
	ctrl_pkt.ncmd.s.param1 = start_stop;
	ctrl_pkt.ctrl_cmd = &ctrl_cmd;

	if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
		lio_dev_err(lio_dev, "Failed to send RX Control message\n");
		return -1;
	}

	if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) {
		lio_dev_err(lio_dev, "RX Control command timed out\n");
		return -1;
	}

	return 0;
}

/* store statistics names and its offset in stats structure */
struct rte_lio_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned int offset;
};

static const struct rte_lio_xstats_name_off rte_lio_stats_strings[] = {
	{"rx_pkts", offsetof(struct octeon_rx_stats, total_rcvd)},
	{"rx_bytes", offsetof(struct octeon_rx_stats, bytes_rcvd)},
	{"rx_broadcast_pkts", offsetof(struct octeon_rx_stats, total_bcst)},
	{"rx_multicast_pkts", offsetof(struct octeon_rx_stats, total_mcst)},
	{"rx_flow_ctrl_pkts", offsetof(struct octeon_rx_stats, ctl_rcvd)},
	{"rx_fifo_err", offsetof(struct octeon_rx_stats, fifo_err)},
	{"rx_dmac_drop", offsetof(struct octeon_rx_stats, dmac_drop)},
	{"rx_fcs_err", offsetof(struct octeon_rx_stats, fcs_err)},
	{"rx_jabber_err", offsetof(struct octeon_rx_stats, jabber_err)},
	{"rx_l2_err", offsetof(struct octeon_rx_stats, l2_err)},
	{"rx_vxlan_pkts", offsetof(struct octeon_rx_stats, fw_rx_vxlan)},
	{"rx_vxlan_err", offsetof(struct octeon_rx_stats, fw_rx_vxlan_err)},
	{"rx_lro_pkts", offsetof(struct octeon_rx_stats, fw_lro_pkts)},
	{"tx_pkts", (offsetof(struct octeon_tx_stats, total_pkts_sent)) +
			sizeof(struct octeon_rx_stats)},
	{"tx_bytes", (offsetof(struct octeon_tx_stats, total_bytes_sent)) +
			sizeof(struct octeon_rx_stats)},
	{"tx_broadcast_pkts",
		(offsetof(struct octeon_tx_stats, bcast_pkts_sent)) +
			sizeof(struct octeon_rx_stats)},
	{"tx_multicast_pkts",
		(offsetof(struct octeon_tx_stats, mcast_pkts_sent)) +
			sizeof(struct octeon_rx_stats)},
	{"tx_flow_ctrl_pkts", (offsetof(struct octeon_tx_stats, ctl_sent)) +
			sizeof(struct octeon_rx_stats)},
	{"tx_fifo_err", (offsetof(struct octeon_tx_stats, fifo_err)) +
			sizeof(struct octeon_rx_stats)},
	{"tx_total_collisions", (offsetof(struct octeon_tx_stats,
					  total_collisions)) +
			sizeof(struct octeon_rx_stats)},
	{"tx_tso", (offsetof(struct octeon_tx_stats, fw_tso)) +
			sizeof(struct octeon_rx_stats)},
	{"tx_vxlan_pkts", (offsetof(struct octeon_tx_stats, fw_tx_vxlan)) +
			sizeof(struct octeon_rx_stats)},
};

#define LIO_NB_XSTATS	RTE_DIM(rte_lio_stats_strings)
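/*
 * Note on the offset arithmetic above: lio_dev_xstats_get() below reads
 * every counter out of a single struct octeon_link_stats reply in which
 * the Tx counters follow the Rx counters. A minimal sketch of the
 * assumed layout (member names here are illustrative; the real
 * definition lives in the driver headers):
 *
 *	struct octeon_link_stats {
 *		struct octeon_rx_stats rx;	offset 0
 *		struct octeon_tx_stats tx;	offset sizeof(rx)
 *	};
 *
 * which is why each tx_* entry adds sizeof(struct octeon_rx_stats) to
 * its offsetof() value.
 */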
/* Get hw stats of the port */
static int
lio_dev_xstats_get(struct rte_eth_dev *eth_dev, struct rte_eth_xstat *xstats,
		   unsigned int n)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	uint16_t timeout = LIO_MAX_CMD_TIMEOUT;
	struct octeon_link_stats *hw_stats;
	struct lio_link_stats_resp *resp;
	struct lio_soft_command *sc;
	uint32_t resp_size;
	unsigned int i;
	int retval;

	if (!lio_dev->intf_open) {
		lio_dev_err(lio_dev, "Port %d down\n",
			    lio_dev->port_id);
		return -EINVAL;
	}

	if (n < LIO_NB_XSTATS)
		return LIO_NB_XSTATS;

	resp_size = sizeof(struct lio_link_stats_resp);
	sc = lio_alloc_soft_command(lio_dev, 0, resp_size, 0);
	if (sc == NULL)
		return -ENOMEM;

	resp = (struct lio_link_stats_resp *)sc->virtrptr;
	lio_prepare_soft_command(lio_dev, sc, LIO_OPCODE,
				 LIO_OPCODE_PORT_STATS, 0, 0, 0);

	/* Setting wait time in seconds */
	sc->wait_time = LIO_MAX_CMD_TIMEOUT / 1000;

	retval = lio_send_soft_command(lio_dev, sc);
	if (retval == LIO_IQ_SEND_FAILED) {
		lio_dev_err(lio_dev, "failed to get port stats from firmware. status: %x\n",
			    retval);
		goto get_stats_fail;
	}

	while ((*sc->status_word == LIO_COMPLETION_WORD_INIT) && --timeout) {
		lio_flush_iq(lio_dev, lio_dev->instr_queue[sc->iq_no]);
		lio_process_ordered_list(lio_dev);
		rte_delay_ms(1);
	}

	retval = resp->status;
	if (retval) {
		lio_dev_err(lio_dev, "failed to get port stats from firmware\n");
		goto get_stats_fail;
	}

	lio_swap_8B_data((uint64_t *)(&resp->link_stats),
			 sizeof(struct octeon_link_stats) >> 3);

	hw_stats = &resp->link_stats;

	for (i = 0; i < LIO_NB_XSTATS; i++) {
		xstats[i].id = i;
		xstats[i].value =
		    *(uint64_t *)(((char *)hw_stats) +
				  rte_lio_stats_strings[i].offset);
	}

	lio_free_soft_command(sc);

	return LIO_NB_XSTATS;

get_stats_fail:
	lio_free_soft_command(sc);

	return -1;
}

static int
lio_dev_xstats_get_names(struct rte_eth_dev *eth_dev,
			 struct rte_eth_xstat_name *xstats_names,
			 unsigned int limit __rte_unused)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	unsigned int i;

	if (!lio_dev->intf_open) {
		lio_dev_err(lio_dev, "Port %d down\n",
			    lio_dev->port_id);
		return -EINVAL;
	}

	if (xstats_names == NULL)
		return LIO_NB_XSTATS;

	/* Note: limit checked in rte_eth_xstats_get_names() */

	for (i = 0; i < LIO_NB_XSTATS; i++) {
		snprintf(xstats_names[i].name, sizeof(xstats_names[i].name),
			 "%s", rte_lio_stats_strings[i].name);
	}

	return LIO_NB_XSTATS;
}
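/*
 * Illustrative application-side sketch (not driver code): the two
 * callbacks above are reached through the generic ethdev xstats API. A
 * typical query, assuming a started port and omitting error checks:
 *
 *	struct rte_eth_xstat xstats[LIO_NB_XSTATS];
 *	struct rte_eth_xstat_name names[LIO_NB_XSTATS];
 *	int i, n;
 *
 *	rte_eth_xstats_get_names(port_id, names, LIO_NB_XSTATS);
 *	n = rte_eth_xstats_get(port_id, xstats, LIO_NB_XSTATS);
 *	for (i = 0; i < n; i++)
 *		printf("%s: %" PRIu64 "\n",
 *		       names[xstats[i].id].name, xstats[i].value);
 */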
/* Reset hw stats for the port */
static void
lio_dev_xstats_reset(struct rte_eth_dev *eth_dev)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	struct lio_dev_ctrl_cmd ctrl_cmd;
	struct lio_ctrl_pkt ctrl_pkt;

	if (!lio_dev->intf_open) {
		lio_dev_err(lio_dev, "Port %d down\n",
			    lio_dev->port_id);
		return;
	}

	/* flush added to prevent cmd failure
	 * in case the queue is full
	 */
	lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);

	memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
	memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));

	ctrl_cmd.eth_dev = eth_dev;
	ctrl_cmd.cond = 0;

	ctrl_pkt.ncmd.s.cmd = LIO_CMD_CLEAR_STATS;
	ctrl_pkt.ctrl_cmd = &ctrl_cmd;

	if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
		lio_dev_err(lio_dev, "Failed to send clear stats command\n");
		return;
	}

	if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) {
		lio_dev_err(lio_dev, "Clear stats command timed out\n");
		return;
	}

	/* clear stored per queue stats */
	RTE_FUNC_PTR_OR_RET(*eth_dev->dev_ops->stats_reset);
	(*eth_dev->dev_ops->stats_reset)(eth_dev);
}

/* Retrieve the device statistics (# packets in/out, # bytes in/out, etc.) */
static int
lio_dev_stats_get(struct rte_eth_dev *eth_dev,
		  struct rte_eth_stats *stats)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	struct lio_droq_stats *oq_stats;
	struct lio_iq_stats *iq_stats;
	struct lio_instr_queue *txq;
	struct lio_droq *droq;
	int i, iq_no, oq_no;
	uint64_t bytes = 0;
	uint64_t pkts = 0;
	uint64_t drop = 0;

	for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
		iq_no = lio_dev->linfo.txpciq[i].s.q_no;
		txq = lio_dev->instr_queue[iq_no];
		if (txq != NULL) {
			iq_stats = &txq->stats;
			pkts += iq_stats->tx_done;
			drop += iq_stats->tx_dropped;
			bytes += iq_stats->tx_tot_bytes;
		}
	}

	stats->opackets = pkts;
	stats->obytes = bytes;
	stats->oerrors = drop;

	pkts = 0;
	drop = 0;
	bytes = 0;

	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
		oq_no = lio_dev->linfo.rxpciq[i].s.q_no;
		droq = lio_dev->droq[oq_no];
		if (droq != NULL) {
			oq_stats = &droq->stats;
			pkts += oq_stats->rx_pkts_received;
			drop += (oq_stats->rx_dropped +
				 oq_stats->dropped_toomany +
				 oq_stats->dropped_nomem);
			bytes += oq_stats->rx_bytes_received;
		}
	}

	stats->ibytes = bytes;
	stats->ipackets = pkts;
	stats->ierrors = drop;

	return 0;
}
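/*
 * Illustrative sketch (not driver code): the per-queue counters
 * aggregated above surface through the basic stats API. A minimal
 * polling loop, assuming a started port and omitting error checks:
 *
 *	struct rte_eth_stats stats;
 *
 *	if (rte_eth_stats_get(port_id, &stats) == 0)
 *		printf("rx %" PRIu64 " pkts, tx %" PRIu64 " pkts\n",
 *		       stats.ipackets, stats.opackets);
 *	rte_eth_stats_reset(port_id);
 */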
static void
lio_dev_stats_reset(struct rte_eth_dev *eth_dev)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	struct lio_droq_stats *oq_stats;
	struct lio_iq_stats *iq_stats;
	struct lio_instr_queue *txq;
	struct lio_droq *droq;
	int i, iq_no, oq_no;

	for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
		iq_no = lio_dev->linfo.txpciq[i].s.q_no;
		txq = lio_dev->instr_queue[iq_no];
		if (txq != NULL) {
			iq_stats = &txq->stats;
			memset(iq_stats, 0, sizeof(struct lio_iq_stats));
		}
	}

	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
		oq_no = lio_dev->linfo.rxpciq[i].s.q_no;
		droq = lio_dev->droq[oq_no];
		if (droq != NULL) {
			oq_stats = &droq->stats;
			memset(oq_stats, 0, sizeof(struct lio_droq_stats));
		}
	}
}

static void
lio_dev_info_get(struct rte_eth_dev *eth_dev,
		 struct rte_eth_dev_info *devinfo)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);

	devinfo->pci_dev = pci_dev;

	switch (pci_dev->id.subsystem_device_id) {
	/* CN23xx 10G cards */
	case PCI_SUBSYS_DEV_ID_CN2350_210:
	case PCI_SUBSYS_DEV_ID_CN2360_210:
	case PCI_SUBSYS_DEV_ID_CN2350_210SVPN3:
	case PCI_SUBSYS_DEV_ID_CN2360_210SVPN3:
	case PCI_SUBSYS_DEV_ID_CN2350_210SVPT:
	case PCI_SUBSYS_DEV_ID_CN2360_210SVPT:
		devinfo->speed_capa = ETH_LINK_SPEED_10G;
		break;
	/* CN23xx 25G cards */
	case PCI_SUBSYS_DEV_ID_CN2350_225:
	case PCI_SUBSYS_DEV_ID_CN2360_225:
		devinfo->speed_capa = ETH_LINK_SPEED_25G;
		break;
	default:
		devinfo->speed_capa = ETH_LINK_SPEED_10G;
		lio_dev_err(lio_dev,
			    "Unknown CN23XX subsystem device id. Setting 10G as default link speed.\n");
	}

	devinfo->max_rx_queues = lio_dev->max_rx_queues;
	devinfo->max_tx_queues = lio_dev->max_tx_queues;

	devinfo->min_rx_bufsize = LIO_MIN_RX_BUF_SIZE;
	devinfo->max_rx_pktlen = LIO_MAX_RX_PKTLEN;

	devinfo->max_mac_addrs = 1;

	devinfo->rx_offload_capa = (DEV_RX_OFFLOAD_IPV4_CKSUM	|
				    DEV_RX_OFFLOAD_UDP_CKSUM	|
				    DEV_RX_OFFLOAD_TCP_CKSUM	|
				    DEV_RX_OFFLOAD_VLAN_STRIP);
	devinfo->tx_offload_capa = (DEV_TX_OFFLOAD_IPV4_CKSUM	|
				    DEV_TX_OFFLOAD_UDP_CKSUM	|
				    DEV_TX_OFFLOAD_TCP_CKSUM	|
				    DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM);

	devinfo->rx_desc_lim = lio_rx_desc_lim;
	devinfo->tx_desc_lim = lio_tx_desc_lim;

	devinfo->reta_size = LIO_RSS_MAX_TABLE_SZ;
	devinfo->hash_key_size = LIO_RSS_MAX_KEY_SZ;
	devinfo->flow_type_rss_offloads = (ETH_RSS_IPV4			|
					   ETH_RSS_NONFRAG_IPV4_TCP	|
					   ETH_RSS_IPV6			|
					   ETH_RSS_NONFRAG_IPV6_TCP	|
					   ETH_RSS_IPV6_EX		|
					   ETH_RSS_IPV6_TCP_EX);
}

static int
lio_dev_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	uint16_t pf_mtu = lio_dev->linfo.link.s.mtu;
	uint32_t frame_len = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
	struct lio_dev_ctrl_cmd ctrl_cmd;
	struct lio_ctrl_pkt ctrl_pkt;

	PMD_INIT_FUNC_TRACE();

	if (!lio_dev->intf_open) {
		lio_dev_err(lio_dev, "Port %d down, can't set MTU\n",
			    lio_dev->port_id);
		return -EINVAL;
	}

	/* check if VF MTU is within allowed range.
	 * New value should not exceed PF MTU.
	 */
	if ((mtu < ETHER_MIN_MTU) || (mtu > pf_mtu)) {
		lio_dev_err(lio_dev, "VF MTU should be >= %d and <= %d\n",
			    ETHER_MIN_MTU, pf_mtu);
		return -EINVAL;
	}

	/* flush added to prevent cmd failure
	 * in case the queue is full
	 */
	lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);

	memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
	memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));

	ctrl_cmd.eth_dev = eth_dev;
	ctrl_cmd.cond = 0;

	ctrl_pkt.ncmd.s.cmd = LIO_CMD_CHANGE_MTU;
	ctrl_pkt.ncmd.s.param1 = mtu;
	ctrl_pkt.ctrl_cmd = &ctrl_cmd;

	if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
		lio_dev_err(lio_dev, "Failed to send command to change MTU\n");
		return -1;
	}

	if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) {
		lio_dev_err(lio_dev, "Command to change MTU timed out\n");
		return -1;
	}

	if (frame_len > ETHER_MAX_LEN)
		eth_dev->data->dev_conf.rxmode.jumbo_frame = 1;
	else
		eth_dev->data->dev_conf.rxmode.jumbo_frame = 0;

	eth_dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_len;
	eth_dev->data->mtu = mtu;

	return 0;
}
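/*
 * Worked example of the frame-length arithmetic above, using standard
 * Ethernet overhead: an MTU of 1500 plus ETHER_HDR_LEN (14) and
 * ETHER_CRC_LEN (4) gives a frame_len of 1518, which equals
 * ETHER_MAX_LEN. Any MTU above 1500 therefore pushes frame_len past
 * ETHER_MAX_LEN and turns rxmode.jumbo_frame on.
 */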
static int
lio_dev_rss_reta_update(struct rte_eth_dev *eth_dev,
			struct rte_eth_rss_reta_entry64 *reta_conf,
			uint16_t reta_size)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	struct lio_rss_ctx *rss_state = &lio_dev->rss_state;
	struct lio_rss_set *rss_param;
	struct lio_dev_ctrl_cmd ctrl_cmd;
	struct lio_ctrl_pkt ctrl_pkt;
	int i, j, index;

	if (!lio_dev->intf_open) {
		lio_dev_err(lio_dev, "Port %d down, can't update reta\n",
			    lio_dev->port_id);
		return -EINVAL;
	}

	if (reta_size != LIO_RSS_MAX_TABLE_SZ) {
		lio_dev_err(lio_dev,
			    "The size of hash lookup table configured (%d) doesn't match the number hardware can support (%d)\n",
			    reta_size, LIO_RSS_MAX_TABLE_SZ);
		return -EINVAL;
	}

	/* flush added to prevent cmd failure
	 * in case the queue is full
	 */
	lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);

	memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
	memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));

	rss_param = (struct lio_rss_set *)&ctrl_pkt.udd[0];

	ctrl_cmd.eth_dev = eth_dev;
	ctrl_cmd.cond = 0;

	ctrl_pkt.ncmd.s.cmd = LIO_CMD_SET_RSS;
	ctrl_pkt.ncmd.s.more = sizeof(struct lio_rss_set) >> 3;
	ctrl_pkt.ctrl_cmd = &ctrl_cmd;

	rss_param->param.flags = 0xF;
	rss_param->param.flags &= ~LIO_RSS_PARAM_ITABLE_UNCHANGED;
	rss_param->param.itablesize = LIO_RSS_MAX_TABLE_SZ;

	for (i = 0; i < (reta_size / RTE_RETA_GROUP_SIZE); i++) {
		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++) {
			if ((reta_conf[i].mask) & ((uint64_t)1 << j)) {
				index = (i * RTE_RETA_GROUP_SIZE) + j;
				rss_state->itable[index] = reta_conf[i].reta[j];
			}
		}
	}

	rss_state->itable_size = LIO_RSS_MAX_TABLE_SZ;
	memcpy(rss_param->itable, rss_state->itable, rss_state->itable_size);

	lio_swap_8B_data((uint64_t *)rss_param, LIO_RSS_PARAM_SIZE >> 3);

	if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
		lio_dev_err(lio_dev, "Failed to set rss hash\n");
		return -1;
	}

	if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) {
		lio_dev_err(lio_dev, "Set rss hash timed out\n");
		return -1;
	}

	return 0;
}

static int
lio_dev_rss_reta_query(struct rte_eth_dev *eth_dev,
		       struct rte_eth_rss_reta_entry64 *reta_conf,
		       uint16_t reta_size)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	struct lio_rss_ctx *rss_state = &lio_dev->rss_state;
	int i, j, num;

	if (reta_size != LIO_RSS_MAX_TABLE_SZ) {
		lio_dev_err(lio_dev,
			    "The size of hash lookup table configured (%d) doesn't match the number hardware can support (%d)\n",
			    reta_size, LIO_RSS_MAX_TABLE_SZ);
		return -EINVAL;
	}

	num = reta_size / RTE_RETA_GROUP_SIZE;

	/* itable entries are single bytes while reta[] holds 16-bit
	 * queue numbers, so copy element-wise rather than with a raw
	 * memcpy.
	 */
	for (i = 0; i < num; i++) {
		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
			reta_conf[i].reta[j] =
			    rss_state->itable[(i * RTE_RETA_GROUP_SIZE) + j];
	}

	return 0;
}
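/*
 * Illustrative application-side sketch (not driver code): programming
 * the redirection table through the generic API, spreading the
 * LIO_RSS_MAX_TABLE_SZ entries round-robin over two queues. Assumes a
 * configured port; error handling omitted.
 *
 *	struct rte_eth_rss_reta_entry64 reta[LIO_RSS_MAX_TABLE_SZ /
 *					     RTE_RETA_GROUP_SIZE];
 *	int i;
 *
 *	memset(reta, 0, sizeof(reta));
 *	for (i = 0; i < LIO_RSS_MAX_TABLE_SZ; i++) {
 *		reta[i / RTE_RETA_GROUP_SIZE].mask |=
 *			(uint64_t)1 << (i % RTE_RETA_GROUP_SIZE);
 *		reta[i / RTE_RETA_GROUP_SIZE].reta[i % RTE_RETA_GROUP_SIZE] =
 *			i % 2;
 *	}
 *	rte_eth_dev_rss_reta_update(port_id, reta, LIO_RSS_MAX_TABLE_SZ);
 */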
static int
lio_dev_rss_hash_conf_get(struct rte_eth_dev *eth_dev,
			  struct rte_eth_rss_conf *rss_conf)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	struct lio_rss_ctx *rss_state = &lio_dev->rss_state;
	uint8_t *hash_key = NULL;
	uint64_t rss_hf = 0;

	if (rss_state->hash_disable) {
		lio_dev_info(lio_dev, "RSS disabled in nic\n");
		rss_conf->rss_hf = 0;
		return 0;
	}

	/* Get key value */
	hash_key = rss_conf->rss_key;
	if (hash_key != NULL)
		memcpy(hash_key, rss_state->hash_key, rss_state->hash_key_size);

	if (rss_state->ip)
		rss_hf |= ETH_RSS_IPV4;
	if (rss_state->tcp_hash)
		rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
	if (rss_state->ipv6)
		rss_hf |= ETH_RSS_IPV6;
	if (rss_state->ipv6_tcp_hash)
		rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
	if (rss_state->ipv6_ex)
		rss_hf |= ETH_RSS_IPV6_EX;
	if (rss_state->ipv6_tcp_ex_hash)
		rss_hf |= ETH_RSS_IPV6_TCP_EX;

	rss_conf->rss_hf = rss_hf;

	return 0;
}

static int
lio_dev_rss_hash_update(struct rte_eth_dev *eth_dev,
			struct rte_eth_rss_conf *rss_conf)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	struct lio_rss_ctx *rss_state = &lio_dev->rss_state;
	struct lio_rss_set *rss_param;
	struct lio_dev_ctrl_cmd ctrl_cmd;
	struct lio_ctrl_pkt ctrl_pkt;

	if (!lio_dev->intf_open) {
		lio_dev_err(lio_dev, "Port %d down, can't update hash\n",
			    lio_dev->port_id);
		return -EINVAL;
	}

	/* flush added to prevent cmd failure
	 * in case the queue is full
	 */
	lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);

	memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
	memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));

	rss_param = (struct lio_rss_set *)&ctrl_pkt.udd[0];

	ctrl_cmd.eth_dev = eth_dev;
	ctrl_cmd.cond = 0;

	ctrl_pkt.ncmd.s.cmd = LIO_CMD_SET_RSS;
	ctrl_pkt.ncmd.s.more = sizeof(struct lio_rss_set) >> 3;
	ctrl_pkt.ctrl_cmd = &ctrl_cmd;

	rss_param->param.flags = 0xF;

	if (rss_conf->rss_key) {
		rss_param->param.flags &= ~LIO_RSS_PARAM_HASH_KEY_UNCHANGED;
		rss_state->hash_key_size = LIO_RSS_MAX_KEY_SZ;
		rss_param->param.hashkeysize = LIO_RSS_MAX_KEY_SZ;
		memcpy(rss_state->hash_key, rss_conf->rss_key,
		       rss_state->hash_key_size);
		memcpy(rss_param->key, rss_state->hash_key,
		       rss_state->hash_key_size);
	}

	if ((rss_conf->rss_hf & LIO_RSS_OFFLOAD_ALL) == 0) {
		/* Can't disable rss through hash flags,
		 * if it is enabled by default during init
		 */
		if (!rss_state->hash_disable)
			return -EINVAL;

		/* This is for --disable-rss during testpmd launch */
		rss_param->param.flags |= LIO_RSS_PARAM_DISABLE_RSS;
	} else {
		uint32_t hashinfo = 0;

		/* Can't enable rss if disabled by default during init */
		if (rss_state->hash_disable)
			return -EINVAL;

		if (rss_conf->rss_hf & ETH_RSS_IPV4) {
			hashinfo |= LIO_RSS_HASH_IPV4;
			rss_state->ip = 1;
		} else {
			rss_state->ip = 0;
		}

		if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) {
			hashinfo |= LIO_RSS_HASH_TCP_IPV4;
			rss_state->tcp_hash = 1;
		} else {
			rss_state->tcp_hash = 0;
		}

		if (rss_conf->rss_hf & ETH_RSS_IPV6) {
			hashinfo |= LIO_RSS_HASH_IPV6;
			rss_state->ipv6 = 1;
		} else {
			rss_state->ipv6 = 0;
		}

		if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) {
			hashinfo |= LIO_RSS_HASH_TCP_IPV6;
			rss_state->ipv6_tcp_hash = 1;
		} else {
			rss_state->ipv6_tcp_hash = 0;
		}

		if (rss_conf->rss_hf & ETH_RSS_IPV6_EX) {
			hashinfo |= LIO_RSS_HASH_IPV6_EX;
			rss_state->ipv6_ex = 1;
		} else {
			rss_state->ipv6_ex = 0;
		}

		if (rss_conf->rss_hf & ETH_RSS_IPV6_TCP_EX) {
			hashinfo |= LIO_RSS_HASH_TCP_IPV6_EX;
			rss_state->ipv6_tcp_ex_hash = 1;
		} else {
			rss_state->ipv6_tcp_ex_hash = 0;
		}

		rss_param->param.flags &= ~LIO_RSS_PARAM_HASH_INFO_UNCHANGED;
		rss_param->param.hashinfo = hashinfo;
	}

	lio_swap_8B_data((uint64_t *)rss_param, LIO_RSS_PARAM_SIZE >> 3);

	if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
		lio_dev_err(lio_dev, "Failed to set rss hash\n");
		return -1;
	}

	if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) {
		lio_dev_err(lio_dev, "Set rss hash timed out\n");
		return -1;
	}

	return 0;
}
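/*
 * Illustrative application-side sketch (not driver code): reading the
 * active hash configuration and then narrowing it to IPv4/TCP through
 * the generic API. Assumes a configured port with RSS enabled; error
 * handling omitted.
 *
 *	struct rte_eth_rss_conf conf = { .rss_key = NULL };
 *
 *	rte_eth_dev_rss_hash_conf_get(port_id, &conf);
 *	conf.rss_hf &= (ETH_RSS_IPV4 | ETH_RSS_NONFRAG_IPV4_TCP);
 *	rte_eth_dev_rss_hash_update(port_id, &conf);
 */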
780 * 781 * @param eth_dev 782 * Pointer to the structure rte_eth_dev 783 * @param udp_tnl 784 * udp tunnel conf 785 * 786 * @return 787 * On success return 0 788 * On failure return -1 789 */ 790 static int 791 lio_dev_udp_tunnel_add(struct rte_eth_dev *eth_dev, 792 struct rte_eth_udp_tunnel *udp_tnl) 793 { 794 struct lio_device *lio_dev = LIO_DEV(eth_dev); 795 struct lio_dev_ctrl_cmd ctrl_cmd; 796 struct lio_ctrl_pkt ctrl_pkt; 797 798 if (udp_tnl == NULL) 799 return -EINVAL; 800 801 if (udp_tnl->prot_type != RTE_TUNNEL_TYPE_VXLAN) { 802 lio_dev_err(lio_dev, "Unsupported tunnel type\n"); 803 return -1; 804 } 805 806 /* flush added to prevent cmd failure 807 * incase the queue is full 808 */ 809 lio_flush_iq(lio_dev, lio_dev->instr_queue[0]); 810 811 memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt)); 812 memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd)); 813 814 ctrl_cmd.eth_dev = eth_dev; 815 ctrl_cmd.cond = 0; 816 817 ctrl_pkt.ncmd.s.cmd = LIO_CMD_VXLAN_PORT_CONFIG; 818 ctrl_pkt.ncmd.s.param1 = udp_tnl->udp_port; 819 ctrl_pkt.ncmd.s.more = LIO_CMD_VXLAN_PORT_ADD; 820 ctrl_pkt.ctrl_cmd = &ctrl_cmd; 821 822 if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) { 823 lio_dev_err(lio_dev, "Failed to send VXLAN_PORT_ADD command\n"); 824 return -1; 825 } 826 827 if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) { 828 lio_dev_err(lio_dev, "VXLAN_PORT_ADD command timed out\n"); 829 return -1; 830 } 831 832 return 0; 833 } 834 835 /** 836 * Remove vxlan dest udp port for an interface. 837 * 838 * @param eth_dev 839 * Pointer to the structure rte_eth_dev 840 * @param udp_tnl 841 * udp tunnel conf 842 * 843 * @return 844 * On success return 0 845 * On failure return -1 846 */ 847 static int 848 lio_dev_udp_tunnel_del(struct rte_eth_dev *eth_dev, 849 struct rte_eth_udp_tunnel *udp_tnl) 850 { 851 struct lio_device *lio_dev = LIO_DEV(eth_dev); 852 struct lio_dev_ctrl_cmd ctrl_cmd; 853 struct lio_ctrl_pkt ctrl_pkt; 854 855 if (udp_tnl == NULL) 856 return -EINVAL; 857 858 if (udp_tnl->prot_type != RTE_TUNNEL_TYPE_VXLAN) { 859 lio_dev_err(lio_dev, "Unsupported tunnel type\n"); 860 return -1; 861 } 862 863 /* flush added to prevent cmd failure 864 * incase the queue is full 865 */ 866 lio_flush_iq(lio_dev, lio_dev->instr_queue[0]); 867 868 memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt)); 869 memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd)); 870 871 ctrl_cmd.eth_dev = eth_dev; 872 ctrl_cmd.cond = 0; 873 874 ctrl_pkt.ncmd.s.cmd = LIO_CMD_VXLAN_PORT_CONFIG; 875 ctrl_pkt.ncmd.s.param1 = udp_tnl->udp_port; 876 ctrl_pkt.ncmd.s.more = LIO_CMD_VXLAN_PORT_DEL; 877 ctrl_pkt.ctrl_cmd = &ctrl_cmd; 878 879 if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) { 880 lio_dev_err(lio_dev, "Failed to send VXLAN_PORT_DEL command\n"); 881 return -1; 882 } 883 884 if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) { 885 lio_dev_err(lio_dev, "VXLAN_PORT_DEL command timed out\n"); 886 return -1; 887 } 888 889 return 0; 890 } 891 892 static int 893 lio_dev_vlan_filter_set(struct rte_eth_dev *eth_dev, uint16_t vlan_id, int on) 894 { 895 struct lio_device *lio_dev = LIO_DEV(eth_dev); 896 struct lio_dev_ctrl_cmd ctrl_cmd; 897 struct lio_ctrl_pkt ctrl_pkt; 898 899 if (lio_dev->linfo.vlan_is_admin_assigned) 900 return -EPERM; 901 902 /* flush added to prevent cmd failure 903 * incase the queue is full 904 */ 905 lio_flush_iq(lio_dev, lio_dev->instr_queue[0]); 906 907 memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt)); 908 memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd)); 909 910 ctrl_cmd.eth_dev = eth_dev; 911 ctrl_cmd.cond = 0; 912 913 
static int
lio_dev_vlan_filter_set(struct rte_eth_dev *eth_dev, uint16_t vlan_id, int on)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	struct lio_dev_ctrl_cmd ctrl_cmd;
	struct lio_ctrl_pkt ctrl_pkt;

	if (lio_dev->linfo.vlan_is_admin_assigned)
		return -EPERM;

	/* flush added to prevent cmd failure
	 * in case the queue is full
	 */
	lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);

	memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
	memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));

	ctrl_cmd.eth_dev = eth_dev;
	ctrl_cmd.cond = 0;

	ctrl_pkt.ncmd.s.cmd = on ?
			LIO_CMD_ADD_VLAN_FILTER : LIO_CMD_DEL_VLAN_FILTER;
	ctrl_pkt.ncmd.s.param1 = vlan_id;
	ctrl_pkt.ctrl_cmd = &ctrl_cmd;

	if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
		lio_dev_err(lio_dev, "Failed to %s VLAN port\n",
			    on ? "add" : "remove");
		return -1;
	}

	if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) {
		lio_dev_err(lio_dev, "Command to %s VLAN port timed out\n",
			    on ? "add" : "remove");
		return -1;
	}

	return 0;
}

/**
 * Atomically writes the link status information into global
 * structure rte_eth_dev.
 *
 * @param eth_dev
 *   Pointer to the structure rte_eth_dev.
 * @param link
 *   Pointer to the new link status to be written.
 *
 * @return
 *   - On success, zero.
 *   - On failure, negative value.
 */
static inline int
lio_dev_atomic_write_link_status(struct rte_eth_dev *eth_dev,
				 struct rte_eth_link *link)
{
	struct rte_eth_link *dst = &eth_dev->data->dev_link;
	struct rte_eth_link *src = link;

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
				*(uint64_t *)src) == 0)
		return -1;

	return 0;
}

/* Parallel (SWAR) population count: each step folds pair, nibble and
 * byte sums together until the total number of set bits ends up in the
 * low byte.
 */
static uint64_t
lio_hweight64(uint64_t w)
{
	uint64_t res = w - ((w >> 1) & 0x5555555555555555ul);

	res =
	    (res & 0x3333333333333333ul) + ((res >> 2) & 0x3333333333333333ul);
	res = (res + (res >> 4)) & 0x0F0F0F0F0F0F0F0Ful;
	res = res + (res >> 8);
	res = res + (res >> 16);

	return (res + (res >> 32)) & 0x00000000000000FFul;
}
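/*
 * Worked example of the SWAR popcount above with w = 0xF0 (binary
 * 11110000): the pair step leaves res = 0xA0 (pair sums 10,10,00,00
 * encoded in place), the two-bit fold gives 0x40 (nibble sums 4 and 0),
 * the four-bit fold gives 0x04, and the remaining byte folds keep 4,
 * the number of set bits. lio_dev_configure() below uses this to count
 * the queues the firmware grants in its iq/oq masks.
 */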
static int
lio_dev_link_update(struct rte_eth_dev *eth_dev,
		    int wait_to_complete __rte_unused)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	struct rte_eth_link link, old;

	/* Initialize */
	link.link_status = ETH_LINK_DOWN;
	link.link_speed = ETH_SPEED_NUM_NONE;
	link.link_duplex = ETH_LINK_HALF_DUPLEX;
	link.link_autoneg = ETH_LINK_AUTONEG;
	memset(&old, 0, sizeof(old));

	/* Return what we found */
	if (lio_dev->linfo.link.s.link_up == 0) {
		/* Interface is down */
		if (lio_dev_atomic_write_link_status(eth_dev, &link))
			return -1;
		if (link.link_status == old.link_status)
			return -1;
		return 0;
	}

	link.link_status = ETH_LINK_UP; /* Interface is up */
	link.link_duplex = ETH_LINK_FULL_DUPLEX;
	switch (lio_dev->linfo.link.s.speed) {
	case LIO_LINK_SPEED_10000:
		link.link_speed = ETH_SPEED_NUM_10G;
		break;
	case LIO_LINK_SPEED_25000:
		link.link_speed = ETH_SPEED_NUM_25G;
		break;
	default:
		link.link_speed = ETH_SPEED_NUM_NONE;
		link.link_duplex = ETH_LINK_HALF_DUPLEX;
	}

	if (lio_dev_atomic_write_link_status(eth_dev, &link))
		return -1;

	if (link.link_status == old.link_status)
		return -1;

	return 0;
}

/**
 * \brief Push interface flag changes (promiscuous/allmulticast)
 *        to the firmware
 * @param eth_dev Pointer to the structure rte_eth_dev
 */
static void
lio_change_dev_flag(struct rte_eth_dev *eth_dev)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	struct lio_dev_ctrl_cmd ctrl_cmd;
	struct lio_ctrl_pkt ctrl_pkt;

	/* flush added to prevent cmd failure
	 * in case the queue is full
	 */
	lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);

	memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
	memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));

	ctrl_cmd.eth_dev = eth_dev;
	ctrl_cmd.cond = 0;

	/* Create a ctrl pkt command to be sent to core app. */
	ctrl_pkt.ncmd.s.cmd = LIO_CMD_CHANGE_DEVFLAGS;
	ctrl_pkt.ncmd.s.param1 = lio_dev->ifflags;
	ctrl_pkt.ctrl_cmd = &ctrl_cmd;

	if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
		lio_dev_err(lio_dev, "Failed to send change flag message\n");
		return;
	}

	if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd))
		lio_dev_err(lio_dev, "Change dev flag command timed out\n");
}

static void
lio_dev_promiscuous_enable(struct rte_eth_dev *eth_dev)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);

	if (strcmp(lio_dev->firmware_version, LIO_VF_TRUST_MIN_VERSION) < 0) {
		lio_dev_err(lio_dev, "Require firmware version >= %s\n",
			    LIO_VF_TRUST_MIN_VERSION);
		return;
	}

	if (!lio_dev->intf_open) {
		lio_dev_err(lio_dev, "Port %d down, can't enable promiscuous\n",
			    lio_dev->port_id);
		return;
	}

	lio_dev->ifflags |= LIO_IFFLAG_PROMISC;
	lio_change_dev_flag(eth_dev);
}

static void
lio_dev_promiscuous_disable(struct rte_eth_dev *eth_dev)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);

	if (strcmp(lio_dev->firmware_version, LIO_VF_TRUST_MIN_VERSION) < 0) {
		lio_dev_err(lio_dev, "Require firmware version >= %s\n",
			    LIO_VF_TRUST_MIN_VERSION);
		return;
	}

	if (!lio_dev->intf_open) {
		lio_dev_err(lio_dev, "Port %d down, can't disable promiscuous\n",
			    lio_dev->port_id);
		return;
	}

	lio_dev->ifflags &= ~LIO_IFFLAG_PROMISC;
	lio_change_dev_flag(eth_dev);
}

static void
lio_dev_allmulticast_enable(struct rte_eth_dev *eth_dev)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);

	if (!lio_dev->intf_open) {
		lio_dev_err(lio_dev, "Port %d down, can't enable multicast\n",
			    lio_dev->port_id);
		return;
	}

	lio_dev->ifflags |= LIO_IFFLAG_ALLMULTI;
	lio_change_dev_flag(eth_dev);
}

static void
lio_dev_allmulticast_disable(struct rte_eth_dev *eth_dev)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);

	if (!lio_dev->intf_open) {
		lio_dev_err(lio_dev, "Port %d down, can't disable multicast\n",
			    lio_dev->port_id);
		return;
	}

	lio_dev->ifflags &= ~LIO_IFFLAG_ALLMULTI;
	lio_change_dev_flag(eth_dev);
}
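/*
 * Illustrative application-side sketch (not driver code): the four
 * handlers above are reached through the usual ethdev calls, e.g.:
 *
 *	rte_eth_promiscuous_enable(port_id);
 *	rte_eth_allmulticast_enable(port_id);
 *	...
 *	rte_eth_allmulticast_disable(port_id);
 *	rte_eth_promiscuous_disable(port_id);
 *
 * Each call only toggles a bit in lio_dev->ifflags and lets
 * lio_change_dev_flag() push the whole flag word to the firmware.
 */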
static void
lio_dev_rss_configure(struct rte_eth_dev *eth_dev)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	struct lio_rss_ctx *rss_state = &lio_dev->rss_state;
	struct rte_eth_rss_reta_entry64 reta_conf[8];
	struct rte_eth_rss_conf rss_conf;
	uint16_t i;

	/* Configure the RSS key and the RSS protocols used to compute
	 * the RSS hash of input packets.
	 */
	rss_conf = eth_dev->data->dev_conf.rx_adv_conf.rss_conf;
	if ((rss_conf.rss_hf & LIO_RSS_OFFLOAD_ALL) == 0) {
		rss_state->hash_disable = 1;
		lio_dev_rss_hash_update(eth_dev, &rss_conf);
		return;
	}

	if (rss_conf.rss_key == NULL)
		rss_conf.rss_key = lio_rss_key; /* Default hash key */

	lio_dev_rss_hash_update(eth_dev, &rss_conf);

	/* Spread the redirection table entries round-robin over the
	 * configured Rx queues.
	 */
	memset(reta_conf, 0, sizeof(reta_conf));
	for (i = 0; i < LIO_RSS_MAX_TABLE_SZ; i++) {
		uint8_t q_idx, conf_idx, reta_idx;

		q_idx = (uint8_t)((eth_dev->data->nb_rx_queues > 1) ?
				  i % eth_dev->data->nb_rx_queues : 0);
		conf_idx = i / RTE_RETA_GROUP_SIZE;
		reta_idx = i % RTE_RETA_GROUP_SIZE;
		reta_conf[conf_idx].reta[reta_idx] = q_idx;
		reta_conf[conf_idx].mask |= ((uint64_t)1 << reta_idx);
	}

	lio_dev_rss_reta_update(eth_dev, reta_conf, LIO_RSS_MAX_TABLE_SZ);
}

static void
lio_dev_mq_rx_configure(struct rte_eth_dev *eth_dev)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	struct lio_rss_ctx *rss_state = &lio_dev->rss_state;
	struct rte_eth_rss_conf rss_conf;

	switch (eth_dev->data->dev_conf.rxmode.mq_mode) {
	case ETH_MQ_RX_RSS:
		lio_dev_rss_configure(eth_dev);
		break;
	case ETH_MQ_RX_NONE:
	/* if mq_mode is none, disable rss mode. */
	default:
		memset(&rss_conf, 0, sizeof(rss_conf));
		rss_state->hash_disable = 1;
		lio_dev_rss_hash_update(eth_dev, &rss_conf);
	}
}

/**
 * Setup our receive queue/ringbuffer. This is the
 * queue the Octeon uses to send us packets and
 * responses. We are given a memory pool for our
 * packet buffers that are used to populate the receive
 * queue.
 *
 * @param eth_dev
 *    Pointer to the structure rte_eth_dev
 * @param q_no
 *    Queue number
 * @param num_rx_descs
 *    Number of entries in the queue
 * @param socket_id
 *    Where to allocate memory
 * @param rx_conf
 *    Pointer to the structure rte_eth_rxconf
 * @param mp
 *    Pointer to the packet pool
 *
 * @return
 *    - On success, return 0
 *    - On failure, return -1
 */
static int
lio_dev_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t q_no,
		       uint16_t num_rx_descs, unsigned int socket_id,
		       const struct rte_eth_rxconf *rx_conf __rte_unused,
		       struct rte_mempool *mp)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	struct rte_pktmbuf_pool_private *mbp_priv;
	uint32_t fw_mapped_oq;
	uint16_t buf_size;

	if (q_no >= lio_dev->nb_rx_queues) {
		lio_dev_err(lio_dev, "Invalid rx queue number %u\n", q_no);
		return -EINVAL;
	}

	lio_dev_dbg(lio_dev, "setting up rx queue %u\n", q_no);

	fw_mapped_oq = lio_dev->linfo.rxpciq[q_no].s.q_no;

	if ((lio_dev->droq[fw_mapped_oq]) &&
	    (num_rx_descs != lio_dev->droq[fw_mapped_oq]->max_count)) {
		lio_dev_err(lio_dev,
			    "Reconfiguring Rx descs not supported. Configure descs to same value %u or restart application\n",
			    lio_dev->droq[fw_mapped_oq]->max_count);
		return -ENOTSUP;
	}

	mbp_priv = rte_mempool_get_priv(mp);
	buf_size = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;

	if (lio_setup_droq(lio_dev, fw_mapped_oq, num_rx_descs, buf_size, mp,
			   socket_id)) {
		lio_dev_err(lio_dev, "droq allocation failed\n");
		return -1;
	}

	eth_dev->data->rx_queues[q_no] = lio_dev->droq[fw_mapped_oq];

	return 0;
}
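/*
 * Illustrative application-side sketch (not driver code): setting up
 * one Rx queue backed by a fresh mbuf pool. The pool sizing numbers are
 * arbitrary example values; error handling omitted.
 *
 *	struct rte_mempool *mp;
 *
 *	mp = rte_pktmbuf_pool_create("rx_pool", 8192, 256, 0,
 *				     RTE_MBUF_DEFAULT_BUF_SIZE,
 *				     rte_socket_id());
 *	rte_eth_rx_queue_setup(port_id, 0, CN23XX_MAX_OQ_DESCRIPTORS,
 *			       rte_socket_id(), NULL, mp);
 */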
/**
 * Release the receive queue/ringbuffer. Called by
 * the upper layers.
 *
 * @param rxq
 *    Opaque pointer to the receive queue to release
 *
 * @return
 *    - nothing
 */
void
lio_dev_rx_queue_release(void *rxq)
{
	struct lio_droq *droq = rxq;
	int oq_no;

	if (droq) {
		/* Run time queue deletion not supported */
		if (droq->lio_dev->port_configured)
			return;

		oq_no = droq->q_no;
		lio_delete_droq_queue(droq->lio_dev, oq_no);
	}
}

/**
 * Allocate and initialize SW ring. Initialize associated HW registers.
 *
 * @param eth_dev
 *   Pointer to structure rte_eth_dev
 *
 * @param q_no
 *   Queue number
 *
 * @param num_tx_descs
 *   Number of ringbuffer descriptors
 *
 * @param socket_id
 *   NUMA socket id, used for memory allocations
 *
 * @param tx_conf
 *   Pointer to the structure rte_eth_txconf
 *
 * @return
 *   - On success, return 0
 *   - On failure, return -errno value
 */
static int
lio_dev_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t q_no,
		       uint16_t num_tx_descs, unsigned int socket_id,
		       const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	int fw_mapped_iq = lio_dev->linfo.txpciq[q_no].s.q_no;
	int retval;

	if (q_no >= lio_dev->nb_tx_queues) {
		lio_dev_err(lio_dev, "Invalid tx queue number %u\n", q_no);
		return -EINVAL;
	}

	lio_dev_dbg(lio_dev, "setting up tx queue %u\n", q_no);

	if ((lio_dev->instr_queue[fw_mapped_iq] != NULL) &&
	    (num_tx_descs != lio_dev->instr_queue[fw_mapped_iq]->max_count)) {
		lio_dev_err(lio_dev,
			    "Reconfiguring Tx descs not supported. Configure descs to same value %u or restart application\n",
			    lio_dev->instr_queue[fw_mapped_iq]->max_count);
		return -ENOTSUP;
	}

	retval = lio_setup_iq(lio_dev, q_no, lio_dev->linfo.txpciq[q_no],
			      num_tx_descs, lio_dev, socket_id);

	if (retval) {
		lio_dev_err(lio_dev, "Runtime IQ(TxQ) creation failed.\n");
		return retval;
	}

	retval = lio_setup_sglists(lio_dev, q_no, fw_mapped_iq,
				lio_dev->instr_queue[fw_mapped_iq]->max_count,
				socket_id);

	if (retval) {
		lio_delete_instruction_queue(lio_dev, fw_mapped_iq);
		return retval;
	}

	eth_dev->data->tx_queues[q_no] = lio_dev->instr_queue[fw_mapped_iq];

	return 0;
}

/**
 * Release the transmit queue/ringbuffer. Called by
 * the upper layers.
 *
 * @param txq
 *    Opaque pointer to the transmit queue to release
 *
 * @return
 *    - nothing
 */
void
lio_dev_tx_queue_release(void *txq)
{
	struct lio_instr_queue *tq = txq;
	uint32_t fw_mapped_iq_no;

	if (tq) {
		/* Run time queue deletion not supported */
		if (tq->lio_dev->port_configured)
			return;

		/* Free sg_list */
		lio_delete_sglist(tq);

		fw_mapped_iq_no = tq->txpciq.s.q_no;
		lio_delete_instruction_queue(tq->lio_dev, fw_mapped_iq_no);
	}
}
/**
 * API to check link state.
 */
static void
lio_dev_get_link_status(struct rte_eth_dev *eth_dev)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	uint16_t timeout = LIO_MAX_CMD_TIMEOUT;
	struct lio_link_status_resp *resp;
	union octeon_link_status *ls;
	struct lio_soft_command *sc;
	uint32_t resp_size;

	if (!lio_dev->intf_open)
		return;

	resp_size = sizeof(struct lio_link_status_resp);
	sc = lio_alloc_soft_command(lio_dev, 0, resp_size, 0);
	if (sc == NULL)
		return;

	resp = (struct lio_link_status_resp *)sc->virtrptr;
	lio_prepare_soft_command(lio_dev, sc, LIO_OPCODE,
				 LIO_OPCODE_INFO, 0, 0, 0);

	/* Setting wait time in seconds */
	sc->wait_time = LIO_MAX_CMD_TIMEOUT / 1000;

	if (lio_send_soft_command(lio_dev, sc) == LIO_IQ_SEND_FAILED)
		goto get_status_fail;

	while ((*sc->status_word == LIO_COMPLETION_WORD_INIT) && --timeout) {
		lio_flush_iq(lio_dev, lio_dev->instr_queue[sc->iq_no]);
		rte_delay_ms(1);
	}

	if (resp->status)
		goto get_status_fail;

	ls = &resp->link_info.link;

	lio_swap_8B_data((uint64_t *)ls, sizeof(union octeon_link_status) >> 3);

	if (lio_dev->linfo.link.link_status64 != ls->link_status64) {
		if (ls->s.mtu < eth_dev->data->mtu) {
			lio_dev_info(lio_dev, "Lowered VF MTU to %d as PF MTU dropped\n",
				     ls->s.mtu);
			eth_dev->data->mtu = ls->s.mtu;
		}
		lio_dev->linfo.link.link_status64 = ls->link_status64;
		lio_dev_link_update(eth_dev, 0);
	}

	lio_free_soft_command(sc);

	return;

get_status_fail:
	lio_free_soft_command(sc);
}

/* This function will be invoked every LSC_TIMEOUT ns (100ms)
 * and will update link state if it changes.
 */
static void
lio_sync_link_state_check(void *eth_dev)
{
	struct lio_device *lio_dev =
		(((struct rte_eth_dev *)eth_dev)->data->dev_private);

	if (lio_dev->port_configured)
		lio_dev_get_link_status(eth_dev);

	/* Schedule periodic link status check.
	 * Stop the check if the interface is closed, and start it again
	 * when the interface is opened.
	 */
	if (lio_dev->intf_open)
		rte_eal_alarm_set(LIO_LSC_TIMEOUT, lio_sync_link_state_check,
				  eth_dev);
}
static int
lio_dev_start(struct rte_eth_dev *eth_dev)
{
	uint16_t mtu;
	uint32_t frame_len = eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	uint16_t timeout = LIO_MAX_CMD_TIMEOUT;
	int ret = 0;

	lio_dev_info(lio_dev, "Starting port %d\n", eth_dev->data->port_id);

	if (lio_dev->fn_list.enable_io_queues(lio_dev))
		return -1;

	if (lio_send_rx_ctrl_cmd(eth_dev, 1))
		return -1;

	/* Ready for link status updates */
	lio_dev->intf_open = 1;
	rte_mb();

	/* Configure RSS if device configured with multiple RX queues. */
	lio_dev_mq_rx_configure(eth_dev);

	/* start polling for lsc */
	ret = rte_eal_alarm_set(LIO_LSC_TIMEOUT,
				lio_sync_link_state_check,
				eth_dev);
	if (ret) {
		lio_dev_err(lio_dev,
			    "link state check handler creation failed\n");
		goto dev_lsc_handle_error;
	}

	while ((lio_dev->linfo.link.link_status64 == 0) && (--timeout))
		rte_delay_ms(1);

	if (lio_dev->linfo.link.link_status64 == 0) {
		ret = -1;
		goto dev_mtu_set_error;
	}

	mtu = (uint16_t)(frame_len - ETHER_HDR_LEN - ETHER_CRC_LEN);
	if (mtu < ETHER_MIN_MTU)
		mtu = ETHER_MIN_MTU;

	if (eth_dev->data->mtu != mtu) {
		ret = lio_dev_mtu_set(eth_dev, mtu);
		if (ret)
			goto dev_mtu_set_error;
	}

	return 0;

dev_mtu_set_error:
	rte_eal_alarm_cancel(lio_sync_link_state_check, eth_dev);

dev_lsc_handle_error:
	lio_dev->intf_open = 0;
	lio_send_rx_ctrl_cmd(eth_dev, 0);

	return ret;
}

/* Stop device and disable input/output functions */
static void
lio_dev_stop(struct rte_eth_dev *eth_dev)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);

	lio_dev_info(lio_dev, "Stopping port %d\n", eth_dev->data->port_id);
	lio_dev->intf_open = 0;
	rte_mb();

	/* Cancel callback if still running. */
	rte_eal_alarm_cancel(lio_sync_link_state_check, eth_dev);

	lio_send_rx_ctrl_cmd(eth_dev, 0);

	/* Clear recorded link status */
	lio_dev->linfo.link.link_status64 = 0;
}

static int
lio_dev_set_link_up(struct rte_eth_dev *eth_dev)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);

	if (!lio_dev->intf_open) {
		lio_dev_info(lio_dev, "Port is stopped, Start the port first\n");
		return 0;
	}

	if (lio_dev->linfo.link.s.link_up) {
		lio_dev_info(lio_dev, "Link is already UP\n");
		return 0;
	}

	if (lio_send_rx_ctrl_cmd(eth_dev, 1)) {
		lio_dev_err(lio_dev, "Unable to set Link UP\n");
		return -1;
	}

	lio_dev->linfo.link.s.link_up = 1;
	eth_dev->data->dev_link.link_status = ETH_LINK_UP;

	return 0;
}

static int
lio_dev_set_link_down(struct rte_eth_dev *eth_dev)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);

	if (!lio_dev->intf_open) {
		lio_dev_info(lio_dev, "Port is stopped, Start the port first\n");
		return 0;
	}

	if (!lio_dev->linfo.link.s.link_up) {
		lio_dev_info(lio_dev, "Link is already DOWN\n");
		return 0;
	}

	lio_dev->linfo.link.s.link_up = 0;
	eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;

	if (lio_send_rx_ctrl_cmd(eth_dev, 0)) {
		lio_dev->linfo.link.s.link_up = 1;
		eth_dev->data->dev_link.link_status = ETH_LINK_UP;
		lio_dev_err(lio_dev, "Unable to set Link Down\n");
		return -1;
	}

	return 0;
}
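/*
 * Illustrative application-side sketch (not driver code): toggling the
 * link on a started port through the generic API. Error handling
 * omitted.
 *
 *	rte_eth_dev_set_link_down(port_id);
 *	...
 *	rte_eth_dev_set_link_up(port_id);
 *
 * On this VF both calls map onto the same LIO_CMD_RX_CTL control
 * command that start/stop above use.
 */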
/**
 * Reset and stop the device. This occurs on the first
 * call to this routine. Subsequent calls will simply
 * return. NB: This will require the NIC to be rebooted.
 *
 * @param eth_dev
 *    Pointer to the structure rte_eth_dev
 *
 * @return
 *    - nothing
 */
static void
lio_dev_close(struct rte_eth_dev *eth_dev)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	uint32_t i;

	lio_dev_info(lio_dev, "closing port %d\n", eth_dev->data->port_id);

	if (lio_dev->intf_open)
		lio_dev_stop(eth_dev);

	lio_wait_for_instr_fetch(lio_dev);

	lio_dev->fn_list.disable_io_queues(lio_dev);

	cn23xx_vf_set_io_queues_off(lio_dev);

	/* Reset iq regs (IQ_DBELL).
	 * Clear sli_pktx_cnts (OQ_PKTS_SENT).
	 */
	for (i = 0; i < lio_dev->nb_rx_queues; i++) {
		struct lio_droq *droq = lio_dev->droq[i];

		if (droq == NULL)
			break;

		uint32_t pkt_count = rte_read32(droq->pkts_sent_reg);

		lio_dev_dbg(lio_dev,
			    "pending oq count %u\n", pkt_count);
		rte_write32(pkt_count, droq->pkts_sent_reg);
	}

	if (lio_dev->pci_dev->kdrv == RTE_KDRV_IGB_UIO) {
		cn23xx_vf_ask_pf_to_do_flr(lio_dev);
		rte_delay_ms(LIO_PCI_FLR_WAIT);
	}

	/* lio_free_mbox */
	lio_dev->fn_list.free_mbox(lio_dev);

	/* Free glist resources */
	rte_free(lio_dev->glist_head);
	rte_free(lio_dev->glist_lock);
	lio_dev->glist_head = NULL;
	lio_dev->glist_lock = NULL;

	lio_dev->port_configured = 0;

	/* Delete all queues */
	lio_dev_clear_queues(eth_dev);
}

/**
 * Enable tunnel rx checksum verification from firmware.
 */
static void
lio_enable_hw_tunnel_rx_checksum(struct rte_eth_dev *eth_dev)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	struct lio_dev_ctrl_cmd ctrl_cmd;
	struct lio_ctrl_pkt ctrl_pkt;

	/* flush added to prevent cmd failure
	 * in case the queue is full
	 */
	lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);

	memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
	memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));

	ctrl_cmd.eth_dev = eth_dev;
	ctrl_cmd.cond = 0;

	ctrl_pkt.ncmd.s.cmd = LIO_CMD_TNL_RX_CSUM_CTL;
	ctrl_pkt.ncmd.s.param1 = LIO_CMD_RXCSUM_ENABLE;
	ctrl_pkt.ctrl_cmd = &ctrl_cmd;

	if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
		lio_dev_err(lio_dev, "Failed to send TNL_RX_CSUM command\n");
		return;
	}

	if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd))
		lio_dev_err(lio_dev, "TNL_RX_CSUM command timed out\n");
}
/**
 * Enable checksum calculation for inner packet in a tunnel.
 */
static void
lio_enable_hw_tunnel_tx_checksum(struct rte_eth_dev *eth_dev)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	struct lio_dev_ctrl_cmd ctrl_cmd;
	struct lio_ctrl_pkt ctrl_pkt;

	/* flush added to prevent cmd failure
	 * in case the queue is full
	 */
	lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);

	memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
	memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));

	ctrl_cmd.eth_dev = eth_dev;
	ctrl_cmd.cond = 0;

	ctrl_pkt.ncmd.s.cmd = LIO_CMD_TNL_TX_CSUM_CTL;
	ctrl_pkt.ncmd.s.param1 = LIO_CMD_TXCSUM_ENABLE;
	ctrl_pkt.ctrl_cmd = &ctrl_cmd;

	if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
		lio_dev_err(lio_dev, "Failed to send TNL_TX_CSUM command\n");
		return;
	}

	if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd))
		lio_dev_err(lio_dev, "TNL_TX_CSUM command timed out\n");
}
1790 */ 1791 while ((*sc->status_word == LIO_COMPLETION_WORD_INIT) && --timeout) { 1792 lio_flush_iq(lio_dev, lio_dev->instr_queue[sc->iq_no]); 1793 lio_process_ordered_list(lio_dev); 1794 rte_delay_ms(1); 1795 } 1796 1797 retval = resp->status; 1798 if (retval) { 1799 lio_dev_err(lio_dev, "iq/oq config failed\n"); 1800 goto nic_config_fail; 1801 } 1802 1803 snprintf(lio_dev->firmware_version, LIO_FW_VERSION_LENGTH, "%s", 1804 resp->cfg_info.lio_firmware_version); 1805 1806 lio_swap_8B_data((uint64_t *)(&resp->cfg_info), 1807 sizeof(struct octeon_if_cfg_info) >> 3); 1808 1809 num_iqueues = lio_hweight64(resp->cfg_info.iqmask); 1810 num_oqueues = lio_hweight64(resp->cfg_info.oqmask); 1811 1812 if (!(num_iqueues) || !(num_oqueues)) { 1813 lio_dev_err(lio_dev, 1814 "Got bad iqueues (%016lx) or oqueues (%016lx) from firmware.\n", 1815 (unsigned long)resp->cfg_info.iqmask, 1816 (unsigned long)resp->cfg_info.oqmask); 1817 goto nic_config_fail; 1818 } 1819 1820 lio_dev_dbg(lio_dev, 1821 "interface %d, iqmask %016lx, oqmask %016lx, numiqueues %d, numoqueues %d\n", 1822 eth_dev->data->port_id, 1823 (unsigned long)resp->cfg_info.iqmask, 1824 (unsigned long)resp->cfg_info.oqmask, 1825 num_iqueues, num_oqueues); 1826 1827 lio_dev->linfo.num_rxpciq = num_oqueues; 1828 lio_dev->linfo.num_txpciq = num_iqueues; 1829 1830 for (i = 0; i < num_oqueues; i++) { 1831 lio_dev->linfo.rxpciq[i].rxpciq64 = 1832 resp->cfg_info.linfo.rxpciq[i].rxpciq64; 1833 lio_dev_dbg(lio_dev, "index %d OQ %d\n", 1834 i, lio_dev->linfo.rxpciq[i].s.q_no); 1835 } 1836 1837 for (i = 0; i < num_iqueues; i++) { 1838 lio_dev->linfo.txpciq[i].txpciq64 = 1839 resp->cfg_info.linfo.txpciq[i].txpciq64; 1840 lio_dev_dbg(lio_dev, "index %d IQ %d\n", 1841 i, lio_dev->linfo.txpciq[i].s.q_no); 1842 } 1843 1844 lio_dev->linfo.hw_addr = resp->cfg_info.linfo.hw_addr; 1845 lio_dev->linfo.gmxport = resp->cfg_info.linfo.gmxport; 1846 lio_dev->linfo.link.link_status64 = 1847 resp->cfg_info.linfo.link.link_status64; 1848 1849 /* 64-bit swap required on LE machines */ 1850 lio_swap_8B_data(&lio_dev->linfo.hw_addr, 1); 1851 for (i = 0; i < ETHER_ADDR_LEN; i++) 1852 mac[i] = *((uint8_t *)(((uint8_t *)&lio_dev->linfo.hw_addr) + 1853 2 + i)); 1854 1855 /* Copy the permanent MAC address */ 1856 ether_addr_copy((struct ether_addr *)mac, ð_dev->data->mac_addrs[0]); 1857 1858 /* enable firmware checksum support for tunnel packets */ 1859 lio_enable_hw_tunnel_rx_checksum(eth_dev); 1860 lio_enable_hw_tunnel_tx_checksum(eth_dev); 1861 1862 lio_dev->glist_lock = 1863 rte_zmalloc(NULL, sizeof(*lio_dev->glist_lock) * num_iqueues, 0); 1864 if (lio_dev->glist_lock == NULL) 1865 return -ENOMEM; 1866 1867 lio_dev->glist_head = 1868 rte_zmalloc(NULL, sizeof(*lio_dev->glist_head) * num_iqueues, 1869 0); 1870 if (lio_dev->glist_head == NULL) { 1871 rte_free(lio_dev->glist_lock); 1872 lio_dev->glist_lock = NULL; 1873 return -ENOMEM; 1874 } 1875 1876 lio_dev_link_update(eth_dev, 0); 1877 1878 lio_dev->port_configured = 1; 1879 1880 lio_free_soft_command(sc); 1881 1882 /* Disable iq_0 for reconf */ 1883 lio_dev->fn_list.disable_io_queues(lio_dev); 1884 1885 /* Reset ioq regs */ 1886 lio_dev->fn_list.setup_device_regs(lio_dev); 1887 1888 /* Free iq_0 used during init */ 1889 lio_free_instr_queue0(lio_dev); 1890 1891 return 0; 1892 1893 nic_config_fail: 1894 lio_dev_err(lio_dev, "Failed retval %d\n", retval); 1895 lio_free_soft_command(sc); 1896 lio_free_instr_queue0(lio_dev); 1897 1898 return -ENODEV; 1899 } 1900 1901 /* Define our ethernet definitions */ 1902 static const struct 
/* Define our ethernet definitions */
static const struct eth_dev_ops liovf_eth_dev_ops = {
	.dev_configure		= lio_dev_configure,
	.dev_start		= lio_dev_start,
	.dev_stop		= lio_dev_stop,
	.dev_set_link_up	= lio_dev_set_link_up,
	.dev_set_link_down	= lio_dev_set_link_down,
	.dev_close		= lio_dev_close,
	.promiscuous_enable	= lio_dev_promiscuous_enable,
	.promiscuous_disable	= lio_dev_promiscuous_disable,
	.allmulticast_enable	= lio_dev_allmulticast_enable,
	.allmulticast_disable	= lio_dev_allmulticast_disable,
	.link_update		= lio_dev_link_update,
	.stats_get		= lio_dev_stats_get,
	.xstats_get		= lio_dev_xstats_get,
	.xstats_get_names	= lio_dev_xstats_get_names,
	.stats_reset		= lio_dev_stats_reset,
	.xstats_reset		= lio_dev_xstats_reset,
	.dev_infos_get		= lio_dev_info_get,
	.vlan_filter_set	= lio_dev_vlan_filter_set,
	.rx_queue_setup		= lio_dev_rx_queue_setup,
	.rx_queue_release	= lio_dev_rx_queue_release,
	.tx_queue_setup		= lio_dev_tx_queue_setup,
	.tx_queue_release	= lio_dev_tx_queue_release,
	.reta_update		= lio_dev_rss_reta_update,
	.reta_query		= lio_dev_rss_reta_query,
	.rss_hash_conf_get	= lio_dev_rss_hash_conf_get,
	.rss_hash_update	= lio_dev_rss_hash_update,
	.udp_tunnel_port_add	= lio_dev_udp_tunnel_add,
	.udp_tunnel_port_del	= lio_dev_udp_tunnel_del,
	.mtu_set		= lio_dev_mtu_set,
};

static void
lio_check_pf_hs_response(void *lio_dev)
{
	struct lio_device *dev = lio_dev;

	/* check till response arrives */
	if (dev->pfvf_hsword.coproc_tics_per_us)
		return;

	cn23xx_vf_handle_mbox(dev);

	rte_eal_alarm_set(1, lio_check_pf_hs_response, lio_dev);
}

/**
 * \brief Identify the LIO device and map the BAR address space
 * @param lio_dev lio device
 */
static int
lio_chip_specific_setup(struct lio_device *lio_dev)
{
	struct rte_pci_device *pdev = lio_dev->pci_dev;
	uint32_t dev_id = pdev->id.device_id;
	const char *s;
	int ret = 1;

	switch (dev_id) {
	case LIO_CN23XX_VF_VID:
		lio_dev->chip_id = LIO_CN23XX_VF_VID;
		ret = cn23xx_vf_setup_device(lio_dev);
		s = "CN23XX VF";
		break;
	default:
		s = "?";
		lio_dev_err(lio_dev, "Unsupported Chip\n");
	}

	if (!ret)
		lio_dev_info(lio_dev, "DEVICE : %s\n", s);

	return ret;
}
static int
lio_first_time_init(struct lio_device *lio_dev,
		    struct rte_pci_device *pdev)
{
	int dpdk_queues;

	PMD_INIT_FUNC_TRACE();

	/* set dpdk specific pci device pointer */
	lio_dev->pci_dev = pdev;

	/* Identify the LIO type and set device ops */
	if (lio_chip_specific_setup(lio_dev)) {
		lio_dev_err(lio_dev, "Chip specific setup failed\n");
		return -1;
	}

	/* Initialize soft command buffer pool */
	if (lio_setup_sc_buffer_pool(lio_dev)) {
		lio_dev_err(lio_dev, "sc buffer pool allocation failed\n");
		return -1;
	}

	/* Initialize lists to manage the requests of different types that
	 * arrive from applications for this lio device.
	 */
	lio_setup_response_list(lio_dev);

	if (lio_dev->fn_list.setup_mbox(lio_dev)) {
		lio_dev_err(lio_dev, "Mailbox setup failed\n");
		goto error;
	}

	/* Check PF response */
	lio_check_pf_hs_response((void *)lio_dev);

	/* Do handshake and exit if incompatible PF driver */
	if (cn23xx_pfvf_handshake(lio_dev))
		goto error;

	/* Request and wait for device reset. */
	if (pdev->kdrv == RTE_KDRV_IGB_UIO) {
		cn23xx_vf_ask_pf_to_do_flr(lio_dev);
		/* FLR wait time doubled as a precaution. */
		rte_delay_ms(LIO_PCI_FLR_WAIT * 2);
	}

	if (cn23xx_vf_set_io_queues_off(lio_dev)) {
		lio_dev_err(lio_dev, "Setting io queues off failed\n");
		goto error;
	}

	if (lio_dev->fn_list.setup_device_regs(lio_dev)) {
		lio_dev_err(lio_dev, "Failed to configure device registers\n");
		goto error;
	}

	if (lio_setup_instr_queue0(lio_dev)) {
		lio_dev_err(lio_dev, "Failed to setup instruction queue 0\n");
		goto error;
	}

	dpdk_queues = (int)lio_dev->sriov_info.rings_per_vf;

	lio_dev->max_tx_queues = dpdk_queues;
	lio_dev->max_rx_queues = dpdk_queues;

	/* Enable input and output queues for this device */
	if (lio_dev->fn_list.enable_io_queues(lio_dev))
		goto error;

	return 0;

error:
	lio_free_sc_buffer_pool(lio_dev);
	if (lio_dev->mbox[0])
		lio_dev->fn_list.free_mbox(lio_dev);
	if (lio_dev->instr_queue[0])
		lio_free_instr_queue0(lio_dev);

	return -1;
}

static int
lio_eth_dev_uninit(struct rte_eth_dev *eth_dev)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);

	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -EPERM;

	/* lio_free_sc_buffer_pool */
	lio_free_sc_buffer_pool(lio_dev);

	rte_free(eth_dev->data->mac_addrs);
	eth_dev->data->mac_addrs = NULL;

	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;

	return 0;
}
static int
lio_eth_dev_init(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct lio_device *lio_dev = LIO_DEV(eth_dev);

	PMD_INIT_FUNC_TRACE();

	eth_dev->rx_pkt_burst = &lio_dev_recv_pkts;
	eth_dev->tx_pkt_burst = &lio_dev_xmit_pkts;

	/* Primary does the initialization. */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	rte_eth_copy_pci_info(eth_dev, pdev);

	if (pdev->mem_resource[0].addr) {
		lio_dev->hw_addr = pdev->mem_resource[0].addr;
	} else {
		PMD_INIT_LOG(ERR, "ERROR: Failed to map BAR0\n");
		return -ENODEV;
	}

	lio_dev->eth_dev = eth_dev;
	/* set lio device print string */
	snprintf(lio_dev->dev_string, sizeof(lio_dev->dev_string),
		 "%s[%02x:%02x.%x]", pdev->driver->driver.name,
		 pdev->addr.bus, pdev->addr.devid, pdev->addr.function);

	lio_dev->port_id = eth_dev->data->port_id;

	if (lio_first_time_init(lio_dev, pdev)) {
		lio_dev_err(lio_dev, "Device init failed\n");
		return -EINVAL;
	}

	eth_dev->dev_ops = &liovf_eth_dev_ops;
	eth_dev->data->mac_addrs = rte_zmalloc("lio", ETHER_ADDR_LEN, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		lio_dev_err(lio_dev,
			    "MAC addresses memory allocation failed\n");
		eth_dev->dev_ops = NULL;
		eth_dev->rx_pkt_burst = NULL;
		eth_dev->tx_pkt_burst = NULL;
		return -ENOMEM;
	}

	rte_atomic64_set(&lio_dev->status, LIO_DEV_RUNNING);
	rte_wmb();

	lio_dev->port_configured = 0;
	/* Always allow unicast packets */
	lio_dev->ifflags |= LIO_IFFLAG_UNICAST;

	return 0;
}

static int
lio_eth_dev_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
		      struct rte_pci_device *pci_dev)
{
	struct rte_eth_dev *eth_dev;
	int ret;

	eth_dev = rte_eth_dev_pci_allocate(pci_dev,
					   sizeof(struct lio_device));
	if (eth_dev == NULL)
		return -ENOMEM;

	ret = lio_eth_dev_init(eth_dev);
	if (ret)
		rte_eth_dev_pci_release(eth_dev);

	return ret;
}

static int
lio_eth_dev_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev,
					      lio_eth_dev_uninit);
}

/* Set of PCI devices this driver supports */
static const struct rte_pci_id pci_id_liovf_map[] = {
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, LIO_CN23XX_VF_VID) },
	{ .vendor_id = 0, /* sentinel */ }
};

static struct rte_pci_driver rte_liovf_pmd = {
	.id_table	= pci_id_liovf_map,
	.drv_flags	= RTE_PCI_DRV_NEED_MAPPING,
	.probe		= lio_eth_dev_pci_probe,
	.remove		= lio_eth_dev_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_liovf, rte_liovf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_liovf, pci_id_liovf_map);
RTE_PMD_REGISTER_KMOD_DEP(net_liovf, "* igb_uio | vfio-pci");