1d30ea906Sjfb8856606 /* SPDX-License-Identifier: BSD-3-Clause
2d30ea906Sjfb8856606 * Copyright(c) 2017 Cavium, Inc
32bfe3f2eSlogwang */
42bfe3f2eSlogwang
54418919fSjohnjiang #include <rte_string_fns.h>
6d30ea906Sjfb8856606 #include <rte_ethdev_driver.h>
72bfe3f2eSlogwang #include <rte_ethdev_pci.h>
82bfe3f2eSlogwang #include <rte_cycles.h>
92bfe3f2eSlogwang #include <rte_malloc.h>
102bfe3f2eSlogwang #include <rte_alarm.h>
112bfe3f2eSlogwang #include <rte_ether.h>
122bfe3f2eSlogwang
132bfe3f2eSlogwang #include "lio_logs.h"
142bfe3f2eSlogwang #include "lio_23xx_vf.h"
152bfe3f2eSlogwang #include "lio_ethdev.h"
162bfe3f2eSlogwang #include "lio_rxtx.h"
172bfe3f2eSlogwang
/* Default RSS key in use.
 * 40-byte hash key programmed into the NIC when the application does
 * not supply its own (presumably consumed by the firmware's Toeplitz
 * hash — TODO confirm against firmware docs).
 */
static uint8_t lio_rss_key[40] = {
	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
};
262bfe3f2eSlogwang
/* RX (output queue) descriptor-count limits reported via
 * lio_dev_info_get(); ring sizes must lie in [nb_min, nb_max] with no
 * alignment constraint (nb_align = 1).
 */
static const struct rte_eth_desc_lim lio_rx_desc_lim = {
	.nb_max		= CN23XX_MAX_OQ_DESCRIPTORS,
	.nb_min		= CN23XX_MIN_OQ_DESCRIPTORS,
	.nb_align	= 1,
};

/* TX (instruction queue) descriptor-count limits, same reporting path. */
static const struct rte_eth_desc_lim lio_tx_desc_lim = {
	.nb_max		= CN23XX_MAX_IQ_DESCRIPTORS,
	.nb_min		= CN23XX_MIN_IQ_DESCRIPTORS,
	.nb_align	= 1,
};
382bfe3f2eSlogwang
392bfe3f2eSlogwang /* Wait for control command to reach nic. */
402bfe3f2eSlogwang static uint16_t
lio_wait_for_ctrl_cmd(struct lio_device * lio_dev,struct lio_dev_ctrl_cmd * ctrl_cmd)412bfe3f2eSlogwang lio_wait_for_ctrl_cmd(struct lio_device *lio_dev,
422bfe3f2eSlogwang struct lio_dev_ctrl_cmd *ctrl_cmd)
432bfe3f2eSlogwang {
442bfe3f2eSlogwang uint16_t timeout = LIO_MAX_CMD_TIMEOUT;
452bfe3f2eSlogwang
462bfe3f2eSlogwang while ((ctrl_cmd->cond == 0) && --timeout) {
472bfe3f2eSlogwang lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);
482bfe3f2eSlogwang rte_delay_ms(1);
492bfe3f2eSlogwang }
502bfe3f2eSlogwang
512bfe3f2eSlogwang return !timeout;
522bfe3f2eSlogwang }
532bfe3f2eSlogwang
542bfe3f2eSlogwang /**
552bfe3f2eSlogwang * \brief Send Rx control command
562bfe3f2eSlogwang * @param eth_dev Pointer to the structure rte_eth_dev
572bfe3f2eSlogwang * @param start_stop whether to start or stop
582bfe3f2eSlogwang */
592bfe3f2eSlogwang static int
lio_send_rx_ctrl_cmd(struct rte_eth_dev * eth_dev,int start_stop)602bfe3f2eSlogwang lio_send_rx_ctrl_cmd(struct rte_eth_dev *eth_dev, int start_stop)
612bfe3f2eSlogwang {
622bfe3f2eSlogwang struct lio_device *lio_dev = LIO_DEV(eth_dev);
632bfe3f2eSlogwang struct lio_dev_ctrl_cmd ctrl_cmd;
642bfe3f2eSlogwang struct lio_ctrl_pkt ctrl_pkt;
652bfe3f2eSlogwang
662bfe3f2eSlogwang /* flush added to prevent cmd failure
672bfe3f2eSlogwang * incase the queue is full
682bfe3f2eSlogwang */
692bfe3f2eSlogwang lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);
702bfe3f2eSlogwang
712bfe3f2eSlogwang memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
722bfe3f2eSlogwang memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));
732bfe3f2eSlogwang
742bfe3f2eSlogwang ctrl_cmd.eth_dev = eth_dev;
752bfe3f2eSlogwang ctrl_cmd.cond = 0;
762bfe3f2eSlogwang
772bfe3f2eSlogwang ctrl_pkt.ncmd.s.cmd = LIO_CMD_RX_CTL;
782bfe3f2eSlogwang ctrl_pkt.ncmd.s.param1 = start_stop;
792bfe3f2eSlogwang ctrl_pkt.ctrl_cmd = &ctrl_cmd;
802bfe3f2eSlogwang
812bfe3f2eSlogwang if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
822bfe3f2eSlogwang lio_dev_err(lio_dev, "Failed to send RX Control message\n");
832bfe3f2eSlogwang return -1;
842bfe3f2eSlogwang }
852bfe3f2eSlogwang
862bfe3f2eSlogwang if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) {
872bfe3f2eSlogwang lio_dev_err(lio_dev, "RX Control command timed out\n");
882bfe3f2eSlogwang return -1;
892bfe3f2eSlogwang }
902bfe3f2eSlogwang
912bfe3f2eSlogwang return 0;
922bfe3f2eSlogwang }
932bfe3f2eSlogwang
/* store statistics names and its offset in stats structure */
struct rte_lio_xstats_name_off {
	/* xstat name reported to the application */
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	/* byte offset of the counter inside the firmware stats reply
	 * (see rte_lio_stats_strings[] for the rx/tx layout)
	 */
	unsigned int offset;
};
992bfe3f2eSlogwang
/* Name/offset table driving lio_dev_xstats_get().  The firmware reply
 * lays out struct octeon_rx_stats first, immediately followed by
 * struct octeon_tx_stats; hence every tx_* entry adds
 * sizeof(struct octeon_rx_stats) to its in-struct offset.
 */
static const struct rte_lio_xstats_name_off rte_lio_stats_strings[] = {
	{"rx_pkts", offsetof(struct octeon_rx_stats, total_rcvd)},
	{"rx_bytes", offsetof(struct octeon_rx_stats, bytes_rcvd)},
	{"rx_broadcast_pkts", offsetof(struct octeon_rx_stats, total_bcst)},
	{"rx_multicast_pkts", offsetof(struct octeon_rx_stats, total_mcst)},
	{"rx_flow_ctrl_pkts", offsetof(struct octeon_rx_stats, ctl_rcvd)},
	{"rx_fifo_err", offsetof(struct octeon_rx_stats, fifo_err)},
	{"rx_dmac_drop", offsetof(struct octeon_rx_stats, dmac_drop)},
	{"rx_fcs_err", offsetof(struct octeon_rx_stats, fcs_err)},
	{"rx_jabber_err", offsetof(struct octeon_rx_stats, jabber_err)},
	{"rx_l2_err", offsetof(struct octeon_rx_stats, l2_err)},
	{"rx_vxlan_pkts", offsetof(struct octeon_rx_stats, fw_rx_vxlan)},
	{"rx_vxlan_err", offsetof(struct octeon_rx_stats, fw_rx_vxlan_err)},
	{"rx_lro_pkts", offsetof(struct octeon_rx_stats, fw_lro_pkts)},
	{"tx_pkts", (offsetof(struct octeon_tx_stats, total_pkts_sent)) +
						sizeof(struct octeon_rx_stats)},
	{"tx_bytes", (offsetof(struct octeon_tx_stats, total_bytes_sent)) +
						sizeof(struct octeon_rx_stats)},
	{"tx_broadcast_pkts",
		(offsetof(struct octeon_tx_stats, bcast_pkts_sent)) +
			sizeof(struct octeon_rx_stats)},
	{"tx_multicast_pkts",
		(offsetof(struct octeon_tx_stats, mcast_pkts_sent)) +
			sizeof(struct octeon_rx_stats)},
	{"tx_flow_ctrl_pkts", (offsetof(struct octeon_tx_stats, ctl_sent)) +
					sizeof(struct octeon_rx_stats)},
	{"tx_fifo_err", (offsetof(struct octeon_tx_stats, fifo_err)) +
					sizeof(struct octeon_rx_stats)},
	{"tx_total_collisions", (offsetof(struct octeon_tx_stats,
					  total_collisions)) +
					sizeof(struct octeon_rx_stats)},
	{"tx_tso", (offsetof(struct octeon_tx_stats, fw_tso)) +
					sizeof(struct octeon_rx_stats)},
	{"tx_vxlan_pkts", (offsetof(struct octeon_tx_stats, fw_tx_vxlan)) +
					sizeof(struct octeon_rx_stats)},
};

/* Number of extended stats exposed by this PMD. */
#define LIO_NB_XSTATS	RTE_DIM(rte_lio_stats_strings)
1382bfe3f2eSlogwang
/* Get hw stats of the port.
 *
 * Issues a LIO_OPCODE_PORT_STATS soft command to the firmware, polls
 * for completion, byte-swaps the reply and copies each counter out via
 * the rte_lio_stats_strings[] offset table.
 *
 * Returns LIO_NB_XSTATS on success (also when n is too small, per the
 * ethdev xstats contract where the required count is returned),
 * -EINVAL if the port is down, -ENOMEM on allocation failure, and -1
 * on firmware/send failure.
 */
static int
lio_dev_xstats_get(struct rte_eth_dev *eth_dev, struct rte_eth_xstat *xstats,
		   unsigned int n)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	uint16_t timeout = LIO_MAX_CMD_TIMEOUT;
	struct octeon_link_stats *hw_stats;
	struct lio_link_stats_resp *resp;
	struct lio_soft_command *sc;
	uint32_t resp_size;
	unsigned int i;
	int retval;

	if (!lio_dev->intf_open) {
		lio_dev_err(lio_dev, "Port %d down\n",
			    lio_dev->port_id);
		return -EINVAL;
	}

	/* Caller's array too small: report how many entries are needed. */
	if (n < LIO_NB_XSTATS)
		return LIO_NB_XSTATS;

	resp_size = sizeof(struct lio_link_stats_resp);
	sc = lio_alloc_soft_command(lio_dev, 0, resp_size, 0);
	if (sc == NULL)
		return -ENOMEM;

	/* Response buffer lives inside the soft command allocation. */
	resp = (struct lio_link_stats_resp *)sc->virtrptr;
	lio_prepare_soft_command(lio_dev, sc, LIO_OPCODE,
				 LIO_OPCODE_PORT_STATS, 0, 0, 0);

	/* Setting wait time in seconds */
	sc->wait_time = LIO_MAX_CMD_TIMEOUT / 1000;

	retval = lio_send_soft_command(lio_dev, sc);
	if (retval == LIO_IQ_SEND_FAILED) {
		lio_dev_err(lio_dev, "failed to get port stats from firmware. status: %x\n",
			    retval);
		goto get_stats_fail;
	}

	/* Busy-wait for the firmware to overwrite the completion word,
	 * flushing the instruction queue and the ordered list so the
	 * response can actually be processed.
	 */
	while ((*sc->status_word == LIO_COMPLETION_WORD_INIT) && --timeout) {
		lio_flush_iq(lio_dev, lio_dev->instr_queue[sc->iq_no]);
		lio_process_ordered_list(lio_dev);
		rte_delay_ms(1);
	}

	retval = resp->status;
	if (retval) {
		lio_dev_err(lio_dev, "failed to get port stats from firmware\n");
		goto get_stats_fail;
	}

	/* Firmware counters arrive in the other endianness; swap each
	 * 8-byte word in place before reading them.
	 */
	lio_swap_8B_data((uint64_t *)(&resp->link_stats),
			 sizeof(struct octeon_link_stats) >> 3);

	hw_stats = &resp->link_stats;

	for (i = 0; i < LIO_NB_XSTATS; i++) {
		xstats[i].id = i;
		xstats[i].value =
		    *(uint64_t *)(((char *)hw_stats) +
				  rte_lio_stats_strings[i].offset);
	}

	lio_free_soft_command(sc);

	return LIO_NB_XSTATS;

get_stats_fail:
	lio_free_soft_command(sc);

	return -1;
}
2142bfe3f2eSlogwang
2152bfe3f2eSlogwang static int
lio_dev_xstats_get_names(struct rte_eth_dev * eth_dev,struct rte_eth_xstat_name * xstats_names,unsigned limit __rte_unused)2162bfe3f2eSlogwang lio_dev_xstats_get_names(struct rte_eth_dev *eth_dev,
2172bfe3f2eSlogwang struct rte_eth_xstat_name *xstats_names,
2182bfe3f2eSlogwang unsigned limit __rte_unused)
2192bfe3f2eSlogwang {
2202bfe3f2eSlogwang struct lio_device *lio_dev = LIO_DEV(eth_dev);
2212bfe3f2eSlogwang unsigned int i;
2222bfe3f2eSlogwang
2232bfe3f2eSlogwang if (!lio_dev->intf_open) {
2242bfe3f2eSlogwang lio_dev_err(lio_dev, "Port %d down\n",
2252bfe3f2eSlogwang lio_dev->port_id);
2262bfe3f2eSlogwang return -EINVAL;
2272bfe3f2eSlogwang }
2282bfe3f2eSlogwang
2292bfe3f2eSlogwang if (xstats_names == NULL)
2302bfe3f2eSlogwang return LIO_NB_XSTATS;
2312bfe3f2eSlogwang
2322bfe3f2eSlogwang /* Note: limit checked in rte_eth_xstats_names() */
2332bfe3f2eSlogwang
2342bfe3f2eSlogwang for (i = 0; i < LIO_NB_XSTATS; i++) {
2352bfe3f2eSlogwang snprintf(xstats_names[i].name, sizeof(xstats_names[i].name),
2362bfe3f2eSlogwang "%s", rte_lio_stats_strings[i].name);
2372bfe3f2eSlogwang }
2382bfe3f2eSlogwang
2392bfe3f2eSlogwang return LIO_NB_XSTATS;
2402bfe3f2eSlogwang }
2412bfe3f2eSlogwang
2422bfe3f2eSlogwang /* Reset hw stats for the port */
2434418919fSjohnjiang static int
lio_dev_xstats_reset(struct rte_eth_dev * eth_dev)2442bfe3f2eSlogwang lio_dev_xstats_reset(struct rte_eth_dev *eth_dev)
2452bfe3f2eSlogwang {
2462bfe3f2eSlogwang struct lio_device *lio_dev = LIO_DEV(eth_dev);
2472bfe3f2eSlogwang struct lio_dev_ctrl_cmd ctrl_cmd;
2482bfe3f2eSlogwang struct lio_ctrl_pkt ctrl_pkt;
2494418919fSjohnjiang int ret;
2502bfe3f2eSlogwang
2512bfe3f2eSlogwang if (!lio_dev->intf_open) {
2522bfe3f2eSlogwang lio_dev_err(lio_dev, "Port %d down\n",
2532bfe3f2eSlogwang lio_dev->port_id);
2544418919fSjohnjiang return -EINVAL;
2552bfe3f2eSlogwang }
2562bfe3f2eSlogwang
2572bfe3f2eSlogwang /* flush added to prevent cmd failure
2582bfe3f2eSlogwang * incase the queue is full
2592bfe3f2eSlogwang */
2602bfe3f2eSlogwang lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);
2612bfe3f2eSlogwang
2622bfe3f2eSlogwang memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
2632bfe3f2eSlogwang memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));
2642bfe3f2eSlogwang
2652bfe3f2eSlogwang ctrl_cmd.eth_dev = eth_dev;
2662bfe3f2eSlogwang ctrl_cmd.cond = 0;
2672bfe3f2eSlogwang
2682bfe3f2eSlogwang ctrl_pkt.ncmd.s.cmd = LIO_CMD_CLEAR_STATS;
2692bfe3f2eSlogwang ctrl_pkt.ctrl_cmd = &ctrl_cmd;
2702bfe3f2eSlogwang
2714418919fSjohnjiang ret = lio_send_ctrl_pkt(lio_dev, &ctrl_pkt);
2724418919fSjohnjiang if (ret != 0) {
2732bfe3f2eSlogwang lio_dev_err(lio_dev, "Failed to send clear stats command\n");
2744418919fSjohnjiang return ret;
2752bfe3f2eSlogwang }
2762bfe3f2eSlogwang
2774418919fSjohnjiang ret = lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd);
2784418919fSjohnjiang if (ret != 0) {
2792bfe3f2eSlogwang lio_dev_err(lio_dev, "Clear stats command timed out\n");
2804418919fSjohnjiang return ret;
2812bfe3f2eSlogwang }
2822bfe3f2eSlogwang
2832bfe3f2eSlogwang /* clear stored per queue stats */
2844418919fSjohnjiang RTE_FUNC_PTR_OR_ERR_RET(*eth_dev->dev_ops->stats_reset, 0);
2854418919fSjohnjiang return (*eth_dev->dev_ops->stats_reset)(eth_dev);
2862bfe3f2eSlogwang }
2872bfe3f2eSlogwang
2882bfe3f2eSlogwang /* Retrieve the device statistics (# packets in/out, # bytes in/out, etc */
2892bfe3f2eSlogwang static int
lio_dev_stats_get(struct rte_eth_dev * eth_dev,struct rte_eth_stats * stats)2902bfe3f2eSlogwang lio_dev_stats_get(struct rte_eth_dev *eth_dev,
2912bfe3f2eSlogwang struct rte_eth_stats *stats)
2922bfe3f2eSlogwang {
2932bfe3f2eSlogwang struct lio_device *lio_dev = LIO_DEV(eth_dev);
2942bfe3f2eSlogwang struct lio_droq_stats *oq_stats;
2952bfe3f2eSlogwang struct lio_iq_stats *iq_stats;
2962bfe3f2eSlogwang struct lio_instr_queue *txq;
2972bfe3f2eSlogwang struct lio_droq *droq;
2982bfe3f2eSlogwang int i, iq_no, oq_no;
2992bfe3f2eSlogwang uint64_t bytes = 0;
3002bfe3f2eSlogwang uint64_t pkts = 0;
3012bfe3f2eSlogwang uint64_t drop = 0;
3022bfe3f2eSlogwang
3032bfe3f2eSlogwang for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
3042bfe3f2eSlogwang iq_no = lio_dev->linfo.txpciq[i].s.q_no;
3052bfe3f2eSlogwang txq = lio_dev->instr_queue[iq_no];
3062bfe3f2eSlogwang if (txq != NULL) {
3072bfe3f2eSlogwang iq_stats = &txq->stats;
3082bfe3f2eSlogwang pkts += iq_stats->tx_done;
3092bfe3f2eSlogwang drop += iq_stats->tx_dropped;
3102bfe3f2eSlogwang bytes += iq_stats->tx_tot_bytes;
3112bfe3f2eSlogwang }
3122bfe3f2eSlogwang }
3132bfe3f2eSlogwang
3142bfe3f2eSlogwang stats->opackets = pkts;
3152bfe3f2eSlogwang stats->obytes = bytes;
3162bfe3f2eSlogwang stats->oerrors = drop;
3172bfe3f2eSlogwang
3182bfe3f2eSlogwang pkts = 0;
3192bfe3f2eSlogwang drop = 0;
3202bfe3f2eSlogwang bytes = 0;
3212bfe3f2eSlogwang
3222bfe3f2eSlogwang for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
3232bfe3f2eSlogwang oq_no = lio_dev->linfo.rxpciq[i].s.q_no;
3242bfe3f2eSlogwang droq = lio_dev->droq[oq_no];
3252bfe3f2eSlogwang if (droq != NULL) {
3262bfe3f2eSlogwang oq_stats = &droq->stats;
3272bfe3f2eSlogwang pkts += oq_stats->rx_pkts_received;
3282bfe3f2eSlogwang drop += (oq_stats->rx_dropped +
3292bfe3f2eSlogwang oq_stats->dropped_toomany +
3302bfe3f2eSlogwang oq_stats->dropped_nomem);
3312bfe3f2eSlogwang bytes += oq_stats->rx_bytes_received;
3322bfe3f2eSlogwang }
3332bfe3f2eSlogwang }
3342bfe3f2eSlogwang stats->ibytes = bytes;
3352bfe3f2eSlogwang stats->ipackets = pkts;
3362bfe3f2eSlogwang stats->ierrors = drop;
3372bfe3f2eSlogwang
3382bfe3f2eSlogwang return 0;
3392bfe3f2eSlogwang }
3402bfe3f2eSlogwang
3414418919fSjohnjiang static int
lio_dev_stats_reset(struct rte_eth_dev * eth_dev)3422bfe3f2eSlogwang lio_dev_stats_reset(struct rte_eth_dev *eth_dev)
3432bfe3f2eSlogwang {
3442bfe3f2eSlogwang struct lio_device *lio_dev = LIO_DEV(eth_dev);
3452bfe3f2eSlogwang struct lio_droq_stats *oq_stats;
3462bfe3f2eSlogwang struct lio_iq_stats *iq_stats;
3472bfe3f2eSlogwang struct lio_instr_queue *txq;
3482bfe3f2eSlogwang struct lio_droq *droq;
3492bfe3f2eSlogwang int i, iq_no, oq_no;
3502bfe3f2eSlogwang
3512bfe3f2eSlogwang for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
3522bfe3f2eSlogwang iq_no = lio_dev->linfo.txpciq[i].s.q_no;
3532bfe3f2eSlogwang txq = lio_dev->instr_queue[iq_no];
3542bfe3f2eSlogwang if (txq != NULL) {
3552bfe3f2eSlogwang iq_stats = &txq->stats;
3562bfe3f2eSlogwang memset(iq_stats, 0, sizeof(struct lio_iq_stats));
3572bfe3f2eSlogwang }
3582bfe3f2eSlogwang }
3592bfe3f2eSlogwang
3602bfe3f2eSlogwang for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
3612bfe3f2eSlogwang oq_no = lio_dev->linfo.rxpciq[i].s.q_no;
3622bfe3f2eSlogwang droq = lio_dev->droq[oq_no];
3632bfe3f2eSlogwang if (droq != NULL) {
3642bfe3f2eSlogwang oq_stats = &droq->stats;
3652bfe3f2eSlogwang memset(oq_stats, 0, sizeof(struct lio_droq_stats));
3662bfe3f2eSlogwang }
3672bfe3f2eSlogwang }
3684418919fSjohnjiang
3694418919fSjohnjiang return 0;
3702bfe3f2eSlogwang }
3712bfe3f2eSlogwang
3724418919fSjohnjiang static int
lio_dev_info_get(struct rte_eth_dev * eth_dev,struct rte_eth_dev_info * devinfo)3732bfe3f2eSlogwang lio_dev_info_get(struct rte_eth_dev *eth_dev,
3742bfe3f2eSlogwang struct rte_eth_dev_info *devinfo)
3752bfe3f2eSlogwang {
3762bfe3f2eSlogwang struct lio_device *lio_dev = LIO_DEV(eth_dev);
3772bfe3f2eSlogwang struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
3782bfe3f2eSlogwang
3792bfe3f2eSlogwang switch (pci_dev->id.subsystem_device_id) {
3802bfe3f2eSlogwang /* CN23xx 10G cards */
3812bfe3f2eSlogwang case PCI_SUBSYS_DEV_ID_CN2350_210:
3822bfe3f2eSlogwang case PCI_SUBSYS_DEV_ID_CN2360_210:
3832bfe3f2eSlogwang case PCI_SUBSYS_DEV_ID_CN2350_210SVPN3:
3842bfe3f2eSlogwang case PCI_SUBSYS_DEV_ID_CN2360_210SVPN3:
3852bfe3f2eSlogwang case PCI_SUBSYS_DEV_ID_CN2350_210SVPT:
3862bfe3f2eSlogwang case PCI_SUBSYS_DEV_ID_CN2360_210SVPT:
3872bfe3f2eSlogwang devinfo->speed_capa = ETH_LINK_SPEED_10G;
3882bfe3f2eSlogwang break;
3892bfe3f2eSlogwang /* CN23xx 25G cards */
3902bfe3f2eSlogwang case PCI_SUBSYS_DEV_ID_CN2350_225:
3912bfe3f2eSlogwang case PCI_SUBSYS_DEV_ID_CN2360_225:
3922bfe3f2eSlogwang devinfo->speed_capa = ETH_LINK_SPEED_25G;
3932bfe3f2eSlogwang break;
3942bfe3f2eSlogwang default:
3952bfe3f2eSlogwang devinfo->speed_capa = ETH_LINK_SPEED_10G;
3962bfe3f2eSlogwang lio_dev_err(lio_dev,
3972bfe3f2eSlogwang "Unknown CN23XX subsystem device id. Setting 10G as default link speed.\n");
3984418919fSjohnjiang return -EINVAL;
3992bfe3f2eSlogwang }
4002bfe3f2eSlogwang
4012bfe3f2eSlogwang devinfo->max_rx_queues = lio_dev->max_rx_queues;
4022bfe3f2eSlogwang devinfo->max_tx_queues = lio_dev->max_tx_queues;
4032bfe3f2eSlogwang
4042bfe3f2eSlogwang devinfo->min_rx_bufsize = LIO_MIN_RX_BUF_SIZE;
4052bfe3f2eSlogwang devinfo->max_rx_pktlen = LIO_MAX_RX_PKTLEN;
4062bfe3f2eSlogwang
4072bfe3f2eSlogwang devinfo->max_mac_addrs = 1;
4082bfe3f2eSlogwang
4092bfe3f2eSlogwang devinfo->rx_offload_capa = (DEV_RX_OFFLOAD_IPV4_CKSUM |
4102bfe3f2eSlogwang DEV_RX_OFFLOAD_UDP_CKSUM |
4112bfe3f2eSlogwang DEV_RX_OFFLOAD_TCP_CKSUM |
4124418919fSjohnjiang DEV_RX_OFFLOAD_VLAN_STRIP |
4134418919fSjohnjiang DEV_RX_OFFLOAD_RSS_HASH);
4142bfe3f2eSlogwang devinfo->tx_offload_capa = (DEV_TX_OFFLOAD_IPV4_CKSUM |
4152bfe3f2eSlogwang DEV_TX_OFFLOAD_UDP_CKSUM |
4162bfe3f2eSlogwang DEV_TX_OFFLOAD_TCP_CKSUM |
4172bfe3f2eSlogwang DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM);
4182bfe3f2eSlogwang
4192bfe3f2eSlogwang devinfo->rx_desc_lim = lio_rx_desc_lim;
4202bfe3f2eSlogwang devinfo->tx_desc_lim = lio_tx_desc_lim;
4212bfe3f2eSlogwang
4222bfe3f2eSlogwang devinfo->reta_size = LIO_RSS_MAX_TABLE_SZ;
4232bfe3f2eSlogwang devinfo->hash_key_size = LIO_RSS_MAX_KEY_SZ;
4242bfe3f2eSlogwang devinfo->flow_type_rss_offloads = (ETH_RSS_IPV4 |
4252bfe3f2eSlogwang ETH_RSS_NONFRAG_IPV4_TCP |
4262bfe3f2eSlogwang ETH_RSS_IPV6 |
4272bfe3f2eSlogwang ETH_RSS_NONFRAG_IPV6_TCP |
4282bfe3f2eSlogwang ETH_RSS_IPV6_EX |
4292bfe3f2eSlogwang ETH_RSS_IPV6_TCP_EX);
4304418919fSjohnjiang return 0;
4312bfe3f2eSlogwang }
4322bfe3f2eSlogwang
4332bfe3f2eSlogwang static int
lio_dev_mtu_set(struct rte_eth_dev * eth_dev,uint16_t mtu)4342bfe3f2eSlogwang lio_dev_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
4352bfe3f2eSlogwang {
4362bfe3f2eSlogwang struct lio_device *lio_dev = LIO_DEV(eth_dev);
4372bfe3f2eSlogwang uint16_t pf_mtu = lio_dev->linfo.link.s.mtu;
4384418919fSjohnjiang uint32_t frame_len = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
4392bfe3f2eSlogwang struct lio_dev_ctrl_cmd ctrl_cmd;
4402bfe3f2eSlogwang struct lio_ctrl_pkt ctrl_pkt;
4412bfe3f2eSlogwang
4422bfe3f2eSlogwang PMD_INIT_FUNC_TRACE();
4432bfe3f2eSlogwang
4442bfe3f2eSlogwang if (!lio_dev->intf_open) {
4452bfe3f2eSlogwang lio_dev_err(lio_dev, "Port %d down, can't set MTU\n",
4462bfe3f2eSlogwang lio_dev->port_id);
4472bfe3f2eSlogwang return -EINVAL;
4482bfe3f2eSlogwang }
4492bfe3f2eSlogwang
4502bfe3f2eSlogwang /* check if VF MTU is within allowed range.
4512bfe3f2eSlogwang * New value should not exceed PF MTU.
4522bfe3f2eSlogwang */
4534418919fSjohnjiang if (mtu < RTE_ETHER_MIN_MTU || mtu > pf_mtu) {
4542bfe3f2eSlogwang lio_dev_err(lio_dev, "VF MTU should be >= %d and <= %d\n",
4554418919fSjohnjiang RTE_ETHER_MIN_MTU, pf_mtu);
4562bfe3f2eSlogwang return -EINVAL;
4572bfe3f2eSlogwang }
4582bfe3f2eSlogwang
4592bfe3f2eSlogwang /* flush added to prevent cmd failure
4602bfe3f2eSlogwang * incase the queue is full
4612bfe3f2eSlogwang */
4622bfe3f2eSlogwang lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);
4632bfe3f2eSlogwang
4642bfe3f2eSlogwang memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
4652bfe3f2eSlogwang memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));
4662bfe3f2eSlogwang
4672bfe3f2eSlogwang ctrl_cmd.eth_dev = eth_dev;
4682bfe3f2eSlogwang ctrl_cmd.cond = 0;
4692bfe3f2eSlogwang
4702bfe3f2eSlogwang ctrl_pkt.ncmd.s.cmd = LIO_CMD_CHANGE_MTU;
4712bfe3f2eSlogwang ctrl_pkt.ncmd.s.param1 = mtu;
4722bfe3f2eSlogwang ctrl_pkt.ctrl_cmd = &ctrl_cmd;
4732bfe3f2eSlogwang
4742bfe3f2eSlogwang if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
4752bfe3f2eSlogwang lio_dev_err(lio_dev, "Failed to send command to change MTU\n");
4762bfe3f2eSlogwang return -1;
4772bfe3f2eSlogwang }
4782bfe3f2eSlogwang
4792bfe3f2eSlogwang if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) {
4802bfe3f2eSlogwang lio_dev_err(lio_dev, "Command to change MTU timed out\n");
4812bfe3f2eSlogwang return -1;
4822bfe3f2eSlogwang }
4832bfe3f2eSlogwang
4844418919fSjohnjiang if (frame_len > RTE_ETHER_MAX_LEN)
485d30ea906Sjfb8856606 eth_dev->data->dev_conf.rxmode.offloads |=
486d30ea906Sjfb8856606 DEV_RX_OFFLOAD_JUMBO_FRAME;
4872bfe3f2eSlogwang else
488d30ea906Sjfb8856606 eth_dev->data->dev_conf.rxmode.offloads &=
489d30ea906Sjfb8856606 ~DEV_RX_OFFLOAD_JUMBO_FRAME;
4902bfe3f2eSlogwang
4912bfe3f2eSlogwang eth_dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_len;
4922bfe3f2eSlogwang eth_dev->data->mtu = mtu;
4932bfe3f2eSlogwang
4942bfe3f2eSlogwang return 0;
4952bfe3f2eSlogwang }
4962bfe3f2eSlogwang
/* Update the RSS redirection table.
 *
 * Copies the masked entries of reta_conf into the cached software
 * itable, then pushes the full table to the firmware inside a
 * LIO_CMD_SET_RSS control packet (key/hash-info fields flagged as
 * unchanged).  reta_size must equal LIO_RSS_MAX_TABLE_SZ.
 * Returns 0 on success, -EINVAL on validation failure, -1 on
 * send/timeout failure.
 */
static int
lio_dev_rss_reta_update(struct rte_eth_dev *eth_dev,
			struct rte_eth_rss_reta_entry64 *reta_conf,
			uint16_t reta_size)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	struct lio_rss_ctx *rss_state = &lio_dev->rss_state;
	struct lio_rss_set *rss_param;
	struct lio_dev_ctrl_cmd ctrl_cmd;
	struct lio_ctrl_pkt ctrl_pkt;
	int i, j, index;

	if (!lio_dev->intf_open) {
		lio_dev_err(lio_dev, "Port %d down, can't update reta\n",
			    lio_dev->port_id);
		return -EINVAL;
	}

	if (reta_size != LIO_RSS_MAX_TABLE_SZ) {
		lio_dev_err(lio_dev,
			    "The size of hash lookup table configured (%d) doesn't match the number hardware can supported (%d)\n",
			    reta_size, LIO_RSS_MAX_TABLE_SZ);
		return -EINVAL;
	}

	/* flush added to prevent cmd failure
	 * incase the queue is full
	 */
	lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);

	memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
	memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));

	/* The RSS parameter block is carried in the packet's UDD area. */
	rss_param = (struct lio_rss_set *)&ctrl_pkt.udd[0];

	ctrl_cmd.eth_dev = eth_dev;
	ctrl_cmd.cond = 0;

	ctrl_pkt.ncmd.s.cmd = LIO_CMD_SET_RSS;
	ctrl_pkt.ncmd.s.more = sizeof(struct lio_rss_set) >> 3;
	ctrl_pkt.ctrl_cmd = &ctrl_cmd;

	/* Start with all "unchanged" flags set, then clear only the
	 * indirection-table flag since that is what we are updating.
	 */
	rss_param->param.flags = 0xF;
	rss_param->param.flags &= ~LIO_RSS_PARAM_ITABLE_UNCHANGED;
	rss_param->param.itablesize = LIO_RSS_MAX_TABLE_SZ;

	/* Merge only the entries selected by each group's mask into the
	 * cached software copy of the table.
	 */
	for (i = 0; i < (reta_size / RTE_RETA_GROUP_SIZE); i++) {
		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++) {
			if ((reta_conf[i].mask) & ((uint64_t)1 << j)) {
				index = (i * RTE_RETA_GROUP_SIZE) + j;
				rss_state->itable[index] = reta_conf[i].reta[j];
			}
		}
	}

	rss_state->itable_size = LIO_RSS_MAX_TABLE_SZ;
	memcpy(rss_param->itable, rss_state->itable, rss_state->itable_size);

	/* Firmware expects the parameter block byte-swapped per 8B word. */
	lio_swap_8B_data((uint64_t *)rss_param, LIO_RSS_PARAM_SIZE >> 3);

	if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
		lio_dev_err(lio_dev, "Failed to set rss hash\n");
		return -1;
	}

	if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) {
		lio_dev_err(lio_dev, "Set rss hash timed out\n");
		return -1;
	}

	return 0;
}
5692bfe3f2eSlogwang
5702bfe3f2eSlogwang static int
lio_dev_rss_reta_query(struct rte_eth_dev * eth_dev,struct rte_eth_rss_reta_entry64 * reta_conf,uint16_t reta_size)5712bfe3f2eSlogwang lio_dev_rss_reta_query(struct rte_eth_dev *eth_dev,
5722bfe3f2eSlogwang struct rte_eth_rss_reta_entry64 *reta_conf,
5732bfe3f2eSlogwang uint16_t reta_size)
5742bfe3f2eSlogwang {
5752bfe3f2eSlogwang struct lio_device *lio_dev = LIO_DEV(eth_dev);
5762bfe3f2eSlogwang struct lio_rss_ctx *rss_state = &lio_dev->rss_state;
5772bfe3f2eSlogwang int i, num;
5782bfe3f2eSlogwang
5792bfe3f2eSlogwang if (reta_size != LIO_RSS_MAX_TABLE_SZ) {
5802bfe3f2eSlogwang lio_dev_err(lio_dev,
5812bfe3f2eSlogwang "The size of hash lookup table configured (%d) doesn't match the number hardware can supported (%d)\n",
5822bfe3f2eSlogwang reta_size, LIO_RSS_MAX_TABLE_SZ);
5832bfe3f2eSlogwang return -EINVAL;
5842bfe3f2eSlogwang }
5852bfe3f2eSlogwang
5862bfe3f2eSlogwang num = reta_size / RTE_RETA_GROUP_SIZE;
5872bfe3f2eSlogwang
5882bfe3f2eSlogwang for (i = 0; i < num; i++) {
5892bfe3f2eSlogwang memcpy(reta_conf->reta,
5902bfe3f2eSlogwang &rss_state->itable[i * RTE_RETA_GROUP_SIZE],
5912bfe3f2eSlogwang RTE_RETA_GROUP_SIZE);
5922bfe3f2eSlogwang reta_conf++;
5932bfe3f2eSlogwang }
5942bfe3f2eSlogwang
5952bfe3f2eSlogwang return 0;
5962bfe3f2eSlogwang }
5972bfe3f2eSlogwang
5982bfe3f2eSlogwang static int
lio_dev_rss_hash_conf_get(struct rte_eth_dev * eth_dev,struct rte_eth_rss_conf * rss_conf)5992bfe3f2eSlogwang lio_dev_rss_hash_conf_get(struct rte_eth_dev *eth_dev,
6002bfe3f2eSlogwang struct rte_eth_rss_conf *rss_conf)
6012bfe3f2eSlogwang {
6022bfe3f2eSlogwang struct lio_device *lio_dev = LIO_DEV(eth_dev);
6032bfe3f2eSlogwang struct lio_rss_ctx *rss_state = &lio_dev->rss_state;
6042bfe3f2eSlogwang uint8_t *hash_key = NULL;
6052bfe3f2eSlogwang uint64_t rss_hf = 0;
6062bfe3f2eSlogwang
6072bfe3f2eSlogwang if (rss_state->hash_disable) {
6082bfe3f2eSlogwang lio_dev_info(lio_dev, "RSS disabled in nic\n");
6092bfe3f2eSlogwang rss_conf->rss_hf = 0;
6102bfe3f2eSlogwang return 0;
6112bfe3f2eSlogwang }
6122bfe3f2eSlogwang
6132bfe3f2eSlogwang /* Get key value */
6142bfe3f2eSlogwang hash_key = rss_conf->rss_key;
6152bfe3f2eSlogwang if (hash_key != NULL)
6162bfe3f2eSlogwang memcpy(hash_key, rss_state->hash_key, rss_state->hash_key_size);
6172bfe3f2eSlogwang
6182bfe3f2eSlogwang if (rss_state->ip)
6192bfe3f2eSlogwang rss_hf |= ETH_RSS_IPV4;
6202bfe3f2eSlogwang if (rss_state->tcp_hash)
6212bfe3f2eSlogwang rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
6222bfe3f2eSlogwang if (rss_state->ipv6)
6232bfe3f2eSlogwang rss_hf |= ETH_RSS_IPV6;
6242bfe3f2eSlogwang if (rss_state->ipv6_tcp_hash)
6252bfe3f2eSlogwang rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
6262bfe3f2eSlogwang if (rss_state->ipv6_ex)
6272bfe3f2eSlogwang rss_hf |= ETH_RSS_IPV6_EX;
6282bfe3f2eSlogwang if (rss_state->ipv6_tcp_ex_hash)
6292bfe3f2eSlogwang rss_hf |= ETH_RSS_IPV6_TCP_EX;
6302bfe3f2eSlogwang
6312bfe3f2eSlogwang rss_conf->rss_hf = rss_hf;
6322bfe3f2eSlogwang
6332bfe3f2eSlogwang return 0;
6342bfe3f2eSlogwang }
6352bfe3f2eSlogwang
6362bfe3f2eSlogwang static int
lio_dev_rss_hash_update(struct rte_eth_dev *eth_dev,
			struct rte_eth_rss_conf *rss_conf)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	struct lio_rss_ctx *rss_state = &lio_dev->rss_state;
	struct lio_rss_set *rss_param;
	struct lio_dev_ctrl_cmd ctrl_cmd;
	struct lio_ctrl_pkt ctrl_pkt;

	/* Hash updates travel over the control queue, which is only
	 * serviced while the interface is open.
	 */
	if (!lio_dev->intf_open) {
		lio_dev_err(lio_dev, "Port %d down, can't update hash\n",
			    lio_dev->port_id);
		return -EINVAL;
	}

	/* flush added to prevent cmd failure
	 * incase the queue is full
	 */
	lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);

	memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
	memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));

	/* The RSS parameter block is carried in the udd (unsolicited
	 * data) area of the control packet.
	 */
	rss_param = (struct lio_rss_set *)&ctrl_pkt.udd[0];

	ctrl_cmd.eth_dev = eth_dev;
	ctrl_cmd.cond = 0;

	ctrl_pkt.ncmd.s.cmd = LIO_CMD_SET_RSS;
	/* 'more' is the payload length in 8-byte words. */
	ctrl_pkt.ncmd.s.more = sizeof(struct lio_rss_set) >> 3;
	ctrl_pkt.ctrl_cmd = &ctrl_cmd;

	/* Start with every "unchanged" flag set; clear only the pieces
	 * actually being updated below.
	 */
	rss_param->param.flags = 0xF;

	if (rss_conf->rss_key) {
		rss_param->param.flags &= ~LIO_RSS_PARAM_HASH_KEY_UNCHANGED;
		rss_state->hash_key_size = LIO_RSS_MAX_KEY_SZ;
		rss_param->param.hashkeysize = LIO_RSS_MAX_KEY_SZ;
		/* NOTE(review): always copies LIO_RSS_MAX_KEY_SZ bytes and
		 * ignores rss_conf->rss_key_len — confirm callers always
		 * provide a key at least that large.
		 */
		memcpy(rss_state->hash_key, rss_conf->rss_key,
		       rss_state->hash_key_size);
		memcpy(rss_param->key, rss_state->hash_key,
		       rss_state->hash_key_size);
	}

	if ((rss_conf->rss_hf & LIO_RSS_OFFLOAD_ALL) == 0) {
		/* Can't disable rss through hash flags,
		 * if it is enabled by default during init
		 */
		if (!rss_state->hash_disable)
			return -EINVAL;

		/* This is for --disable-rss during testpmd launch */
		rss_param->param.flags |= LIO_RSS_PARAM_DISABLE_RSS;
	} else {
		uint32_t hashinfo = 0;

		/* Can't enable rss if disabled by default during init */
		if (rss_state->hash_disable)
			return -EINVAL;

		/* Translate DPDK rss_hf bits into firmware hash bits,
		 * mirroring each selection into the cached rss_state.
		 */
		if (rss_conf->rss_hf & ETH_RSS_IPV4) {
			hashinfo |= LIO_RSS_HASH_IPV4;
			rss_state->ip = 1;
		} else {
			rss_state->ip = 0;
		}

		if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) {
			hashinfo |= LIO_RSS_HASH_TCP_IPV4;
			rss_state->tcp_hash = 1;
		} else {
			rss_state->tcp_hash = 0;
		}

		if (rss_conf->rss_hf & ETH_RSS_IPV6) {
			hashinfo |= LIO_RSS_HASH_IPV6;
			rss_state->ipv6 = 1;
		} else {
			rss_state->ipv6 = 0;
		}

		if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) {
			hashinfo |= LIO_RSS_HASH_TCP_IPV6;
			rss_state->ipv6_tcp_hash = 1;
		} else {
			rss_state->ipv6_tcp_hash = 0;
		}

		if (rss_conf->rss_hf & ETH_RSS_IPV6_EX) {
			hashinfo |= LIO_RSS_HASH_IPV6_EX;
			rss_state->ipv6_ex = 1;
		} else {
			rss_state->ipv6_ex = 0;
		}

		if (rss_conf->rss_hf & ETH_RSS_IPV6_TCP_EX) {
			hashinfo |= LIO_RSS_HASH_TCP_IPV6_EX;
			rss_state->ipv6_tcp_ex_hash = 1;
		} else {
			rss_state->ipv6_tcp_ex_hash = 0;
		}

		rss_param->param.flags &= ~LIO_RSS_PARAM_HASH_INFO_UNCHANGED;
		rss_param->param.hashinfo = hashinfo;
	}

	/* Convert the payload to firmware byte order (8-byte words);
	 * presumably an endianness swap — see lio_swap_8B_data.
	 */
	lio_swap_8B_data((uint64_t *)rss_param, LIO_RSS_PARAM_SIZE >> 3);

	if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
		lio_dev_err(lio_dev, "Failed to set rss hash\n");
		return -1;
	}

	if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) {
		lio_dev_err(lio_dev, "Set rss hash timed out\n");
		return -1;
	}

	return 0;
}
7572bfe3f2eSlogwang
7582bfe3f2eSlogwang /**
7592bfe3f2eSlogwang * Add vxlan dest udp port for an interface.
7602bfe3f2eSlogwang *
7612bfe3f2eSlogwang * @param eth_dev
7622bfe3f2eSlogwang * Pointer to the structure rte_eth_dev
7632bfe3f2eSlogwang * @param udp_tnl
7642bfe3f2eSlogwang * udp tunnel conf
7652bfe3f2eSlogwang *
7662bfe3f2eSlogwang * @return
7672bfe3f2eSlogwang * On success return 0
7682bfe3f2eSlogwang * On failure return -1
7692bfe3f2eSlogwang */
7702bfe3f2eSlogwang static int
lio_dev_udp_tunnel_add(struct rte_eth_dev * eth_dev,struct rte_eth_udp_tunnel * udp_tnl)7712bfe3f2eSlogwang lio_dev_udp_tunnel_add(struct rte_eth_dev *eth_dev,
7722bfe3f2eSlogwang struct rte_eth_udp_tunnel *udp_tnl)
7732bfe3f2eSlogwang {
7742bfe3f2eSlogwang struct lio_device *lio_dev = LIO_DEV(eth_dev);
7752bfe3f2eSlogwang struct lio_dev_ctrl_cmd ctrl_cmd;
7762bfe3f2eSlogwang struct lio_ctrl_pkt ctrl_pkt;
7772bfe3f2eSlogwang
7782bfe3f2eSlogwang if (udp_tnl == NULL)
7792bfe3f2eSlogwang return -EINVAL;
7802bfe3f2eSlogwang
7812bfe3f2eSlogwang if (udp_tnl->prot_type != RTE_TUNNEL_TYPE_VXLAN) {
7822bfe3f2eSlogwang lio_dev_err(lio_dev, "Unsupported tunnel type\n");
7832bfe3f2eSlogwang return -1;
7842bfe3f2eSlogwang }
7852bfe3f2eSlogwang
7862bfe3f2eSlogwang /* flush added to prevent cmd failure
7872bfe3f2eSlogwang * incase the queue is full
7882bfe3f2eSlogwang */
7892bfe3f2eSlogwang lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);
7902bfe3f2eSlogwang
7912bfe3f2eSlogwang memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
7922bfe3f2eSlogwang memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));
7932bfe3f2eSlogwang
7942bfe3f2eSlogwang ctrl_cmd.eth_dev = eth_dev;
7952bfe3f2eSlogwang ctrl_cmd.cond = 0;
7962bfe3f2eSlogwang
7972bfe3f2eSlogwang ctrl_pkt.ncmd.s.cmd = LIO_CMD_VXLAN_PORT_CONFIG;
7982bfe3f2eSlogwang ctrl_pkt.ncmd.s.param1 = udp_tnl->udp_port;
7992bfe3f2eSlogwang ctrl_pkt.ncmd.s.more = LIO_CMD_VXLAN_PORT_ADD;
8002bfe3f2eSlogwang ctrl_pkt.ctrl_cmd = &ctrl_cmd;
8012bfe3f2eSlogwang
8022bfe3f2eSlogwang if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
8032bfe3f2eSlogwang lio_dev_err(lio_dev, "Failed to send VXLAN_PORT_ADD command\n");
8042bfe3f2eSlogwang return -1;
8052bfe3f2eSlogwang }
8062bfe3f2eSlogwang
8072bfe3f2eSlogwang if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) {
8082bfe3f2eSlogwang lio_dev_err(lio_dev, "VXLAN_PORT_ADD command timed out\n");
8092bfe3f2eSlogwang return -1;
8102bfe3f2eSlogwang }
8112bfe3f2eSlogwang
8122bfe3f2eSlogwang return 0;
8132bfe3f2eSlogwang }
8142bfe3f2eSlogwang
8152bfe3f2eSlogwang /**
8162bfe3f2eSlogwang * Remove vxlan dest udp port for an interface.
8172bfe3f2eSlogwang *
8182bfe3f2eSlogwang * @param eth_dev
8192bfe3f2eSlogwang * Pointer to the structure rte_eth_dev
8202bfe3f2eSlogwang * @param udp_tnl
8212bfe3f2eSlogwang * udp tunnel conf
8222bfe3f2eSlogwang *
8232bfe3f2eSlogwang * @return
8242bfe3f2eSlogwang * On success return 0
8252bfe3f2eSlogwang * On failure return -1
8262bfe3f2eSlogwang */
8272bfe3f2eSlogwang static int
lio_dev_udp_tunnel_del(struct rte_eth_dev * eth_dev,struct rte_eth_udp_tunnel * udp_tnl)8282bfe3f2eSlogwang lio_dev_udp_tunnel_del(struct rte_eth_dev *eth_dev,
8292bfe3f2eSlogwang struct rte_eth_udp_tunnel *udp_tnl)
8302bfe3f2eSlogwang {
8312bfe3f2eSlogwang struct lio_device *lio_dev = LIO_DEV(eth_dev);
8322bfe3f2eSlogwang struct lio_dev_ctrl_cmd ctrl_cmd;
8332bfe3f2eSlogwang struct lio_ctrl_pkt ctrl_pkt;
8342bfe3f2eSlogwang
8352bfe3f2eSlogwang if (udp_tnl == NULL)
8362bfe3f2eSlogwang return -EINVAL;
8372bfe3f2eSlogwang
8382bfe3f2eSlogwang if (udp_tnl->prot_type != RTE_TUNNEL_TYPE_VXLAN) {
8392bfe3f2eSlogwang lio_dev_err(lio_dev, "Unsupported tunnel type\n");
8402bfe3f2eSlogwang return -1;
8412bfe3f2eSlogwang }
8422bfe3f2eSlogwang
8432bfe3f2eSlogwang /* flush added to prevent cmd failure
8442bfe3f2eSlogwang * incase the queue is full
8452bfe3f2eSlogwang */
8462bfe3f2eSlogwang lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);
8472bfe3f2eSlogwang
8482bfe3f2eSlogwang memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
8492bfe3f2eSlogwang memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));
8502bfe3f2eSlogwang
8512bfe3f2eSlogwang ctrl_cmd.eth_dev = eth_dev;
8522bfe3f2eSlogwang ctrl_cmd.cond = 0;
8532bfe3f2eSlogwang
8542bfe3f2eSlogwang ctrl_pkt.ncmd.s.cmd = LIO_CMD_VXLAN_PORT_CONFIG;
8552bfe3f2eSlogwang ctrl_pkt.ncmd.s.param1 = udp_tnl->udp_port;
8562bfe3f2eSlogwang ctrl_pkt.ncmd.s.more = LIO_CMD_VXLAN_PORT_DEL;
8572bfe3f2eSlogwang ctrl_pkt.ctrl_cmd = &ctrl_cmd;
8582bfe3f2eSlogwang
8592bfe3f2eSlogwang if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
8602bfe3f2eSlogwang lio_dev_err(lio_dev, "Failed to send VXLAN_PORT_DEL command\n");
8612bfe3f2eSlogwang return -1;
8622bfe3f2eSlogwang }
8632bfe3f2eSlogwang
8642bfe3f2eSlogwang if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) {
8652bfe3f2eSlogwang lio_dev_err(lio_dev, "VXLAN_PORT_DEL command timed out\n");
8662bfe3f2eSlogwang return -1;
8672bfe3f2eSlogwang }
8682bfe3f2eSlogwang
8692bfe3f2eSlogwang return 0;
8702bfe3f2eSlogwang }
8712bfe3f2eSlogwang
8722bfe3f2eSlogwang static int
lio_dev_vlan_filter_set(struct rte_eth_dev * eth_dev,uint16_t vlan_id,int on)8732bfe3f2eSlogwang lio_dev_vlan_filter_set(struct rte_eth_dev *eth_dev, uint16_t vlan_id, int on)
8742bfe3f2eSlogwang {
8752bfe3f2eSlogwang struct lio_device *lio_dev = LIO_DEV(eth_dev);
8762bfe3f2eSlogwang struct lio_dev_ctrl_cmd ctrl_cmd;
8772bfe3f2eSlogwang struct lio_ctrl_pkt ctrl_pkt;
8782bfe3f2eSlogwang
8792bfe3f2eSlogwang if (lio_dev->linfo.vlan_is_admin_assigned)
8802bfe3f2eSlogwang return -EPERM;
8812bfe3f2eSlogwang
8822bfe3f2eSlogwang /* flush added to prevent cmd failure
8832bfe3f2eSlogwang * incase the queue is full
8842bfe3f2eSlogwang */
8852bfe3f2eSlogwang lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);
8862bfe3f2eSlogwang
8872bfe3f2eSlogwang memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
8882bfe3f2eSlogwang memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));
8892bfe3f2eSlogwang
8902bfe3f2eSlogwang ctrl_cmd.eth_dev = eth_dev;
8912bfe3f2eSlogwang ctrl_cmd.cond = 0;
8922bfe3f2eSlogwang
8932bfe3f2eSlogwang ctrl_pkt.ncmd.s.cmd = on ?
8942bfe3f2eSlogwang LIO_CMD_ADD_VLAN_FILTER : LIO_CMD_DEL_VLAN_FILTER;
8952bfe3f2eSlogwang ctrl_pkt.ncmd.s.param1 = vlan_id;
8962bfe3f2eSlogwang ctrl_pkt.ctrl_cmd = &ctrl_cmd;
8972bfe3f2eSlogwang
8982bfe3f2eSlogwang if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
8992bfe3f2eSlogwang lio_dev_err(lio_dev, "Failed to %s VLAN port\n",
9002bfe3f2eSlogwang on ? "add" : "remove");
9012bfe3f2eSlogwang return -1;
9022bfe3f2eSlogwang }
9032bfe3f2eSlogwang
9042bfe3f2eSlogwang if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) {
9052bfe3f2eSlogwang lio_dev_err(lio_dev, "Command to %s VLAN port timed out\n",
9062bfe3f2eSlogwang on ? "add" : "remove");
9072bfe3f2eSlogwang return -1;
9082bfe3f2eSlogwang }
9092bfe3f2eSlogwang
9102bfe3f2eSlogwang return 0;
9112bfe3f2eSlogwang }
9122bfe3f2eSlogwang
/* Population count (Hamming weight) of a 64-bit word. */
static uint64_t
lio_hweight64(uint64_t w)
{
	uint64_t count = 0;

	/* Kernighan's trick: each iteration clears the lowest set bit,
	 * so the loop runs once per set bit.
	 */
	while (w != 0) {
		w &= w - 1;
		count++;
	}

	return count;
}
9262bfe3f2eSlogwang
9272bfe3f2eSlogwang static int
lio_dev_link_update(struct rte_eth_dev * eth_dev,int wait_to_complete __rte_unused)9282bfe3f2eSlogwang lio_dev_link_update(struct rte_eth_dev *eth_dev,
9292bfe3f2eSlogwang int wait_to_complete __rte_unused)
9302bfe3f2eSlogwang {
9312bfe3f2eSlogwang struct lio_device *lio_dev = LIO_DEV(eth_dev);
932d30ea906Sjfb8856606 struct rte_eth_link link;
9332bfe3f2eSlogwang
9342bfe3f2eSlogwang /* Initialize */
935d30ea906Sjfb8856606 memset(&link, 0, sizeof(link));
9362bfe3f2eSlogwang link.link_status = ETH_LINK_DOWN;
9372bfe3f2eSlogwang link.link_speed = ETH_SPEED_NUM_NONE;
9382bfe3f2eSlogwang link.link_duplex = ETH_LINK_HALF_DUPLEX;
9392bfe3f2eSlogwang link.link_autoneg = ETH_LINK_AUTONEG;
9402bfe3f2eSlogwang
9412bfe3f2eSlogwang /* Return what we found */
9422bfe3f2eSlogwang if (lio_dev->linfo.link.s.link_up == 0) {
9432bfe3f2eSlogwang /* Interface is down */
944d30ea906Sjfb8856606 return rte_eth_linkstatus_set(eth_dev, &link);
9452bfe3f2eSlogwang }
9462bfe3f2eSlogwang
9472bfe3f2eSlogwang link.link_status = ETH_LINK_UP; /* Interface is up */
9482bfe3f2eSlogwang link.link_duplex = ETH_LINK_FULL_DUPLEX;
9492bfe3f2eSlogwang switch (lio_dev->linfo.link.s.speed) {
9502bfe3f2eSlogwang case LIO_LINK_SPEED_10000:
9512bfe3f2eSlogwang link.link_speed = ETH_SPEED_NUM_10G;
9522bfe3f2eSlogwang break;
9532bfe3f2eSlogwang case LIO_LINK_SPEED_25000:
9542bfe3f2eSlogwang link.link_speed = ETH_SPEED_NUM_25G;
9552bfe3f2eSlogwang break;
9562bfe3f2eSlogwang default:
9572bfe3f2eSlogwang link.link_speed = ETH_SPEED_NUM_NONE;
9582bfe3f2eSlogwang link.link_duplex = ETH_LINK_HALF_DUPLEX;
9592bfe3f2eSlogwang }
9602bfe3f2eSlogwang
961d30ea906Sjfb8856606 return rte_eth_linkstatus_set(eth_dev, &link);
9622bfe3f2eSlogwang }
9632bfe3f2eSlogwang
9642bfe3f2eSlogwang /**
9652bfe3f2eSlogwang * \brief Net device enable, disable allmulticast
9662bfe3f2eSlogwang * @param eth_dev Pointer to the structure rte_eth_dev
9674418919fSjohnjiang *
9684418919fSjohnjiang * @return
9694418919fSjohnjiang * On success return 0
9704418919fSjohnjiang * On failure return negative errno
9712bfe3f2eSlogwang */
9724418919fSjohnjiang static int
lio_change_dev_flag(struct rte_eth_dev * eth_dev)9732bfe3f2eSlogwang lio_change_dev_flag(struct rte_eth_dev *eth_dev)
9742bfe3f2eSlogwang {
9752bfe3f2eSlogwang struct lio_device *lio_dev = LIO_DEV(eth_dev);
9762bfe3f2eSlogwang struct lio_dev_ctrl_cmd ctrl_cmd;
9772bfe3f2eSlogwang struct lio_ctrl_pkt ctrl_pkt;
9782bfe3f2eSlogwang
9792bfe3f2eSlogwang /* flush added to prevent cmd failure
9802bfe3f2eSlogwang * incase the queue is full
9812bfe3f2eSlogwang */
9822bfe3f2eSlogwang lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);
9832bfe3f2eSlogwang
9842bfe3f2eSlogwang memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
9852bfe3f2eSlogwang memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));
9862bfe3f2eSlogwang
9872bfe3f2eSlogwang ctrl_cmd.eth_dev = eth_dev;
9882bfe3f2eSlogwang ctrl_cmd.cond = 0;
9892bfe3f2eSlogwang
9902bfe3f2eSlogwang /* Create a ctrl pkt command to be sent to core app. */
9912bfe3f2eSlogwang ctrl_pkt.ncmd.s.cmd = LIO_CMD_CHANGE_DEVFLAGS;
9922bfe3f2eSlogwang ctrl_pkt.ncmd.s.param1 = lio_dev->ifflags;
9932bfe3f2eSlogwang ctrl_pkt.ctrl_cmd = &ctrl_cmd;
9942bfe3f2eSlogwang
9952bfe3f2eSlogwang if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
9962bfe3f2eSlogwang lio_dev_err(lio_dev, "Failed to send change flag message\n");
9974418919fSjohnjiang return -EAGAIN;
9982bfe3f2eSlogwang }
9992bfe3f2eSlogwang
10004418919fSjohnjiang if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) {
10012bfe3f2eSlogwang lio_dev_err(lio_dev, "Change dev flag command timed out\n");
10024418919fSjohnjiang return -ETIMEDOUT;
10032bfe3f2eSlogwang }
10042bfe3f2eSlogwang
10054418919fSjohnjiang return 0;
10064418919fSjohnjiang }
10074418919fSjohnjiang
10084418919fSjohnjiang static int
lio_dev_promiscuous_enable(struct rte_eth_dev * eth_dev)10092bfe3f2eSlogwang lio_dev_promiscuous_enable(struct rte_eth_dev *eth_dev)
10102bfe3f2eSlogwang {
10112bfe3f2eSlogwang struct lio_device *lio_dev = LIO_DEV(eth_dev);
10122bfe3f2eSlogwang
10132bfe3f2eSlogwang if (strcmp(lio_dev->firmware_version, LIO_VF_TRUST_MIN_VERSION) < 0) {
10142bfe3f2eSlogwang lio_dev_err(lio_dev, "Require firmware version >= %s\n",
10152bfe3f2eSlogwang LIO_VF_TRUST_MIN_VERSION);
10164418919fSjohnjiang return -EAGAIN;
10172bfe3f2eSlogwang }
10182bfe3f2eSlogwang
10192bfe3f2eSlogwang if (!lio_dev->intf_open) {
10202bfe3f2eSlogwang lio_dev_err(lio_dev, "Port %d down, can't enable promiscuous\n",
10212bfe3f2eSlogwang lio_dev->port_id);
10224418919fSjohnjiang return -EAGAIN;
10232bfe3f2eSlogwang }
10242bfe3f2eSlogwang
10252bfe3f2eSlogwang lio_dev->ifflags |= LIO_IFFLAG_PROMISC;
10264418919fSjohnjiang return lio_change_dev_flag(eth_dev);
10272bfe3f2eSlogwang }
10282bfe3f2eSlogwang
10294418919fSjohnjiang static int
lio_dev_promiscuous_disable(struct rte_eth_dev * eth_dev)10302bfe3f2eSlogwang lio_dev_promiscuous_disable(struct rte_eth_dev *eth_dev)
10312bfe3f2eSlogwang {
10322bfe3f2eSlogwang struct lio_device *lio_dev = LIO_DEV(eth_dev);
10332bfe3f2eSlogwang
10342bfe3f2eSlogwang if (strcmp(lio_dev->firmware_version, LIO_VF_TRUST_MIN_VERSION) < 0) {
10352bfe3f2eSlogwang lio_dev_err(lio_dev, "Require firmware version >= %s\n",
10362bfe3f2eSlogwang LIO_VF_TRUST_MIN_VERSION);
10374418919fSjohnjiang return -EAGAIN;
10382bfe3f2eSlogwang }
10392bfe3f2eSlogwang
10402bfe3f2eSlogwang if (!lio_dev->intf_open) {
10412bfe3f2eSlogwang lio_dev_err(lio_dev, "Port %d down, can't disable promiscuous\n",
10422bfe3f2eSlogwang lio_dev->port_id);
10434418919fSjohnjiang return -EAGAIN;
10442bfe3f2eSlogwang }
10452bfe3f2eSlogwang
10462bfe3f2eSlogwang lio_dev->ifflags &= ~LIO_IFFLAG_PROMISC;
10474418919fSjohnjiang return lio_change_dev_flag(eth_dev);
10482bfe3f2eSlogwang }
10492bfe3f2eSlogwang
10504418919fSjohnjiang static int
lio_dev_allmulticast_enable(struct rte_eth_dev * eth_dev)10512bfe3f2eSlogwang lio_dev_allmulticast_enable(struct rte_eth_dev *eth_dev)
10522bfe3f2eSlogwang {
10532bfe3f2eSlogwang struct lio_device *lio_dev = LIO_DEV(eth_dev);
10542bfe3f2eSlogwang
10552bfe3f2eSlogwang if (!lio_dev->intf_open) {
10562bfe3f2eSlogwang lio_dev_err(lio_dev, "Port %d down, can't enable multicast\n",
10572bfe3f2eSlogwang lio_dev->port_id);
10584418919fSjohnjiang return -EAGAIN;
10592bfe3f2eSlogwang }
10602bfe3f2eSlogwang
10612bfe3f2eSlogwang lio_dev->ifflags |= LIO_IFFLAG_ALLMULTI;
10624418919fSjohnjiang return lio_change_dev_flag(eth_dev);
10632bfe3f2eSlogwang }
10642bfe3f2eSlogwang
10654418919fSjohnjiang static int
lio_dev_allmulticast_disable(struct rte_eth_dev * eth_dev)10662bfe3f2eSlogwang lio_dev_allmulticast_disable(struct rte_eth_dev *eth_dev)
10672bfe3f2eSlogwang {
10682bfe3f2eSlogwang struct lio_device *lio_dev = LIO_DEV(eth_dev);
10692bfe3f2eSlogwang
10702bfe3f2eSlogwang if (!lio_dev->intf_open) {
10712bfe3f2eSlogwang lio_dev_err(lio_dev, "Port %d down, can't disable multicast\n",
10722bfe3f2eSlogwang lio_dev->port_id);
10734418919fSjohnjiang return -EAGAIN;
10742bfe3f2eSlogwang }
10752bfe3f2eSlogwang
10762bfe3f2eSlogwang lio_dev->ifflags &= ~LIO_IFFLAG_ALLMULTI;
10774418919fSjohnjiang return lio_change_dev_flag(eth_dev);
10782bfe3f2eSlogwang }
10792bfe3f2eSlogwang
10802bfe3f2eSlogwang static void
lio_dev_rss_configure(struct rte_eth_dev * eth_dev)10812bfe3f2eSlogwang lio_dev_rss_configure(struct rte_eth_dev *eth_dev)
10822bfe3f2eSlogwang {
10832bfe3f2eSlogwang struct lio_device *lio_dev = LIO_DEV(eth_dev);
10842bfe3f2eSlogwang struct lio_rss_ctx *rss_state = &lio_dev->rss_state;
10852bfe3f2eSlogwang struct rte_eth_rss_reta_entry64 reta_conf[8];
10862bfe3f2eSlogwang struct rte_eth_rss_conf rss_conf;
10872bfe3f2eSlogwang uint16_t i;
10882bfe3f2eSlogwang
10892bfe3f2eSlogwang /* Configure the RSS key and the RSS protocols used to compute
10902bfe3f2eSlogwang * the RSS hash of input packets.
10912bfe3f2eSlogwang */
10922bfe3f2eSlogwang rss_conf = eth_dev->data->dev_conf.rx_adv_conf.rss_conf;
10932bfe3f2eSlogwang if ((rss_conf.rss_hf & LIO_RSS_OFFLOAD_ALL) == 0) {
10942bfe3f2eSlogwang rss_state->hash_disable = 1;
10952bfe3f2eSlogwang lio_dev_rss_hash_update(eth_dev, &rss_conf);
10962bfe3f2eSlogwang return;
10972bfe3f2eSlogwang }
10982bfe3f2eSlogwang
10992bfe3f2eSlogwang if (rss_conf.rss_key == NULL)
11002bfe3f2eSlogwang rss_conf.rss_key = lio_rss_key; /* Default hash key */
11012bfe3f2eSlogwang
11022bfe3f2eSlogwang lio_dev_rss_hash_update(eth_dev, &rss_conf);
11032bfe3f2eSlogwang
11042bfe3f2eSlogwang memset(reta_conf, 0, sizeof(reta_conf));
11052bfe3f2eSlogwang for (i = 0; i < LIO_RSS_MAX_TABLE_SZ; i++) {
11062bfe3f2eSlogwang uint8_t q_idx, conf_idx, reta_idx;
11072bfe3f2eSlogwang
11082bfe3f2eSlogwang q_idx = (uint8_t)((eth_dev->data->nb_rx_queues > 1) ?
11092bfe3f2eSlogwang i % eth_dev->data->nb_rx_queues : 0);
11102bfe3f2eSlogwang conf_idx = i / RTE_RETA_GROUP_SIZE;
11112bfe3f2eSlogwang reta_idx = i % RTE_RETA_GROUP_SIZE;
11122bfe3f2eSlogwang reta_conf[conf_idx].reta[reta_idx] = q_idx;
11132bfe3f2eSlogwang reta_conf[conf_idx].mask |= ((uint64_t)1 << reta_idx);
11142bfe3f2eSlogwang }
11152bfe3f2eSlogwang
11162bfe3f2eSlogwang lio_dev_rss_reta_update(eth_dev, reta_conf, LIO_RSS_MAX_TABLE_SZ);
11172bfe3f2eSlogwang }
11182bfe3f2eSlogwang
11192bfe3f2eSlogwang static void
lio_dev_mq_rx_configure(struct rte_eth_dev * eth_dev)11202bfe3f2eSlogwang lio_dev_mq_rx_configure(struct rte_eth_dev *eth_dev)
11212bfe3f2eSlogwang {
11222bfe3f2eSlogwang struct lio_device *lio_dev = LIO_DEV(eth_dev);
11232bfe3f2eSlogwang struct lio_rss_ctx *rss_state = &lio_dev->rss_state;
11242bfe3f2eSlogwang struct rte_eth_rss_conf rss_conf;
11252bfe3f2eSlogwang
11262bfe3f2eSlogwang switch (eth_dev->data->dev_conf.rxmode.mq_mode) {
11272bfe3f2eSlogwang case ETH_MQ_RX_RSS:
11282bfe3f2eSlogwang lio_dev_rss_configure(eth_dev);
11292bfe3f2eSlogwang break;
11302bfe3f2eSlogwang case ETH_MQ_RX_NONE:
11312bfe3f2eSlogwang /* if mq_mode is none, disable rss mode. */
11322bfe3f2eSlogwang default:
11332bfe3f2eSlogwang memset(&rss_conf, 0, sizeof(rss_conf));
11342bfe3f2eSlogwang rss_state->hash_disable = 1;
11352bfe3f2eSlogwang lio_dev_rss_hash_update(eth_dev, &rss_conf);
11362bfe3f2eSlogwang }
11372bfe3f2eSlogwang }
11382bfe3f2eSlogwang
11392bfe3f2eSlogwang /**
11402bfe3f2eSlogwang * Setup our receive queue/ringbuffer. This is the
11412bfe3f2eSlogwang * queue the Octeon uses to send us packets and
11422bfe3f2eSlogwang * responses. We are given a memory pool for our
11432bfe3f2eSlogwang * packet buffers that are used to populate the receive
11442bfe3f2eSlogwang * queue.
11452bfe3f2eSlogwang *
11462bfe3f2eSlogwang * @param eth_dev
11472bfe3f2eSlogwang * Pointer to the structure rte_eth_dev
11482bfe3f2eSlogwang * @param q_no
11492bfe3f2eSlogwang * Queue number
11502bfe3f2eSlogwang * @param num_rx_descs
11512bfe3f2eSlogwang * Number of entries in the queue
11522bfe3f2eSlogwang * @param socket_id
11532bfe3f2eSlogwang * Where to allocate memory
11542bfe3f2eSlogwang * @param rx_conf
11552bfe3f2eSlogwang * Pointer to the struction rte_eth_rxconf
11562bfe3f2eSlogwang * @param mp
11572bfe3f2eSlogwang * Pointer to the packet pool
11582bfe3f2eSlogwang *
11592bfe3f2eSlogwang * @return
11602bfe3f2eSlogwang * - On success, return 0
11612bfe3f2eSlogwang * - On failure, return -1
11622bfe3f2eSlogwang */
11632bfe3f2eSlogwang static int
lio_dev_rx_queue_setup(struct rte_eth_dev * eth_dev,uint16_t q_no,uint16_t num_rx_descs,unsigned int socket_id,const struct rte_eth_rxconf * rx_conf __rte_unused,struct rte_mempool * mp)11642bfe3f2eSlogwang lio_dev_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t q_no,
11652bfe3f2eSlogwang uint16_t num_rx_descs, unsigned int socket_id,
11662bfe3f2eSlogwang const struct rte_eth_rxconf *rx_conf __rte_unused,
11672bfe3f2eSlogwang struct rte_mempool *mp)
11682bfe3f2eSlogwang {
11692bfe3f2eSlogwang struct lio_device *lio_dev = LIO_DEV(eth_dev);
11702bfe3f2eSlogwang struct rte_pktmbuf_pool_private *mbp_priv;
11712bfe3f2eSlogwang uint32_t fw_mapped_oq;
11722bfe3f2eSlogwang uint16_t buf_size;
11732bfe3f2eSlogwang
11742bfe3f2eSlogwang if (q_no >= lio_dev->nb_rx_queues) {
11752bfe3f2eSlogwang lio_dev_err(lio_dev, "Invalid rx queue number %u\n", q_no);
11762bfe3f2eSlogwang return -EINVAL;
11772bfe3f2eSlogwang }
11782bfe3f2eSlogwang
11792bfe3f2eSlogwang lio_dev_dbg(lio_dev, "setting up rx queue %u\n", q_no);
11802bfe3f2eSlogwang
11812bfe3f2eSlogwang fw_mapped_oq = lio_dev->linfo.rxpciq[q_no].s.q_no;
11822bfe3f2eSlogwang
1183d30ea906Sjfb8856606 /* Free previous allocation if any */
1184d30ea906Sjfb8856606 if (eth_dev->data->rx_queues[q_no] != NULL) {
1185d30ea906Sjfb8856606 lio_dev_rx_queue_release(eth_dev->data->rx_queues[q_no]);
1186d30ea906Sjfb8856606 eth_dev->data->rx_queues[q_no] = NULL;
11872bfe3f2eSlogwang }
11882bfe3f2eSlogwang
11892bfe3f2eSlogwang mbp_priv = rte_mempool_get_priv(mp);
11902bfe3f2eSlogwang buf_size = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;
11912bfe3f2eSlogwang
11922bfe3f2eSlogwang if (lio_setup_droq(lio_dev, fw_mapped_oq, num_rx_descs, buf_size, mp,
11932bfe3f2eSlogwang socket_id)) {
11942bfe3f2eSlogwang lio_dev_err(lio_dev, "droq allocation failed\n");
11952bfe3f2eSlogwang return -1;
11962bfe3f2eSlogwang }
11972bfe3f2eSlogwang
11982bfe3f2eSlogwang eth_dev->data->rx_queues[q_no] = lio_dev->droq[fw_mapped_oq];
11992bfe3f2eSlogwang
12002bfe3f2eSlogwang return 0;
12012bfe3f2eSlogwang }
12022bfe3f2eSlogwang
12032bfe3f2eSlogwang /**
12042bfe3f2eSlogwang * Release the receive queue/ringbuffer. Called by
12052bfe3f2eSlogwang * the upper layers.
12062bfe3f2eSlogwang *
12072bfe3f2eSlogwang * @param rxq
12082bfe3f2eSlogwang * Opaque pointer to the receive queue to release
12092bfe3f2eSlogwang *
12102bfe3f2eSlogwang * @return
12112bfe3f2eSlogwang * - nothing
12122bfe3f2eSlogwang */
12132bfe3f2eSlogwang void
lio_dev_rx_queue_release(void * rxq)12142bfe3f2eSlogwang lio_dev_rx_queue_release(void *rxq)
12152bfe3f2eSlogwang {
12162bfe3f2eSlogwang struct lio_droq *droq = rxq;
12172bfe3f2eSlogwang int oq_no;
12182bfe3f2eSlogwang
12192bfe3f2eSlogwang if (droq) {
12202bfe3f2eSlogwang oq_no = droq->q_no;
12212bfe3f2eSlogwang lio_delete_droq_queue(droq->lio_dev, oq_no);
12222bfe3f2eSlogwang }
12232bfe3f2eSlogwang }
12242bfe3f2eSlogwang
12252bfe3f2eSlogwang /**
12262bfe3f2eSlogwang * Allocate and initialize SW ring. Initialize associated HW registers.
12272bfe3f2eSlogwang *
12282bfe3f2eSlogwang * @param eth_dev
12292bfe3f2eSlogwang * Pointer to structure rte_eth_dev
12302bfe3f2eSlogwang *
12312bfe3f2eSlogwang * @param q_no
12322bfe3f2eSlogwang * Queue number
12332bfe3f2eSlogwang *
12342bfe3f2eSlogwang * @param num_tx_descs
12352bfe3f2eSlogwang * Number of ringbuffer descriptors
12362bfe3f2eSlogwang *
12372bfe3f2eSlogwang * @param socket_id
12382bfe3f2eSlogwang * NUMA socket id, used for memory allocations
12392bfe3f2eSlogwang *
12402bfe3f2eSlogwang * @param tx_conf
12412bfe3f2eSlogwang * Pointer to the structure rte_eth_txconf
12422bfe3f2eSlogwang *
12432bfe3f2eSlogwang * @return
12442bfe3f2eSlogwang * - On success, return 0
12452bfe3f2eSlogwang * - On failure, return -errno value
12462bfe3f2eSlogwang */
12472bfe3f2eSlogwang static int
lio_dev_tx_queue_setup(struct rte_eth_dev * eth_dev,uint16_t q_no,uint16_t num_tx_descs,unsigned int socket_id,const struct rte_eth_txconf * tx_conf __rte_unused)12482bfe3f2eSlogwang lio_dev_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t q_no,
12492bfe3f2eSlogwang uint16_t num_tx_descs, unsigned int socket_id,
12502bfe3f2eSlogwang const struct rte_eth_txconf *tx_conf __rte_unused)
12512bfe3f2eSlogwang {
12522bfe3f2eSlogwang struct lio_device *lio_dev = LIO_DEV(eth_dev);
12532bfe3f2eSlogwang int fw_mapped_iq = lio_dev->linfo.txpciq[q_no].s.q_no;
12542bfe3f2eSlogwang int retval;
12552bfe3f2eSlogwang
12562bfe3f2eSlogwang if (q_no >= lio_dev->nb_tx_queues) {
12572bfe3f2eSlogwang lio_dev_err(lio_dev, "Invalid tx queue number %u\n", q_no);
12582bfe3f2eSlogwang return -EINVAL;
12592bfe3f2eSlogwang }
12602bfe3f2eSlogwang
12612bfe3f2eSlogwang lio_dev_dbg(lio_dev, "setting up tx queue %u\n", q_no);
12622bfe3f2eSlogwang
1263d30ea906Sjfb8856606 /* Free previous allocation if any */
1264d30ea906Sjfb8856606 if (eth_dev->data->tx_queues[q_no] != NULL) {
1265d30ea906Sjfb8856606 lio_dev_tx_queue_release(eth_dev->data->tx_queues[q_no]);
1266d30ea906Sjfb8856606 eth_dev->data->tx_queues[q_no] = NULL;
12672bfe3f2eSlogwang }
12682bfe3f2eSlogwang
12692bfe3f2eSlogwang retval = lio_setup_iq(lio_dev, q_no, lio_dev->linfo.txpciq[q_no],
12702bfe3f2eSlogwang num_tx_descs, lio_dev, socket_id);
12712bfe3f2eSlogwang
12722bfe3f2eSlogwang if (retval) {
12732bfe3f2eSlogwang lio_dev_err(lio_dev, "Runtime IQ(TxQ) creation failed.\n");
12742bfe3f2eSlogwang return retval;
12752bfe3f2eSlogwang }
12762bfe3f2eSlogwang
12772bfe3f2eSlogwang retval = lio_setup_sglists(lio_dev, q_no, fw_mapped_iq,
1278d30ea906Sjfb8856606 lio_dev->instr_queue[fw_mapped_iq]->nb_desc,
12792bfe3f2eSlogwang socket_id);
12802bfe3f2eSlogwang
12812bfe3f2eSlogwang if (retval) {
12822bfe3f2eSlogwang lio_delete_instruction_queue(lio_dev, fw_mapped_iq);
12832bfe3f2eSlogwang return retval;
12842bfe3f2eSlogwang }
12852bfe3f2eSlogwang
12862bfe3f2eSlogwang eth_dev->data->tx_queues[q_no] = lio_dev->instr_queue[fw_mapped_iq];
12872bfe3f2eSlogwang
12882bfe3f2eSlogwang return 0;
12892bfe3f2eSlogwang }
12902bfe3f2eSlogwang
12912bfe3f2eSlogwang /**
12922bfe3f2eSlogwang * Release the transmit queue/ringbuffer. Called by
12932bfe3f2eSlogwang * the upper layers.
12942bfe3f2eSlogwang *
12952bfe3f2eSlogwang * @param txq
12962bfe3f2eSlogwang * Opaque pointer to the transmit queue to release
12972bfe3f2eSlogwang *
12982bfe3f2eSlogwang * @return
12992bfe3f2eSlogwang * - nothing
13002bfe3f2eSlogwang */
13012bfe3f2eSlogwang void
lio_dev_tx_queue_release(void * txq)13022bfe3f2eSlogwang lio_dev_tx_queue_release(void *txq)
13032bfe3f2eSlogwang {
13042bfe3f2eSlogwang struct lio_instr_queue *tq = txq;
13052bfe3f2eSlogwang uint32_t fw_mapped_iq_no;
13062bfe3f2eSlogwang
13072bfe3f2eSlogwang
13082bfe3f2eSlogwang if (tq) {
13092bfe3f2eSlogwang /* Free sg_list */
13102bfe3f2eSlogwang lio_delete_sglist(tq);
13112bfe3f2eSlogwang
13122bfe3f2eSlogwang fw_mapped_iq_no = tq->txpciq.s.q_no;
13132bfe3f2eSlogwang lio_delete_instruction_queue(tq->lio_dev, fw_mapped_iq_no);
13142bfe3f2eSlogwang }
13152bfe3f2eSlogwang }
13162bfe3f2eSlogwang
13172bfe3f2eSlogwang /**
13182bfe3f2eSlogwang * Api to check link state.
13192bfe3f2eSlogwang */
static void
lio_dev_get_link_status(struct rte_eth_dev *eth_dev)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	uint16_t timeout = LIO_MAX_CMD_TIMEOUT;
	struct lio_link_status_resp *resp;
	union octeon_link_status *ls;
	struct lio_soft_command *sc;
	uint32_t resp_size;

	/* Nothing to query while the interface is closed. */
	if (!lio_dev->intf_open)
		return;

	resp_size = sizeof(struct lio_link_status_resp);
	sc = lio_alloc_soft_command(lio_dev, 0, resp_size, 0);
	if (sc == NULL)
		return;

	/* Response lands in the soft command's response buffer. */
	resp = (struct lio_link_status_resp *)sc->virtrptr;
	lio_prepare_soft_command(lio_dev, sc, LIO_OPCODE,
				 LIO_OPCODE_INFO, 0, 0, 0);

	/* Setting wait time in seconds */
	sc->wait_time = LIO_MAX_CMD_TIMEOUT / 1000;

	if (lio_send_soft_command(lio_dev, sc) == LIO_IQ_SEND_FAILED)
		goto get_status_fail;

	/* Busy-poll in 1 ms steps until the firmware overwrites the
	 * completion word or the timeout budget runs out.
	 */
	while ((*sc->status_word == LIO_COMPLETION_WORD_INIT) && --timeout) {
		lio_flush_iq(lio_dev, lio_dev->instr_queue[sc->iq_no]);
		rte_delay_ms(1);
	}

	if (resp->status)
		goto get_status_fail;

	ls = &resp->link_info.link;

	/* Convert the 8-byte words of the response to host order —
	 * presumably an endianness swap; see lio_swap_8B_data.
	 */
	lio_swap_8B_data((uint64_t *)ls, sizeof(union octeon_link_status) >> 3);

	/* Only act when the 64-bit status word actually changed. */
	if (lio_dev->linfo.link.link_status64 != ls->link_status64) {
		/* Shrink the VF MTU if the PF's reported MTU dropped
		 * below it.
		 */
		if (ls->s.mtu < eth_dev->data->mtu) {
			lio_dev_info(lio_dev, "Lowered VF MTU to %d as PF MTU dropped\n",
				     ls->s.mtu);
			eth_dev->data->mtu = ls->s.mtu;
		}
		lio_dev->linfo.link.link_status64 = ls->link_status64;
		lio_dev_link_update(eth_dev, 0);
	}

	lio_free_soft_command(sc);

	return;

get_status_fail:
	lio_free_soft_command(sc);
}
13772bfe3f2eSlogwang
13782bfe3f2eSlogwang /* This function will be invoked every LSC_TIMEOUT ns (100ms)
13792bfe3f2eSlogwang * and will update link state if it changes.
13802bfe3f2eSlogwang */
13812bfe3f2eSlogwang static void
lio_sync_link_state_check(void * eth_dev)13822bfe3f2eSlogwang lio_sync_link_state_check(void *eth_dev)
13832bfe3f2eSlogwang {
13842bfe3f2eSlogwang struct lio_device *lio_dev =
13852bfe3f2eSlogwang (((struct rte_eth_dev *)eth_dev)->data->dev_private);
13862bfe3f2eSlogwang
13872bfe3f2eSlogwang if (lio_dev->port_configured)
13882bfe3f2eSlogwang lio_dev_get_link_status(eth_dev);
13892bfe3f2eSlogwang
13902bfe3f2eSlogwang /* Schedule periodic link status check.
13912bfe3f2eSlogwang * Stop check if interface is close and start again while opening.
13922bfe3f2eSlogwang */
13932bfe3f2eSlogwang if (lio_dev->intf_open)
13942bfe3f2eSlogwang rte_eal_alarm_set(LIO_LSC_TIMEOUT, lio_sync_link_state_check,
13952bfe3f2eSlogwang eth_dev);
13962bfe3f2eSlogwang }
13972bfe3f2eSlogwang
/* Start the port: enable IO queues, turn on RX in firmware, begin
 * periodic link-status polling, wait for the first link report, and
 * align the MTU with the configured max RX frame length.
 *
 * Returns 0 on success, negative on failure; queues/RX are rolled
 * back on the error paths.
 */
static int
lio_dev_start(struct rte_eth_dev *eth_dev)
{
	uint16_t mtu;
	uint32_t frame_len = eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	uint16_t timeout = LIO_MAX_CMD_TIMEOUT;
	int ret = 0;

	lio_dev_info(lio_dev, "Starting port %d\n", eth_dev->data->port_id);

	if (lio_dev->fn_list.enable_io_queues(lio_dev))
		return -1;

	if (lio_send_rx_ctrl_cmd(eth_dev, 1))
		return -1;

	/* Ready for link status updates */
	lio_dev->intf_open = 1;
	/* Publish intf_open before the alarm callback can observe it. */
	rte_mb();

	/* Configure RSS if device configured with multiple RX queues. */
	lio_dev_mq_rx_configure(eth_dev);

	/* Before update the link info,
	 * must set linfo.link.link_status64 to 0.
	 */
	lio_dev->linfo.link.link_status64 = 0;

	/* start polling for lsc */
	ret = rte_eal_alarm_set(LIO_LSC_TIMEOUT,
				lio_sync_link_state_check,
				eth_dev);
	if (ret) {
		lio_dev_err(lio_dev,
			    "link state check handler creation failed\n");
		goto dev_lsc_handle_error;
	}

	/* Busy-wait (up to LIO_MAX_CMD_TIMEOUT iterations of 1 ms) for
	 * the poller to record the first link status from firmware.
	 */
	while ((lio_dev->linfo.link.link_status64 == 0) && (--timeout))
		rte_delay_ms(1);

	if (lio_dev->linfo.link.link_status64 == 0) {
		ret = -1;
		goto dev_mtu_set_error;
	}

	/* Derive MTU from max RX frame length (clamped to the minimum). */
	mtu = (uint16_t)(frame_len - RTE_ETHER_HDR_LEN - RTE_ETHER_CRC_LEN);
	if (mtu < RTE_ETHER_MIN_MTU)
		mtu = RTE_ETHER_MIN_MTU;

	if (eth_dev->data->mtu != mtu) {
		ret = lio_dev_mtu_set(eth_dev, mtu);
		if (ret)
			goto dev_mtu_set_error;
	}

	return 0;

dev_mtu_set_error:
	/* Alarm was armed; cancel it before tearing the port back down. */
	rte_eal_alarm_cancel(lio_sync_link_state_check, eth_dev);

dev_lsc_handle_error:
	lio_dev->intf_open = 0;
	lio_send_rx_ctrl_cmd(eth_dev, 0);

	return ret;
}
14662bfe3f2eSlogwang
/* Stop device and disable input/output functions */
static int
lio_dev_stop(struct rte_eth_dev *eth_dev)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);

	lio_dev_info(lio_dev, "Stopping port %d\n", eth_dev->data->port_id);
	eth_dev->data->dev_started = 0;
	lio_dev->intf_open = 0;
	/* Publish intf_open = 0 before cancelling the alarm so a
	 * concurrently-running lio_sync_link_state_check() does not
	 * re-arm itself.
	 */
	rte_mb();

	/* Cancel callback if still running. */
	rte_eal_alarm_cancel(lio_sync_link_state_check, eth_dev);

	/* Tell firmware to stop RX, then drain in-flight instructions. */
	lio_send_rx_ctrl_cmd(eth_dev, 0);

	lio_wait_for_instr_fetch(lio_dev);

	/* Clear recorded link status */
	lio_dev->linfo.link.link_status64 = 0;

	return 0;
}
14902bfe3f2eSlogwang
14912bfe3f2eSlogwang static int
lio_dev_set_link_up(struct rte_eth_dev * eth_dev)14922bfe3f2eSlogwang lio_dev_set_link_up(struct rte_eth_dev *eth_dev)
14932bfe3f2eSlogwang {
14942bfe3f2eSlogwang struct lio_device *lio_dev = LIO_DEV(eth_dev);
14952bfe3f2eSlogwang
14962bfe3f2eSlogwang if (!lio_dev->intf_open) {
14972bfe3f2eSlogwang lio_dev_info(lio_dev, "Port is stopped, Start the port first\n");
14982bfe3f2eSlogwang return 0;
14992bfe3f2eSlogwang }
15002bfe3f2eSlogwang
15012bfe3f2eSlogwang if (lio_dev->linfo.link.s.link_up) {
15022bfe3f2eSlogwang lio_dev_info(lio_dev, "Link is already UP\n");
15032bfe3f2eSlogwang return 0;
15042bfe3f2eSlogwang }
15052bfe3f2eSlogwang
15062bfe3f2eSlogwang if (lio_send_rx_ctrl_cmd(eth_dev, 1)) {
15072bfe3f2eSlogwang lio_dev_err(lio_dev, "Unable to set Link UP\n");
15082bfe3f2eSlogwang return -1;
15092bfe3f2eSlogwang }
15102bfe3f2eSlogwang
15112bfe3f2eSlogwang lio_dev->linfo.link.s.link_up = 1;
15122bfe3f2eSlogwang eth_dev->data->dev_link.link_status = ETH_LINK_UP;
15132bfe3f2eSlogwang
15142bfe3f2eSlogwang return 0;
15152bfe3f2eSlogwang }
15162bfe3f2eSlogwang
15172bfe3f2eSlogwang static int
lio_dev_set_link_down(struct rte_eth_dev * eth_dev)15182bfe3f2eSlogwang lio_dev_set_link_down(struct rte_eth_dev *eth_dev)
15192bfe3f2eSlogwang {
15202bfe3f2eSlogwang struct lio_device *lio_dev = LIO_DEV(eth_dev);
15212bfe3f2eSlogwang
15222bfe3f2eSlogwang if (!lio_dev->intf_open) {
15232bfe3f2eSlogwang lio_dev_info(lio_dev, "Port is stopped, Start the port first\n");
15242bfe3f2eSlogwang return 0;
15252bfe3f2eSlogwang }
15262bfe3f2eSlogwang
15272bfe3f2eSlogwang if (!lio_dev->linfo.link.s.link_up) {
15282bfe3f2eSlogwang lio_dev_info(lio_dev, "Link is already DOWN\n");
15292bfe3f2eSlogwang return 0;
15302bfe3f2eSlogwang }
15312bfe3f2eSlogwang
15322bfe3f2eSlogwang lio_dev->linfo.link.s.link_up = 0;
15332bfe3f2eSlogwang eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
15342bfe3f2eSlogwang
15352bfe3f2eSlogwang if (lio_send_rx_ctrl_cmd(eth_dev, 0)) {
15362bfe3f2eSlogwang lio_dev->linfo.link.s.link_up = 1;
15372bfe3f2eSlogwang eth_dev->data->dev_link.link_status = ETH_LINK_UP;
15382bfe3f2eSlogwang lio_dev_err(lio_dev, "Unable to set Link Down\n");
15392bfe3f2eSlogwang return -1;
15402bfe3f2eSlogwang }
15412bfe3f2eSlogwang
15422bfe3f2eSlogwang return 0;
15432bfe3f2eSlogwang }
15442bfe3f2eSlogwang
15452bfe3f2eSlogwang /**
15462bfe3f2eSlogwang * Reset and stop the device. This occurs on the first
15472bfe3f2eSlogwang * call to this routine. Subsequent calls will simply
15482bfe3f2eSlogwang * return. NB: This will require the NIC to be rebooted.
15492bfe3f2eSlogwang *
15502bfe3f2eSlogwang * @param eth_dev
15512bfe3f2eSlogwang * Pointer to the structure rte_eth_dev
15522bfe3f2eSlogwang *
15532bfe3f2eSlogwang * @return
15542bfe3f2eSlogwang * - nothing
15552bfe3f2eSlogwang */
1556*2d9fd380Sjfb8856606 static int
lio_dev_close(struct rte_eth_dev * eth_dev)15572bfe3f2eSlogwang lio_dev_close(struct rte_eth_dev *eth_dev)
15582bfe3f2eSlogwang {
15592bfe3f2eSlogwang struct lio_device *lio_dev = LIO_DEV(eth_dev);
1560*2d9fd380Sjfb8856606 int ret = 0;
1561*2d9fd380Sjfb8856606
1562*2d9fd380Sjfb8856606 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1563*2d9fd380Sjfb8856606 return 0;
15642bfe3f2eSlogwang
15652bfe3f2eSlogwang lio_dev_info(lio_dev, "closing port %d\n", eth_dev->data->port_id);
15662bfe3f2eSlogwang
15672bfe3f2eSlogwang if (lio_dev->intf_open)
1568*2d9fd380Sjfb8856606 ret = lio_dev_stop(eth_dev);
15692bfe3f2eSlogwang
1570d30ea906Sjfb8856606 /* Reset ioq regs */
1571d30ea906Sjfb8856606 lio_dev->fn_list.setup_device_regs(lio_dev);
15722bfe3f2eSlogwang
1573*2d9fd380Sjfb8856606 if (lio_dev->pci_dev->kdrv == RTE_PCI_KDRV_IGB_UIO) {
15742bfe3f2eSlogwang cn23xx_vf_ask_pf_to_do_flr(lio_dev);
15752bfe3f2eSlogwang rte_delay_ms(LIO_PCI_FLR_WAIT);
15762bfe3f2eSlogwang }
15772bfe3f2eSlogwang
15782bfe3f2eSlogwang /* lio_free_mbox */
15792bfe3f2eSlogwang lio_dev->fn_list.free_mbox(lio_dev);
15802bfe3f2eSlogwang
15812bfe3f2eSlogwang /* Free glist resources */
15822bfe3f2eSlogwang rte_free(lio_dev->glist_head);
15832bfe3f2eSlogwang rte_free(lio_dev->glist_lock);
15842bfe3f2eSlogwang lio_dev->glist_head = NULL;
15852bfe3f2eSlogwang lio_dev->glist_lock = NULL;
15862bfe3f2eSlogwang
15872bfe3f2eSlogwang lio_dev->port_configured = 0;
15882bfe3f2eSlogwang
15892bfe3f2eSlogwang /* Delete all queues */
15902bfe3f2eSlogwang lio_dev_clear_queues(eth_dev);
1591*2d9fd380Sjfb8856606
1592*2d9fd380Sjfb8856606 return ret;
15932bfe3f2eSlogwang }
15942bfe3f2eSlogwang
15952bfe3f2eSlogwang /**
15962bfe3f2eSlogwang * Enable tunnel rx checksum verification from firmware.
15972bfe3f2eSlogwang */
15982bfe3f2eSlogwang static void
lio_enable_hw_tunnel_rx_checksum(struct rte_eth_dev * eth_dev)15992bfe3f2eSlogwang lio_enable_hw_tunnel_rx_checksum(struct rte_eth_dev *eth_dev)
16002bfe3f2eSlogwang {
16012bfe3f2eSlogwang struct lio_device *lio_dev = LIO_DEV(eth_dev);
16022bfe3f2eSlogwang struct lio_dev_ctrl_cmd ctrl_cmd;
16032bfe3f2eSlogwang struct lio_ctrl_pkt ctrl_pkt;
16042bfe3f2eSlogwang
16052bfe3f2eSlogwang /* flush added to prevent cmd failure
16062bfe3f2eSlogwang * incase the queue is full
16072bfe3f2eSlogwang */
16082bfe3f2eSlogwang lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);
16092bfe3f2eSlogwang
16102bfe3f2eSlogwang memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
16112bfe3f2eSlogwang memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));
16122bfe3f2eSlogwang
16132bfe3f2eSlogwang ctrl_cmd.eth_dev = eth_dev;
16142bfe3f2eSlogwang ctrl_cmd.cond = 0;
16152bfe3f2eSlogwang
16162bfe3f2eSlogwang ctrl_pkt.ncmd.s.cmd = LIO_CMD_TNL_RX_CSUM_CTL;
16172bfe3f2eSlogwang ctrl_pkt.ncmd.s.param1 = LIO_CMD_RXCSUM_ENABLE;
16182bfe3f2eSlogwang ctrl_pkt.ctrl_cmd = &ctrl_cmd;
16192bfe3f2eSlogwang
16202bfe3f2eSlogwang if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
16212bfe3f2eSlogwang lio_dev_err(lio_dev, "Failed to send TNL_RX_CSUM command\n");
16222bfe3f2eSlogwang return;
16232bfe3f2eSlogwang }
16242bfe3f2eSlogwang
16252bfe3f2eSlogwang if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd))
16262bfe3f2eSlogwang lio_dev_err(lio_dev, "TNL_RX_CSUM command timed out\n");
16272bfe3f2eSlogwang }
16282bfe3f2eSlogwang
16292bfe3f2eSlogwang /**
16302bfe3f2eSlogwang * Enable checksum calculation for inner packet in a tunnel.
16312bfe3f2eSlogwang */
16322bfe3f2eSlogwang static void
lio_enable_hw_tunnel_tx_checksum(struct rte_eth_dev * eth_dev)16332bfe3f2eSlogwang lio_enable_hw_tunnel_tx_checksum(struct rte_eth_dev *eth_dev)
16342bfe3f2eSlogwang {
16352bfe3f2eSlogwang struct lio_device *lio_dev = LIO_DEV(eth_dev);
16362bfe3f2eSlogwang struct lio_dev_ctrl_cmd ctrl_cmd;
16372bfe3f2eSlogwang struct lio_ctrl_pkt ctrl_pkt;
16382bfe3f2eSlogwang
16392bfe3f2eSlogwang /* flush added to prevent cmd failure
16402bfe3f2eSlogwang * incase the queue is full
16412bfe3f2eSlogwang */
16422bfe3f2eSlogwang lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);
16432bfe3f2eSlogwang
16442bfe3f2eSlogwang memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
16452bfe3f2eSlogwang memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));
16462bfe3f2eSlogwang
16472bfe3f2eSlogwang ctrl_cmd.eth_dev = eth_dev;
16482bfe3f2eSlogwang ctrl_cmd.cond = 0;
16492bfe3f2eSlogwang
16502bfe3f2eSlogwang ctrl_pkt.ncmd.s.cmd = LIO_CMD_TNL_TX_CSUM_CTL;
16512bfe3f2eSlogwang ctrl_pkt.ncmd.s.param1 = LIO_CMD_TXCSUM_ENABLE;
16522bfe3f2eSlogwang ctrl_pkt.ctrl_cmd = &ctrl_cmd;
16532bfe3f2eSlogwang
16542bfe3f2eSlogwang if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
16552bfe3f2eSlogwang lio_dev_err(lio_dev, "Failed to send TNL_TX_CSUM command\n");
16562bfe3f2eSlogwang return;
16572bfe3f2eSlogwang }
16582bfe3f2eSlogwang
16592bfe3f2eSlogwang if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd))
16602bfe3f2eSlogwang lio_dev_err(lio_dev, "TNL_TX_CSUM command timed out\n");
16612bfe3f2eSlogwang }
16622bfe3f2eSlogwang
1663d30ea906Sjfb8856606 static int
lio_send_queue_count_update(struct rte_eth_dev * eth_dev,int num_txq,int num_rxq)1664d30ea906Sjfb8856606 lio_send_queue_count_update(struct rte_eth_dev *eth_dev, int num_txq,
1665d30ea906Sjfb8856606 int num_rxq)
1666d30ea906Sjfb8856606 {
1667d30ea906Sjfb8856606 struct lio_device *lio_dev = LIO_DEV(eth_dev);
1668d30ea906Sjfb8856606 struct lio_dev_ctrl_cmd ctrl_cmd;
1669d30ea906Sjfb8856606 struct lio_ctrl_pkt ctrl_pkt;
1670d30ea906Sjfb8856606
1671d30ea906Sjfb8856606 if (strcmp(lio_dev->firmware_version, LIO_Q_RECONF_MIN_VERSION) < 0) {
1672d30ea906Sjfb8856606 lio_dev_err(lio_dev, "Require firmware version >= %s\n",
1673d30ea906Sjfb8856606 LIO_Q_RECONF_MIN_VERSION);
1674d30ea906Sjfb8856606 return -ENOTSUP;
1675d30ea906Sjfb8856606 }
1676d30ea906Sjfb8856606
1677d30ea906Sjfb8856606 /* flush added to prevent cmd failure
1678d30ea906Sjfb8856606 * incase the queue is full
1679d30ea906Sjfb8856606 */
1680d30ea906Sjfb8856606 lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);
1681d30ea906Sjfb8856606
1682d30ea906Sjfb8856606 memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
1683d30ea906Sjfb8856606 memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));
1684d30ea906Sjfb8856606
1685d30ea906Sjfb8856606 ctrl_cmd.eth_dev = eth_dev;
1686d30ea906Sjfb8856606 ctrl_cmd.cond = 0;
1687d30ea906Sjfb8856606
1688d30ea906Sjfb8856606 ctrl_pkt.ncmd.s.cmd = LIO_CMD_QUEUE_COUNT_CTL;
1689d30ea906Sjfb8856606 ctrl_pkt.ncmd.s.param1 = num_txq;
1690d30ea906Sjfb8856606 ctrl_pkt.ncmd.s.param2 = num_rxq;
1691d30ea906Sjfb8856606 ctrl_pkt.ctrl_cmd = &ctrl_cmd;
1692d30ea906Sjfb8856606
1693d30ea906Sjfb8856606 if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
1694d30ea906Sjfb8856606 lio_dev_err(lio_dev, "Failed to send queue count control command\n");
1695d30ea906Sjfb8856606 return -1;
1696d30ea906Sjfb8856606 }
1697d30ea906Sjfb8856606
1698d30ea906Sjfb8856606 if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) {
1699d30ea906Sjfb8856606 lio_dev_err(lio_dev, "Queue count control command timed out\n");
1700d30ea906Sjfb8856606 return -1;
1701d30ea906Sjfb8856606 }
1702d30ea906Sjfb8856606
1703d30ea906Sjfb8856606 return 0;
1704d30ea906Sjfb8856606 }
1705d30ea906Sjfb8856606
1706d30ea906Sjfb8856606 static int
lio_reconf_queues(struct rte_eth_dev * eth_dev,int num_txq,int num_rxq)1707d30ea906Sjfb8856606 lio_reconf_queues(struct rte_eth_dev *eth_dev, int num_txq, int num_rxq)
1708d30ea906Sjfb8856606 {
1709d30ea906Sjfb8856606 struct lio_device *lio_dev = LIO_DEV(eth_dev);
1710*2d9fd380Sjfb8856606 int ret;
1711d30ea906Sjfb8856606
1712d30ea906Sjfb8856606 if (lio_dev->nb_rx_queues != num_rxq ||
1713d30ea906Sjfb8856606 lio_dev->nb_tx_queues != num_txq) {
1714d30ea906Sjfb8856606 if (lio_send_queue_count_update(eth_dev, num_txq, num_rxq))
1715d30ea906Sjfb8856606 return -1;
1716d30ea906Sjfb8856606 lio_dev->nb_rx_queues = num_rxq;
1717d30ea906Sjfb8856606 lio_dev->nb_tx_queues = num_txq;
1718d30ea906Sjfb8856606 }
1719d30ea906Sjfb8856606
1720*2d9fd380Sjfb8856606 if (lio_dev->intf_open) {
1721*2d9fd380Sjfb8856606 ret = lio_dev_stop(eth_dev);
1722*2d9fd380Sjfb8856606 if (ret != 0)
1723*2d9fd380Sjfb8856606 return ret;
1724*2d9fd380Sjfb8856606 }
1725d30ea906Sjfb8856606
1726d30ea906Sjfb8856606 /* Reset ioq registers */
1727d30ea906Sjfb8856606 if (lio_dev->fn_list.setup_device_regs(lio_dev)) {
1728d30ea906Sjfb8856606 lio_dev_err(lio_dev, "Failed to configure device registers\n");
1729d30ea906Sjfb8856606 return -1;
1730d30ea906Sjfb8856606 }
1731d30ea906Sjfb8856606
1732d30ea906Sjfb8856606 return 0;
1733d30ea906Sjfb8856606 }
1734d30ea906Sjfb8856606
1735d30ea906Sjfb8856606 static int
lio_dev_configure(struct rte_eth_dev * eth_dev)1736d30ea906Sjfb8856606 lio_dev_configure(struct rte_eth_dev *eth_dev)
17372bfe3f2eSlogwang {
17382bfe3f2eSlogwang struct lio_device *lio_dev = LIO_DEV(eth_dev);
17392bfe3f2eSlogwang uint16_t timeout = LIO_MAX_CMD_TIMEOUT;
17402bfe3f2eSlogwang int retval, num_iqueues, num_oqueues;
17414418919fSjohnjiang uint8_t mac[RTE_ETHER_ADDR_LEN], i;
17422bfe3f2eSlogwang struct lio_if_cfg_resp *resp;
17432bfe3f2eSlogwang struct lio_soft_command *sc;
17442bfe3f2eSlogwang union lio_if_cfg if_cfg;
17452bfe3f2eSlogwang uint32_t resp_size;
17462bfe3f2eSlogwang
17472bfe3f2eSlogwang PMD_INIT_FUNC_TRACE();
17482bfe3f2eSlogwang
17494418919fSjohnjiang if (eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
17504418919fSjohnjiang eth_dev->data->dev_conf.rxmode.offloads |=
17514418919fSjohnjiang DEV_RX_OFFLOAD_RSS_HASH;
17524418919fSjohnjiang
1753d30ea906Sjfb8856606 /* Inform firmware about change in number of queues to use.
1754d30ea906Sjfb8856606 * Disable IO queues and reset registers for re-configuration.
17552bfe3f2eSlogwang */
1756d30ea906Sjfb8856606 if (lio_dev->port_configured)
1757d30ea906Sjfb8856606 return lio_reconf_queues(eth_dev,
1758d30ea906Sjfb8856606 eth_dev->data->nb_tx_queues,
1759d30ea906Sjfb8856606 eth_dev->data->nb_rx_queues);
17602bfe3f2eSlogwang
17612bfe3f2eSlogwang lio_dev->nb_rx_queues = eth_dev->data->nb_rx_queues;
17622bfe3f2eSlogwang lio_dev->nb_tx_queues = eth_dev->data->nb_tx_queues;
17632bfe3f2eSlogwang
1764d30ea906Sjfb8856606 /* Set max number of queues which can be re-configured. */
1765d30ea906Sjfb8856606 lio_dev->max_rx_queues = eth_dev->data->nb_rx_queues;
1766d30ea906Sjfb8856606 lio_dev->max_tx_queues = eth_dev->data->nb_tx_queues;
1767d30ea906Sjfb8856606
17682bfe3f2eSlogwang resp_size = sizeof(struct lio_if_cfg_resp);
17692bfe3f2eSlogwang sc = lio_alloc_soft_command(lio_dev, 0, resp_size, 0);
17702bfe3f2eSlogwang if (sc == NULL)
17712bfe3f2eSlogwang return -ENOMEM;
17722bfe3f2eSlogwang
17732bfe3f2eSlogwang resp = (struct lio_if_cfg_resp *)sc->virtrptr;
17742bfe3f2eSlogwang
17752bfe3f2eSlogwang /* Firmware doesn't have capability to reconfigure the queues,
17762bfe3f2eSlogwang * Claim all queues, and use as many required
17772bfe3f2eSlogwang */
17782bfe3f2eSlogwang if_cfg.if_cfg64 = 0;
17792bfe3f2eSlogwang if_cfg.s.num_iqueues = lio_dev->nb_tx_queues;
17802bfe3f2eSlogwang if_cfg.s.num_oqueues = lio_dev->nb_rx_queues;
17812bfe3f2eSlogwang if_cfg.s.base_queue = 0;
17822bfe3f2eSlogwang
17832bfe3f2eSlogwang if_cfg.s.gmx_port_id = lio_dev->pf_num;
17842bfe3f2eSlogwang
17852bfe3f2eSlogwang lio_prepare_soft_command(lio_dev, sc, LIO_OPCODE,
17862bfe3f2eSlogwang LIO_OPCODE_IF_CFG, 0,
17872bfe3f2eSlogwang if_cfg.if_cfg64, 0);
17882bfe3f2eSlogwang
17892bfe3f2eSlogwang /* Setting wait time in seconds */
17902bfe3f2eSlogwang sc->wait_time = LIO_MAX_CMD_TIMEOUT / 1000;
17912bfe3f2eSlogwang
17922bfe3f2eSlogwang retval = lio_send_soft_command(lio_dev, sc);
17932bfe3f2eSlogwang if (retval == LIO_IQ_SEND_FAILED) {
17942bfe3f2eSlogwang lio_dev_err(lio_dev, "iq/oq config failed status: %x\n",
17952bfe3f2eSlogwang retval);
17962bfe3f2eSlogwang /* Soft instr is freed by driver in case of failure. */
17972bfe3f2eSlogwang goto nic_config_fail;
17982bfe3f2eSlogwang }
17992bfe3f2eSlogwang
18002bfe3f2eSlogwang /* Sleep on a wait queue till the cond flag indicates that the
18012bfe3f2eSlogwang * response arrived or timed-out.
18022bfe3f2eSlogwang */
18032bfe3f2eSlogwang while ((*sc->status_word == LIO_COMPLETION_WORD_INIT) && --timeout) {
18042bfe3f2eSlogwang lio_flush_iq(lio_dev, lio_dev->instr_queue[sc->iq_no]);
18052bfe3f2eSlogwang lio_process_ordered_list(lio_dev);
18062bfe3f2eSlogwang rte_delay_ms(1);
18072bfe3f2eSlogwang }
18082bfe3f2eSlogwang
18092bfe3f2eSlogwang retval = resp->status;
18102bfe3f2eSlogwang if (retval) {
18112bfe3f2eSlogwang lio_dev_err(lio_dev, "iq/oq config failed\n");
18122bfe3f2eSlogwang goto nic_config_fail;
18132bfe3f2eSlogwang }
18142bfe3f2eSlogwang
18154418919fSjohnjiang strlcpy(lio_dev->firmware_version,
18164418919fSjohnjiang resp->cfg_info.lio_firmware_version, LIO_FW_VERSION_LENGTH);
18172bfe3f2eSlogwang
18182bfe3f2eSlogwang lio_swap_8B_data((uint64_t *)(&resp->cfg_info),
18192bfe3f2eSlogwang sizeof(struct octeon_if_cfg_info) >> 3);
18202bfe3f2eSlogwang
18212bfe3f2eSlogwang num_iqueues = lio_hweight64(resp->cfg_info.iqmask);
18222bfe3f2eSlogwang num_oqueues = lio_hweight64(resp->cfg_info.oqmask);
18232bfe3f2eSlogwang
18242bfe3f2eSlogwang if (!(num_iqueues) || !(num_oqueues)) {
18252bfe3f2eSlogwang lio_dev_err(lio_dev,
18262bfe3f2eSlogwang "Got bad iqueues (%016lx) or oqueues (%016lx) from firmware.\n",
18272bfe3f2eSlogwang (unsigned long)resp->cfg_info.iqmask,
18282bfe3f2eSlogwang (unsigned long)resp->cfg_info.oqmask);
18292bfe3f2eSlogwang goto nic_config_fail;
18302bfe3f2eSlogwang }
18312bfe3f2eSlogwang
18322bfe3f2eSlogwang lio_dev_dbg(lio_dev,
18332bfe3f2eSlogwang "interface %d, iqmask %016lx, oqmask %016lx, numiqueues %d, numoqueues %d\n",
18342bfe3f2eSlogwang eth_dev->data->port_id,
18352bfe3f2eSlogwang (unsigned long)resp->cfg_info.iqmask,
18362bfe3f2eSlogwang (unsigned long)resp->cfg_info.oqmask,
18372bfe3f2eSlogwang num_iqueues, num_oqueues);
18382bfe3f2eSlogwang
18392bfe3f2eSlogwang lio_dev->linfo.num_rxpciq = num_oqueues;
18402bfe3f2eSlogwang lio_dev->linfo.num_txpciq = num_iqueues;
18412bfe3f2eSlogwang
18422bfe3f2eSlogwang for (i = 0; i < num_oqueues; i++) {
18432bfe3f2eSlogwang lio_dev->linfo.rxpciq[i].rxpciq64 =
18442bfe3f2eSlogwang resp->cfg_info.linfo.rxpciq[i].rxpciq64;
18452bfe3f2eSlogwang lio_dev_dbg(lio_dev, "index %d OQ %d\n",
18462bfe3f2eSlogwang i, lio_dev->linfo.rxpciq[i].s.q_no);
18472bfe3f2eSlogwang }
18482bfe3f2eSlogwang
18492bfe3f2eSlogwang for (i = 0; i < num_iqueues; i++) {
18502bfe3f2eSlogwang lio_dev->linfo.txpciq[i].txpciq64 =
18512bfe3f2eSlogwang resp->cfg_info.linfo.txpciq[i].txpciq64;
18522bfe3f2eSlogwang lio_dev_dbg(lio_dev, "index %d IQ %d\n",
18532bfe3f2eSlogwang i, lio_dev->linfo.txpciq[i].s.q_no);
18542bfe3f2eSlogwang }
18552bfe3f2eSlogwang
18562bfe3f2eSlogwang lio_dev->linfo.hw_addr = resp->cfg_info.linfo.hw_addr;
18572bfe3f2eSlogwang lio_dev->linfo.gmxport = resp->cfg_info.linfo.gmxport;
18582bfe3f2eSlogwang lio_dev->linfo.link.link_status64 =
18592bfe3f2eSlogwang resp->cfg_info.linfo.link.link_status64;
18602bfe3f2eSlogwang
18612bfe3f2eSlogwang /* 64-bit swap required on LE machines */
18622bfe3f2eSlogwang lio_swap_8B_data(&lio_dev->linfo.hw_addr, 1);
18634418919fSjohnjiang for (i = 0; i < RTE_ETHER_ADDR_LEN; i++)
18642bfe3f2eSlogwang mac[i] = *((uint8_t *)(((uint8_t *)&lio_dev->linfo.hw_addr) +
18652bfe3f2eSlogwang 2 + i));
18662bfe3f2eSlogwang
18672bfe3f2eSlogwang /* Copy the permanent MAC address */
18684418919fSjohnjiang rte_ether_addr_copy((struct rte_ether_addr *)mac,
18694418919fSjohnjiang ð_dev->data->mac_addrs[0]);
18702bfe3f2eSlogwang
18712bfe3f2eSlogwang /* enable firmware checksum support for tunnel packets */
18722bfe3f2eSlogwang lio_enable_hw_tunnel_rx_checksum(eth_dev);
18732bfe3f2eSlogwang lio_enable_hw_tunnel_tx_checksum(eth_dev);
18742bfe3f2eSlogwang
18752bfe3f2eSlogwang lio_dev->glist_lock =
18762bfe3f2eSlogwang rte_zmalloc(NULL, sizeof(*lio_dev->glist_lock) * num_iqueues, 0);
18772bfe3f2eSlogwang if (lio_dev->glist_lock == NULL)
18782bfe3f2eSlogwang return -ENOMEM;
18792bfe3f2eSlogwang
18802bfe3f2eSlogwang lio_dev->glist_head =
18812bfe3f2eSlogwang rte_zmalloc(NULL, sizeof(*lio_dev->glist_head) * num_iqueues,
18822bfe3f2eSlogwang 0);
18832bfe3f2eSlogwang if (lio_dev->glist_head == NULL) {
18842bfe3f2eSlogwang rte_free(lio_dev->glist_lock);
18852bfe3f2eSlogwang lio_dev->glist_lock = NULL;
18862bfe3f2eSlogwang return -ENOMEM;
18872bfe3f2eSlogwang }
18882bfe3f2eSlogwang
18892bfe3f2eSlogwang lio_dev_link_update(eth_dev, 0);
18902bfe3f2eSlogwang
18912bfe3f2eSlogwang lio_dev->port_configured = 1;
18922bfe3f2eSlogwang
18932bfe3f2eSlogwang lio_free_soft_command(sc);
18942bfe3f2eSlogwang
18952bfe3f2eSlogwang /* Reset ioq regs */
18962bfe3f2eSlogwang lio_dev->fn_list.setup_device_regs(lio_dev);
18972bfe3f2eSlogwang
18982bfe3f2eSlogwang /* Free iq_0 used during init */
18992bfe3f2eSlogwang lio_free_instr_queue0(lio_dev);
19002bfe3f2eSlogwang
19012bfe3f2eSlogwang return 0;
19022bfe3f2eSlogwang
19032bfe3f2eSlogwang nic_config_fail:
19042bfe3f2eSlogwang lio_dev_err(lio_dev, "Failed retval %d\n", retval);
19052bfe3f2eSlogwang lio_free_soft_command(sc);
19062bfe3f2eSlogwang lio_free_instr_queue0(lio_dev);
19072bfe3f2eSlogwang
19082bfe3f2eSlogwang return -ENODEV;
19092bfe3f2eSlogwang }
19102bfe3f2eSlogwang
/* Define our ethernet definitions: dispatch table wiring the ethdev
 * API to this VF driver's implementations.
 */
static const struct eth_dev_ops liovf_eth_dev_ops = {
	/* Device lifecycle */
	.dev_configure		= lio_dev_configure,
	.dev_start		= lio_dev_start,
	.dev_stop		= lio_dev_stop,
	.dev_set_link_up	= lio_dev_set_link_up,
	.dev_set_link_down	= lio_dev_set_link_down,
	.dev_close		= lio_dev_close,
	/* RX filtering modes */
	.promiscuous_enable	= lio_dev_promiscuous_enable,
	.promiscuous_disable	= lio_dev_promiscuous_disable,
	.allmulticast_enable	= lio_dev_allmulticast_enable,
	.allmulticast_disable	= lio_dev_allmulticast_disable,
	/* Link and statistics */
	.link_update		= lio_dev_link_update,
	.stats_get		= lio_dev_stats_get,
	.xstats_get		= lio_dev_xstats_get,
	.xstats_get_names	= lio_dev_xstats_get_names,
	.stats_reset		= lio_dev_stats_reset,
	.xstats_reset		= lio_dev_xstats_reset,
	.dev_infos_get		= lio_dev_info_get,
	/* VLAN and queue management */
	.vlan_filter_set	= lio_dev_vlan_filter_set,
	.rx_queue_setup		= lio_dev_rx_queue_setup,
	.rx_queue_release	= lio_dev_rx_queue_release,
	.tx_queue_setup		= lio_dev_tx_queue_setup,
	.tx_queue_release	= lio_dev_tx_queue_release,
	/* RSS */
	.reta_update		= lio_dev_rss_reta_update,
	.reta_query		= lio_dev_rss_reta_query,
	.rss_hash_conf_get	= lio_dev_rss_hash_conf_get,
	.rss_hash_update	= lio_dev_rss_hash_update,
	/* Tunnels and MTU */
	.udp_tunnel_port_add	= lio_dev_udp_tunnel_add,
	.udp_tunnel_port_del	= lio_dev_udp_tunnel_del,
	.mtu_set		= lio_dev_mtu_set,
};
19432bfe3f2eSlogwang
19442bfe3f2eSlogwang static void
lio_check_pf_hs_response(void * lio_dev)19452bfe3f2eSlogwang lio_check_pf_hs_response(void *lio_dev)
19462bfe3f2eSlogwang {
19472bfe3f2eSlogwang struct lio_device *dev = lio_dev;
19482bfe3f2eSlogwang
19492bfe3f2eSlogwang /* check till response arrives */
19502bfe3f2eSlogwang if (dev->pfvf_hsword.coproc_tics_per_us)
19512bfe3f2eSlogwang return;
19522bfe3f2eSlogwang
19532bfe3f2eSlogwang cn23xx_vf_handle_mbox(dev);
19542bfe3f2eSlogwang
19552bfe3f2eSlogwang rte_eal_alarm_set(1, lio_check_pf_hs_response, lio_dev);
19562bfe3f2eSlogwang }
19572bfe3f2eSlogwang
19582bfe3f2eSlogwang /**
19592bfe3f2eSlogwang * \brief Identify the LIO device and to map the BAR address space
19602bfe3f2eSlogwang * @param lio_dev lio device
19612bfe3f2eSlogwang */
19622bfe3f2eSlogwang static int
lio_chip_specific_setup(struct lio_device * lio_dev)19632bfe3f2eSlogwang lio_chip_specific_setup(struct lio_device *lio_dev)
19642bfe3f2eSlogwang {
19652bfe3f2eSlogwang struct rte_pci_device *pdev = lio_dev->pci_dev;
19662bfe3f2eSlogwang uint32_t dev_id = pdev->id.device_id;
19672bfe3f2eSlogwang const char *s;
19682bfe3f2eSlogwang int ret = 1;
19692bfe3f2eSlogwang
19702bfe3f2eSlogwang switch (dev_id) {
19712bfe3f2eSlogwang case LIO_CN23XX_VF_VID:
19722bfe3f2eSlogwang lio_dev->chip_id = LIO_CN23XX_VF_VID;
19732bfe3f2eSlogwang ret = cn23xx_vf_setup_device(lio_dev);
19742bfe3f2eSlogwang s = "CN23XX VF";
19752bfe3f2eSlogwang break;
19762bfe3f2eSlogwang default:
19772bfe3f2eSlogwang s = "?";
19782bfe3f2eSlogwang lio_dev_err(lio_dev, "Unsupported Chip\n");
19792bfe3f2eSlogwang }
19802bfe3f2eSlogwang
19812bfe3f2eSlogwang if (!ret)
19822bfe3f2eSlogwang lio_dev_info(lio_dev, "DEVICE : %s\n", s);
19832bfe3f2eSlogwang
19842bfe3f2eSlogwang return ret;
19852bfe3f2eSlogwang }
19862bfe3f2eSlogwang
/* One-time device bring-up at probe: identify the chip, create the
 * soft-command pool and response lists, set up the PF/VF mailbox,
 * handshake with the PF driver, request an FLR (igb_uio only),
 * program device registers, and create instruction queue 0.
 *
 * Returns 0 on success, -1 on failure (partial setup is unwound).
 */
static int
lio_first_time_init(struct lio_device *lio_dev,
		    struct rte_pci_device *pdev)
{
	int dpdk_queues;

	PMD_INIT_FUNC_TRACE();

	/* set dpdk specific pci device pointer */
	lio_dev->pci_dev = pdev;

	/* Identify the LIO type and set device ops */
	if (lio_chip_specific_setup(lio_dev)) {
		lio_dev_err(lio_dev, "Chip specific setup failed\n");
		return -1;
	}

	/* Initialize soft command buffer pool */
	if (lio_setup_sc_buffer_pool(lio_dev)) {
		lio_dev_err(lio_dev, "sc buffer pool allocation failed\n");
		return -1;
	}

	/* Initialize lists to manage the requests of different types that
	 * arrive from applications for this lio device.
	 */
	lio_setup_response_list(lio_dev);

	if (lio_dev->fn_list.setup_mbox(lio_dev)) {
		lio_dev_err(lio_dev, "Mailbox setup failed\n");
		goto error;
	}

	/* Check PF response */
	lio_check_pf_hs_response((void *)lio_dev);

	/* Do handshake and exit if incompatible PF driver */
	if (cn23xx_pfvf_handshake(lio_dev))
		goto error;

	/* Request and wait for device reset. */
	if (pdev->kdrv == RTE_PCI_KDRV_IGB_UIO) {
		cn23xx_vf_ask_pf_to_do_flr(lio_dev);
		/* FLR wait time doubled as a precaution. */
		rte_delay_ms(LIO_PCI_FLR_WAIT * 2);
	}

	if (lio_dev->fn_list.setup_device_regs(lio_dev)) {
		lio_dev_err(lio_dev, "Failed to configure device registers\n");
		goto error;
	}

	if (lio_setup_instr_queue0(lio_dev)) {
		lio_dev_err(lio_dev, "Failed to setup instruction queue 0\n");
		goto error;
	}

	/* Queue limits come from the per-VF ring allocation. */
	dpdk_queues = (int)lio_dev->sriov_info.rings_per_vf;

	lio_dev->max_tx_queues = dpdk_queues;
	lio_dev->max_rx_queues = dpdk_queues;

	/* Enable input and output queues for this device */
	if (lio_dev->fn_list.enable_io_queues(lio_dev))
		goto error;

	return 0;

error:
	/* Unwind; NULL checks allow for partially-completed setup. */
	lio_free_sc_buffer_pool(lio_dev);
	if (lio_dev->mbox[0])
		lio_dev->fn_list.free_mbox(lio_dev);
	if (lio_dev->instr_queue[0])
		lio_free_instr_queue0(lio_dev);

	return -1;
}
20642bfe3f2eSlogwang
20652bfe3f2eSlogwang static int
lio_eth_dev_uninit(struct rte_eth_dev * eth_dev)20662bfe3f2eSlogwang lio_eth_dev_uninit(struct rte_eth_dev *eth_dev)
20672bfe3f2eSlogwang {
20682bfe3f2eSlogwang struct lio_device *lio_dev = LIO_DEV(eth_dev);
20692bfe3f2eSlogwang
20702bfe3f2eSlogwang PMD_INIT_FUNC_TRACE();
20712bfe3f2eSlogwang
20722bfe3f2eSlogwang if (rte_eal_process_type() != RTE_PROC_PRIMARY)
2073d30ea906Sjfb8856606 return 0;
20742bfe3f2eSlogwang
20752bfe3f2eSlogwang /* lio_free_sc_buffer_pool */
20762bfe3f2eSlogwang lio_free_sc_buffer_pool(lio_dev);
20772bfe3f2eSlogwang
20782bfe3f2eSlogwang return 0;
20792bfe3f2eSlogwang }
20802bfe3f2eSlogwang
20812bfe3f2eSlogwang static int
lio_eth_dev_init(struct rte_eth_dev * eth_dev)20822bfe3f2eSlogwang lio_eth_dev_init(struct rte_eth_dev *eth_dev)
20832bfe3f2eSlogwang {
20842bfe3f2eSlogwang struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(eth_dev);
20852bfe3f2eSlogwang struct lio_device *lio_dev = LIO_DEV(eth_dev);
20862bfe3f2eSlogwang
20872bfe3f2eSlogwang PMD_INIT_FUNC_TRACE();
20882bfe3f2eSlogwang
20892bfe3f2eSlogwang eth_dev->rx_pkt_burst = &lio_dev_recv_pkts;
20902bfe3f2eSlogwang eth_dev->tx_pkt_burst = &lio_dev_xmit_pkts;
20912bfe3f2eSlogwang
20922bfe3f2eSlogwang /* Primary does the initialization. */
20932bfe3f2eSlogwang if (rte_eal_process_type() != RTE_PROC_PRIMARY)
20942bfe3f2eSlogwang return 0;
20952bfe3f2eSlogwang
20962bfe3f2eSlogwang rte_eth_copy_pci_info(eth_dev, pdev);
2097*2d9fd380Sjfb8856606 eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
20982bfe3f2eSlogwang
20992bfe3f2eSlogwang if (pdev->mem_resource[0].addr) {
21002bfe3f2eSlogwang lio_dev->hw_addr = pdev->mem_resource[0].addr;
21012bfe3f2eSlogwang } else {
21022bfe3f2eSlogwang PMD_INIT_LOG(ERR, "ERROR: Failed to map BAR0\n");
21032bfe3f2eSlogwang return -ENODEV;
21042bfe3f2eSlogwang }
21052bfe3f2eSlogwang
21062bfe3f2eSlogwang lio_dev->eth_dev = eth_dev;
21072bfe3f2eSlogwang /* set lio device print string */
21082bfe3f2eSlogwang snprintf(lio_dev->dev_string, sizeof(lio_dev->dev_string),
21092bfe3f2eSlogwang "%s[%02x:%02x.%x]", pdev->driver->driver.name,
21102bfe3f2eSlogwang pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
21112bfe3f2eSlogwang
21122bfe3f2eSlogwang lio_dev->port_id = eth_dev->data->port_id;
21132bfe3f2eSlogwang
21142bfe3f2eSlogwang if (lio_first_time_init(lio_dev, pdev)) {
21152bfe3f2eSlogwang lio_dev_err(lio_dev, "Device init failed\n");
21162bfe3f2eSlogwang return -EINVAL;
21172bfe3f2eSlogwang }
21182bfe3f2eSlogwang
21192bfe3f2eSlogwang eth_dev->dev_ops = &liovf_eth_dev_ops;
21204418919fSjohnjiang eth_dev->data->mac_addrs = rte_zmalloc("lio", RTE_ETHER_ADDR_LEN, 0);
21212bfe3f2eSlogwang if (eth_dev->data->mac_addrs == NULL) {
21222bfe3f2eSlogwang lio_dev_err(lio_dev,
21232bfe3f2eSlogwang "MAC addresses memory allocation failed\n");
21242bfe3f2eSlogwang eth_dev->dev_ops = NULL;
21252bfe3f2eSlogwang eth_dev->rx_pkt_burst = NULL;
21262bfe3f2eSlogwang eth_dev->tx_pkt_burst = NULL;
21272bfe3f2eSlogwang return -ENOMEM;
21282bfe3f2eSlogwang }
21292bfe3f2eSlogwang
21302bfe3f2eSlogwang rte_atomic64_set(&lio_dev->status, LIO_DEV_RUNNING);
21312bfe3f2eSlogwang rte_wmb();
21322bfe3f2eSlogwang
21332bfe3f2eSlogwang lio_dev->port_configured = 0;
21342bfe3f2eSlogwang /* Always allow unicast packets */
21352bfe3f2eSlogwang lio_dev->ifflags |= LIO_IFFLAG_UNICAST;
21362bfe3f2eSlogwang
21372bfe3f2eSlogwang return 0;
21382bfe3f2eSlogwang }
21392bfe3f2eSlogwang
21402bfe3f2eSlogwang static int
lio_eth_dev_pci_probe(struct rte_pci_driver * pci_drv __rte_unused,struct rte_pci_device * pci_dev)21412bfe3f2eSlogwang lio_eth_dev_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
21422bfe3f2eSlogwang struct rte_pci_device *pci_dev)
21432bfe3f2eSlogwang {
2144d30ea906Sjfb8856606 return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct lio_device),
2145d30ea906Sjfb8856606 lio_eth_dev_init);
21462bfe3f2eSlogwang }
21472bfe3f2eSlogwang
21482bfe3f2eSlogwang static int
lio_eth_dev_pci_remove(struct rte_pci_device * pci_dev)21492bfe3f2eSlogwang lio_eth_dev_pci_remove(struct rte_pci_device *pci_dev)
21502bfe3f2eSlogwang {
21512bfe3f2eSlogwang return rte_eth_dev_pci_generic_remove(pci_dev,
21522bfe3f2eSlogwang lio_eth_dev_uninit);
21532bfe3f2eSlogwang }
21542bfe3f2eSlogwang
/* Set of PCI devices this driver supports: only the Cavium CN23xx
 * LiquidIO VF device ID; the list is terminated by a zeroed sentinel.
 */
static const struct rte_pci_id pci_id_liovf_map[] = {
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, LIO_CN23XX_VF_VID) },
	{ .vendor_id = 0, /* sentinel */ }
};
21602bfe3f2eSlogwang
/* PCI driver descriptor registered with the EAL: matches the ID table
 * above and requests BAR mapping before probe is invoked.
 */
static struct rte_pci_driver rte_liovf_pmd = {
	.id_table = pci_id_liovf_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = lio_eth_dev_pci_probe,
	.remove = lio_eth_dev_pci_remove,
};
21672bfe3f2eSlogwang
/* Register the PMD, its PCI ID table, the kernel-module dependency
 * (igb_uio or vfio-pci must bind the device), and the two log types
 * (init-time and datapath), both defaulting to NOTICE level.
 */
RTE_PMD_REGISTER_PCI(net_liovf, rte_liovf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_liovf, pci_id_liovf_map);
RTE_PMD_REGISTER_KMOD_DEP(net_liovf, "* igb_uio | vfio-pci");
RTE_LOG_REGISTER(lio_logtype_init, pmd.net.liquidio.init, NOTICE);
RTE_LOG_REGISTER(lio_logtype_driver, pmd.net.liquidio.driver, NOTICE);
2173