xref: /f-stack/dpdk/drivers/net/liquidio/lio_ethdev.c (revision aa61e4b5)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2017 Cavium, Inc
3  */
4 
5 #include <rte_string_fns.h>
6 #include <rte_ethdev_driver.h>
7 #include <rte_ethdev_pci.h>
8 #include <rte_cycles.h>
9 #include <rte_malloc.h>
10 #include <rte_alarm.h>
11 #include <rte_ether.h>
12 
13 #include "lio_logs.h"
14 #include "lio_23xx_vf.h"
15 #include "lio_ethdev.h"
16 #include "lio_rxtx.h"
17 
18 int lio_logtype_init;
19 int lio_logtype_driver;
20 
21 /* Default RSS key in use */
22 static uint8_t lio_rss_key[40] = {
23 	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
24 	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
25 	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
26 	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
27 	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
28 };
29 
30 static const struct rte_eth_desc_lim lio_rx_desc_lim = {
31 	.nb_max		= CN23XX_MAX_OQ_DESCRIPTORS,
32 	.nb_min		= CN23XX_MIN_OQ_DESCRIPTORS,
33 	.nb_align	= 1,
34 };
35 
36 static const struct rte_eth_desc_lim lio_tx_desc_lim = {
37 	.nb_max		= CN23XX_MAX_IQ_DESCRIPTORS,
38 	.nb_min		= CN23XX_MIN_IQ_DESCRIPTORS,
39 	.nb_align	= 1,
40 };
41 
42 /* Wait for the control command to reach the NIC. Returns 0 on success, 1 on timeout. */
43 static uint16_t
44 lio_wait_for_ctrl_cmd(struct lio_device *lio_dev,
45 		      struct lio_dev_ctrl_cmd *ctrl_cmd)
46 {
47 	uint16_t timeout = LIO_MAX_CMD_TIMEOUT;
48 
49 	while ((ctrl_cmd->cond == 0) && --timeout) {
50 		lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);
51 		rte_delay_ms(1);
52 	}
53 
54 	return !timeout;
55 }
56 
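/* Most control operations below share the same pattern: flush
 * instruction queue 0 so a full queue does not reject the command,
 * zero a lio_ctrl_pkt/lio_dev_ctrl_cmd pair, fill in the command and
 * its parameters, send it with lio_send_ctrl_pkt() and poll for
 * completion with lio_wait_for_ctrl_cmd().
 */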
57 /**
58  * \brief Send Rx control command
59  * @param eth_dev Pointer to the structure rte_eth_dev
60  * @param start_stop whether to start (1) or stop (0) Rx
61  */
62 static int
63 lio_send_rx_ctrl_cmd(struct rte_eth_dev *eth_dev, int start_stop)
64 {
65 	struct lio_device *lio_dev = LIO_DEV(eth_dev);
66 	struct lio_dev_ctrl_cmd ctrl_cmd;
67 	struct lio_ctrl_pkt ctrl_pkt;
68 
69 	/* flush added to prevent cmd failure
70 	 * in case the queue is full
71 	 */
72 	lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);
73 
74 	memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
75 	memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));
76 
77 	ctrl_cmd.eth_dev = eth_dev;
78 	ctrl_cmd.cond = 0;
79 
80 	ctrl_pkt.ncmd.s.cmd = LIO_CMD_RX_CTL;
81 	ctrl_pkt.ncmd.s.param1 = start_stop;
82 	ctrl_pkt.ctrl_cmd = &ctrl_cmd;
83 
84 	if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
85 		lio_dev_err(lio_dev, "Failed to send RX Control message\n");
86 		return -1;
87 	}
88 
89 	if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) {
90 		lio_dev_err(lio_dev, "RX Control command timed out\n");
91 		return -1;
92 	}
93 
94 	return 0;
95 }
96 
97 /* Store statistics names and their offsets in the stats structure */
98 struct rte_lio_xstats_name_off {
99 	char name[RTE_ETH_XSTATS_NAME_SIZE];
100 	unsigned int offset;
101 };
102 
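/* Offsets below index into the statistics block returned by the
 * firmware (struct octeon_link_stats). The TX entries add
 * sizeof(struct octeon_rx_stats) because the RX statistics block
 * precedes the TX block in that response.
 */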
103 static const struct rte_lio_xstats_name_off rte_lio_stats_strings[] = {
104 	{"rx_pkts", offsetof(struct octeon_rx_stats, total_rcvd)},
105 	{"rx_bytes", offsetof(struct octeon_rx_stats, bytes_rcvd)},
106 	{"rx_broadcast_pkts", offsetof(struct octeon_rx_stats, total_bcst)},
107 	{"rx_multicast_pkts", offsetof(struct octeon_rx_stats, total_mcst)},
108 	{"rx_flow_ctrl_pkts", offsetof(struct octeon_rx_stats, ctl_rcvd)},
109 	{"rx_fifo_err", offsetof(struct octeon_rx_stats, fifo_err)},
110 	{"rx_dmac_drop", offsetof(struct octeon_rx_stats, dmac_drop)},
111 	{"rx_fcs_err", offsetof(struct octeon_rx_stats, fcs_err)},
112 	{"rx_jabber_err", offsetof(struct octeon_rx_stats, jabber_err)},
113 	{"rx_l2_err", offsetof(struct octeon_rx_stats, l2_err)},
114 	{"rx_vxlan_pkts", offsetof(struct octeon_rx_stats, fw_rx_vxlan)},
115 	{"rx_vxlan_err", offsetof(struct octeon_rx_stats, fw_rx_vxlan_err)},
116 	{"rx_lro_pkts", offsetof(struct octeon_rx_stats, fw_lro_pkts)},
117 	{"tx_pkts", (offsetof(struct octeon_tx_stats, total_pkts_sent)) +
118 						sizeof(struct octeon_rx_stats)},
119 	{"tx_bytes", (offsetof(struct octeon_tx_stats, total_bytes_sent)) +
120 						sizeof(struct octeon_rx_stats)},
121 	{"tx_broadcast_pkts",
122 		(offsetof(struct octeon_tx_stats, bcast_pkts_sent)) +
123 			sizeof(struct octeon_rx_stats)},
124 	{"tx_multicast_pkts",
125 		(offsetof(struct octeon_tx_stats, mcast_pkts_sent)) +
126 			sizeof(struct octeon_rx_stats)},
127 	{"tx_flow_ctrl_pkts", (offsetof(struct octeon_tx_stats, ctl_sent)) +
128 						sizeof(struct octeon_rx_stats)},
129 	{"tx_fifo_err", (offsetof(struct octeon_tx_stats, fifo_err)) +
130 						sizeof(struct octeon_rx_stats)},
131 	{"tx_total_collisions", (offsetof(struct octeon_tx_stats,
132 					  total_collisions)) +
133 						sizeof(struct octeon_rx_stats)},
134 	{"tx_tso", (offsetof(struct octeon_tx_stats, fw_tso)) +
135 						sizeof(struct octeon_rx_stats)},
136 	{"tx_vxlan_pkts", (offsetof(struct octeon_tx_stats, fw_tx_vxlan)) +
137 						sizeof(struct octeon_rx_stats)},
138 };
139 
140 #define LIO_NB_XSTATS	RTE_DIM(rte_lio_stats_strings)
141 
142 /* Get hw stats of the port */
143 static int
144 lio_dev_xstats_get(struct rte_eth_dev *eth_dev, struct rte_eth_xstat *xstats,
145 		   unsigned int n)
146 {
147 	struct lio_device *lio_dev = LIO_DEV(eth_dev);
148 	uint16_t timeout = LIO_MAX_CMD_TIMEOUT;
149 	struct octeon_link_stats *hw_stats;
150 	struct lio_link_stats_resp *resp;
151 	struct lio_soft_command *sc;
152 	uint32_t resp_size;
153 	unsigned int i;
154 	int retval;
155 
156 	if (!lio_dev->intf_open) {
157 		lio_dev_err(lio_dev, "Port %d down\n",
158 			    lio_dev->port_id);
159 		return -EINVAL;
160 	}
161 
162 	if (n < LIO_NB_XSTATS)
163 		return LIO_NB_XSTATS;
164 
165 	resp_size = sizeof(struct lio_link_stats_resp);
166 	sc = lio_alloc_soft_command(lio_dev, 0, resp_size, 0);
167 	if (sc == NULL)
168 		return -ENOMEM;
169 
170 	resp = (struct lio_link_stats_resp *)sc->virtrptr;
171 	lio_prepare_soft_command(lio_dev, sc, LIO_OPCODE,
172 				 LIO_OPCODE_PORT_STATS, 0, 0, 0);
173 
174 	/* Setting wait time in seconds */
175 	sc->wait_time = LIO_MAX_CMD_TIMEOUT / 1000;
176 
177 	retval = lio_send_soft_command(lio_dev, sc);
178 	if (retval == LIO_IQ_SEND_FAILED) {
179 		lio_dev_err(lio_dev, "failed to get port stats from firmware. status: %x\n",
180 			    retval);
181 		goto get_stats_fail;
182 	}
183 
184 	while ((*sc->status_word == LIO_COMPLETION_WORD_INIT) && --timeout) {
185 		lio_flush_iq(lio_dev, lio_dev->instr_queue[sc->iq_no]);
186 		lio_process_ordered_list(lio_dev);
187 		rte_delay_ms(1);
188 	}
189 
190 	retval = resp->status;
191 	if (retval) {
192 		lio_dev_err(lio_dev, "failed to get port stats from firmware\n");
193 		goto get_stats_fail;
194 	}
195 
196 	lio_swap_8B_data((uint64_t *)(&resp->link_stats),
197 			 sizeof(struct octeon_link_stats) >> 3);
198 
199 	hw_stats = &resp->link_stats;
200 
201 	for (i = 0; i < LIO_NB_XSTATS; i++) {
202 		xstats[i].id = i;
203 		xstats[i].value =
204 		    *(uint64_t *)(((char *)hw_stats) +
205 					rte_lio_stats_strings[i].offset);
206 	}
207 
208 	lio_free_soft_command(sc);
209 
210 	return LIO_NB_XSTATS;
211 
212 get_stats_fail:
213 	lio_free_soft_command(sc);
214 
215 	return -1;
216 }
217 
218 static int
219 lio_dev_xstats_get_names(struct rte_eth_dev *eth_dev,
220 			 struct rte_eth_xstat_name *xstats_names,
221 			 unsigned limit __rte_unused)
222 {
223 	struct lio_device *lio_dev = LIO_DEV(eth_dev);
224 	unsigned int i;
225 
226 	if (!lio_dev->intf_open) {
227 		lio_dev_err(lio_dev, "Port %d down\n",
228 			    lio_dev->port_id);
229 		return -EINVAL;
230 	}
231 
232 	if (xstats_names == NULL)
233 		return LIO_NB_XSTATS;
234 
235 	/* Note: limit checked in rte_eth_xstats_get_names() */
236 
237 	for (i = 0; i < LIO_NB_XSTATS; i++) {
238 		snprintf(xstats_names[i].name, sizeof(xstats_names[i].name),
239 			 "%s", rte_lio_stats_strings[i].name);
240 	}
241 
242 	return LIO_NB_XSTATS;
243 }
244 
245 /* Reset hw stats for the port */
246 static int
247 lio_dev_xstats_reset(struct rte_eth_dev *eth_dev)
248 {
249 	struct lio_device *lio_dev = LIO_DEV(eth_dev);
250 	struct lio_dev_ctrl_cmd ctrl_cmd;
251 	struct lio_ctrl_pkt ctrl_pkt;
252 	int ret;
253 
254 	if (!lio_dev->intf_open) {
255 		lio_dev_err(lio_dev, "Port %d down\n",
256 			    lio_dev->port_id);
257 		return -EINVAL;
258 	}
259 
260 	/* flush added to prevent cmd failure
261 	 * in case the queue is full
262 	 */
263 	lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);
264 
265 	memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
266 	memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));
267 
268 	ctrl_cmd.eth_dev = eth_dev;
269 	ctrl_cmd.cond = 0;
270 
271 	ctrl_pkt.ncmd.s.cmd = LIO_CMD_CLEAR_STATS;
272 	ctrl_pkt.ctrl_cmd = &ctrl_cmd;
273 
274 	ret = lio_send_ctrl_pkt(lio_dev, &ctrl_pkt);
275 	if (ret != 0) {
276 		lio_dev_err(lio_dev, "Failed to send clear stats command\n");
277 		return ret;
278 	}
279 
280 	ret = lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd);
281 	if (ret != 0) {
282 		lio_dev_err(lio_dev, "Clear stats command timed out\n");
283 		return ret;
284 	}
285 
286 	/* clear stored per queue stats */
287 	RTE_FUNC_PTR_OR_ERR_RET(*eth_dev->dev_ops->stats_reset, 0);
288 	return (*eth_dev->dev_ops->stats_reset)(eth_dev);
289 }
290 
291 /* Retrieve the device statistics (# packets in/out, # bytes in/out, etc.) */
292 static int
293 lio_dev_stats_get(struct rte_eth_dev *eth_dev,
294 		  struct rte_eth_stats *stats)
295 {
296 	struct lio_device *lio_dev = LIO_DEV(eth_dev);
297 	struct lio_droq_stats *oq_stats;
298 	struct lio_iq_stats *iq_stats;
299 	struct lio_instr_queue *txq;
300 	struct lio_droq *droq;
301 	int i, iq_no, oq_no;
302 	uint64_t bytes = 0;
303 	uint64_t pkts = 0;
304 	uint64_t drop = 0;
305 
306 	for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
307 		iq_no = lio_dev->linfo.txpciq[i].s.q_no;
308 		txq = lio_dev->instr_queue[iq_no];
309 		if (txq != NULL) {
310 			iq_stats = &txq->stats;
311 			pkts += iq_stats->tx_done;
312 			drop += iq_stats->tx_dropped;
313 			bytes += iq_stats->tx_tot_bytes;
314 		}
315 	}
316 
317 	stats->opackets = pkts;
318 	stats->obytes = bytes;
319 	stats->oerrors = drop;
320 
321 	pkts = 0;
322 	drop = 0;
323 	bytes = 0;
324 
325 	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
326 		oq_no = lio_dev->linfo.rxpciq[i].s.q_no;
327 		droq = lio_dev->droq[oq_no];
328 		if (droq != NULL) {
329 			oq_stats = &droq->stats;
330 			pkts += oq_stats->rx_pkts_received;
331 			drop += (oq_stats->rx_dropped +
332 					oq_stats->dropped_toomany +
333 					oq_stats->dropped_nomem);
334 			bytes += oq_stats->rx_bytes_received;
335 		}
336 	}
337 	stats->ibytes = bytes;
338 	stats->ipackets = pkts;
339 	stats->ierrors = drop;
340 
341 	return 0;
342 }
343 
344 static int
345 lio_dev_stats_reset(struct rte_eth_dev *eth_dev)
346 {
347 	struct lio_device *lio_dev = LIO_DEV(eth_dev);
348 	struct lio_droq_stats *oq_stats;
349 	struct lio_iq_stats *iq_stats;
350 	struct lio_instr_queue *txq;
351 	struct lio_droq *droq;
352 	int i, iq_no, oq_no;
353 
354 	for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
355 		iq_no = lio_dev->linfo.txpciq[i].s.q_no;
356 		txq = lio_dev->instr_queue[iq_no];
357 		if (txq != NULL) {
358 			iq_stats = &txq->stats;
359 			memset(iq_stats, 0, sizeof(struct lio_iq_stats));
360 		}
361 	}
362 
363 	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
364 		oq_no = lio_dev->linfo.rxpciq[i].s.q_no;
365 		droq = lio_dev->droq[oq_no];
366 		if (droq != NULL) {
367 			oq_stats = &droq->stats;
368 			memset(oq_stats, 0, sizeof(struct lio_droq_stats));
369 		}
370 	}
371 
372 	return 0;
373 }
374 
375 static int
376 lio_dev_info_get(struct rte_eth_dev *eth_dev,
377 		 struct rte_eth_dev_info *devinfo)
378 {
379 	struct lio_device *lio_dev = LIO_DEV(eth_dev);
380 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
381 
382 	switch (pci_dev->id.subsystem_device_id) {
383 	/* CN23xx 10G cards */
384 	case PCI_SUBSYS_DEV_ID_CN2350_210:
385 	case PCI_SUBSYS_DEV_ID_CN2360_210:
386 	case PCI_SUBSYS_DEV_ID_CN2350_210SVPN3:
387 	case PCI_SUBSYS_DEV_ID_CN2360_210SVPN3:
388 	case PCI_SUBSYS_DEV_ID_CN2350_210SVPT:
389 	case PCI_SUBSYS_DEV_ID_CN2360_210SVPT:
390 		devinfo->speed_capa = ETH_LINK_SPEED_10G;
391 		break;
392 	/* CN23xx 25G cards */
393 	case PCI_SUBSYS_DEV_ID_CN2350_225:
394 	case PCI_SUBSYS_DEV_ID_CN2360_225:
395 		devinfo->speed_capa = ETH_LINK_SPEED_25G;
396 		break;
397 	default:
398 		devinfo->speed_capa = ETH_LINK_SPEED_10G;
399 		lio_dev_err(lio_dev,
400 			    "Unknown CN23XX subsystem device id. Setting 10G as default link speed.\n");
401 		return -EINVAL;
402 	}
403 
404 	devinfo->max_rx_queues = lio_dev->max_rx_queues;
405 	devinfo->max_tx_queues = lio_dev->max_tx_queues;
406 
407 	devinfo->min_rx_bufsize = LIO_MIN_RX_BUF_SIZE;
408 	devinfo->max_rx_pktlen = LIO_MAX_RX_PKTLEN;
409 
410 	devinfo->max_mac_addrs = 1;
411 
412 	devinfo->rx_offload_capa = (DEV_RX_OFFLOAD_IPV4_CKSUM		|
413 				    DEV_RX_OFFLOAD_UDP_CKSUM		|
414 				    DEV_RX_OFFLOAD_TCP_CKSUM		|
415 				    DEV_RX_OFFLOAD_VLAN_STRIP		|
416 				    DEV_RX_OFFLOAD_RSS_HASH);
417 	devinfo->tx_offload_capa = (DEV_TX_OFFLOAD_IPV4_CKSUM		|
418 				    DEV_TX_OFFLOAD_UDP_CKSUM		|
419 				    DEV_TX_OFFLOAD_TCP_CKSUM		|
420 				    DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM);
421 
422 	devinfo->rx_desc_lim = lio_rx_desc_lim;
423 	devinfo->tx_desc_lim = lio_tx_desc_lim;
424 
425 	devinfo->reta_size = LIO_RSS_MAX_TABLE_SZ;
426 	devinfo->hash_key_size = LIO_RSS_MAX_KEY_SZ;
427 	devinfo->flow_type_rss_offloads = (ETH_RSS_IPV4			|
428 					   ETH_RSS_NONFRAG_IPV4_TCP	|
429 					   ETH_RSS_IPV6			|
430 					   ETH_RSS_NONFRAG_IPV6_TCP	|
431 					   ETH_RSS_IPV6_EX		|
432 					   ETH_RSS_IPV6_TCP_EX);
433 	return 0;
434 }
435 
436 static int
437 lio_dev_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
438 {
439 	struct lio_device *lio_dev = LIO_DEV(eth_dev);
440 	uint16_t pf_mtu = lio_dev->linfo.link.s.mtu;
441 	uint32_t frame_len = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
442 	struct lio_dev_ctrl_cmd ctrl_cmd;
443 	struct lio_ctrl_pkt ctrl_pkt;
444 
445 	PMD_INIT_FUNC_TRACE();
446 
447 	if (!lio_dev->intf_open) {
448 		lio_dev_err(lio_dev, "Port %d down, can't set MTU\n",
449 			    lio_dev->port_id);
450 		return -EINVAL;
451 	}
452 
453 	/* Check if the VF MTU is within the allowed range.
454 	 * New value should not exceed PF MTU.
455 	 */
456 	if (mtu < RTE_ETHER_MIN_MTU || mtu > pf_mtu) {
457 		lio_dev_err(lio_dev, "VF MTU should be >= %d and <= %d\n",
458 			    RTE_ETHER_MIN_MTU, pf_mtu);
459 		return -EINVAL;
460 	}
461 
462 	/* flush added to prevent cmd failure
463 	 * in case the queue is full
464 	 */
465 	lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);
466 
467 	memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
468 	memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));
469 
470 	ctrl_cmd.eth_dev = eth_dev;
471 	ctrl_cmd.cond = 0;
472 
473 	ctrl_pkt.ncmd.s.cmd = LIO_CMD_CHANGE_MTU;
474 	ctrl_pkt.ncmd.s.param1 = mtu;
475 	ctrl_pkt.ctrl_cmd = &ctrl_cmd;
476 
477 	if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
478 		lio_dev_err(lio_dev, "Failed to send command to change MTU\n");
479 		return -1;
480 	}
481 
482 	if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) {
483 		lio_dev_err(lio_dev, "Command to change MTU timed out\n");
484 		return -1;
485 	}
486 
487 	if (frame_len > RTE_ETHER_MAX_LEN)
488 		eth_dev->data->dev_conf.rxmode.offloads |=
489 			DEV_RX_OFFLOAD_JUMBO_FRAME;
490 	else
491 		eth_dev->data->dev_conf.rxmode.offloads &=
492 			~DEV_RX_OFFLOAD_JUMBO_FRAME;
493 
494 	eth_dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_len;
495 	eth_dev->data->mtu = mtu;
496 
497 	return 0;
498 }
499 
500 static int
501 lio_dev_rss_reta_update(struct rte_eth_dev *eth_dev,
502 			struct rte_eth_rss_reta_entry64 *reta_conf,
503 			uint16_t reta_size)
504 {
505 	struct lio_device *lio_dev = LIO_DEV(eth_dev);
506 	struct lio_rss_ctx *rss_state = &lio_dev->rss_state;
507 	struct lio_rss_set *rss_param;
508 	struct lio_dev_ctrl_cmd ctrl_cmd;
509 	struct lio_ctrl_pkt ctrl_pkt;
510 	int i, j, index;
511 
512 	if (!lio_dev->intf_open) {
513 		lio_dev_err(lio_dev, "Port %d down, can't update reta\n",
514 			    lio_dev->port_id);
515 		return -EINVAL;
516 	}
517 
518 	if (reta_size != LIO_RSS_MAX_TABLE_SZ) {
519 		lio_dev_err(lio_dev,
520 			    "The size of hash lookup table configured (%d) doesn't match the number the hardware can support (%d)\n",
521 			    reta_size, LIO_RSS_MAX_TABLE_SZ);
522 		return -EINVAL;
523 	}
524 
525 	/* flush added to prevent cmd failure
526 	 * in case the queue is full
527 	 */
528 	lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);
529 
530 	memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
531 	memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));
532 
533 	rss_param = (struct lio_rss_set *)&ctrl_pkt.udd[0];
534 
535 	ctrl_cmd.eth_dev = eth_dev;
536 	ctrl_cmd.cond = 0;
537 
538 	ctrl_pkt.ncmd.s.cmd = LIO_CMD_SET_RSS;
539 	ctrl_pkt.ncmd.s.more = sizeof(struct lio_rss_set) >> 3;
540 	ctrl_pkt.ctrl_cmd = &ctrl_cmd;
541 
542 	rss_param->param.flags = 0xF;
543 	rss_param->param.flags &= ~LIO_RSS_PARAM_ITABLE_UNCHANGED;
544 	rss_param->param.itablesize = LIO_RSS_MAX_TABLE_SZ;
545 
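	/* Each rte_eth_rss_reta_entry64 covers RTE_RETA_GROUP_SIZE (64)
	 * table entries; only entries whose bit is set in the group mask
	 * are copied into the local indirection table.
	 */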
546 	for (i = 0; i < (reta_size / RTE_RETA_GROUP_SIZE); i++) {
547 		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++) {
548 			if ((reta_conf[i].mask) & ((uint64_t)1 << j)) {
549 				index = (i * RTE_RETA_GROUP_SIZE) + j;
550 				rss_state->itable[index] = reta_conf[i].reta[j];
551 			}
552 		}
553 	}
554 
555 	rss_state->itable_size = LIO_RSS_MAX_TABLE_SZ;
556 	memcpy(rss_param->itable, rss_state->itable, rss_state->itable_size);
557 
558 	lio_swap_8B_data((uint64_t *)rss_param, LIO_RSS_PARAM_SIZE >> 3);
559 
560 	if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
561 		lio_dev_err(lio_dev, "Failed to set rss hash\n");
562 		return -1;
563 	}
564 
565 	if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) {
566 		lio_dev_err(lio_dev, "Set rss hash timed out\n");
567 		return -1;
568 	}
569 
570 	return 0;
571 }
572 
573 static int
574 lio_dev_rss_reta_query(struct rte_eth_dev *eth_dev,
575 		       struct rte_eth_rss_reta_entry64 *reta_conf,
576 		       uint16_t reta_size)
577 {
578 	struct lio_device *lio_dev = LIO_DEV(eth_dev);
579 	struct lio_rss_ctx *rss_state = &lio_dev->rss_state;
580 	int i, num;
581 
582 	if (reta_size != LIO_RSS_MAX_TABLE_SZ) {
583 		lio_dev_err(lio_dev,
584 			    "The size of hash lookup table configured (%d) doesn't match the number the hardware can support (%d)\n",
585 			    reta_size, LIO_RSS_MAX_TABLE_SZ);
586 		return -EINVAL;
587 	}
588 
589 	num = reta_size / RTE_RETA_GROUP_SIZE;
590 
591 	for (i = 0; i < num; i++) {
592 		memcpy(reta_conf->reta,
593 		       &rss_state->itable[i * RTE_RETA_GROUP_SIZE],
594 		       RTE_RETA_GROUP_SIZE);
595 		reta_conf++;
596 	}
597 
598 	return 0;
599 }
600 
601 static int
602 lio_dev_rss_hash_conf_get(struct rte_eth_dev *eth_dev,
603 			  struct rte_eth_rss_conf *rss_conf)
604 {
605 	struct lio_device *lio_dev = LIO_DEV(eth_dev);
606 	struct lio_rss_ctx *rss_state = &lio_dev->rss_state;
607 	uint8_t *hash_key = NULL;
608 	uint64_t rss_hf = 0;
609 
610 	if (rss_state->hash_disable) {
611 		lio_dev_info(lio_dev, "RSS disabled in nic\n");
612 		rss_conf->rss_hf = 0;
613 		return 0;
614 	}
615 
616 	/* Get key value */
617 	hash_key = rss_conf->rss_key;
618 	if (hash_key != NULL)
619 		memcpy(hash_key, rss_state->hash_key, rss_state->hash_key_size);
620 
621 	if (rss_state->ip)
622 		rss_hf |= ETH_RSS_IPV4;
623 	if (rss_state->tcp_hash)
624 		rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
625 	if (rss_state->ipv6)
626 		rss_hf |= ETH_RSS_IPV6;
627 	if (rss_state->ipv6_tcp_hash)
628 		rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
629 	if (rss_state->ipv6_ex)
630 		rss_hf |= ETH_RSS_IPV6_EX;
631 	if (rss_state->ipv6_tcp_ex_hash)
632 		rss_hf |= ETH_RSS_IPV6_TCP_EX;
633 
634 	rss_conf->rss_hf = rss_hf;
635 
636 	return 0;
637 }
638 
639 static int
640 lio_dev_rss_hash_update(struct rte_eth_dev *eth_dev,
641 			struct rte_eth_rss_conf *rss_conf)
642 {
643 	struct lio_device *lio_dev = LIO_DEV(eth_dev);
644 	struct lio_rss_ctx *rss_state = &lio_dev->rss_state;
645 	struct lio_rss_set *rss_param;
646 	struct lio_dev_ctrl_cmd ctrl_cmd;
647 	struct lio_ctrl_pkt ctrl_pkt;
648 
649 	if (!lio_dev->intf_open) {
650 		lio_dev_err(lio_dev, "Port %d down, can't update hash\n",
651 			    lio_dev->port_id);
652 		return -EINVAL;
653 	}
654 
655 	/* flush added to prevent cmd failure
656 	 * in case the queue is full
657 	 */
658 	lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);
659 
660 	memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
661 	memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));
662 
663 	rss_param = (struct lio_rss_set *)&ctrl_pkt.udd[0];
664 
665 	ctrl_cmd.eth_dev = eth_dev;
666 	ctrl_cmd.cond = 0;
667 
668 	ctrl_pkt.ncmd.s.cmd = LIO_CMD_SET_RSS;
669 	ctrl_pkt.ncmd.s.more = sizeof(struct lio_rss_set) >> 3;
670 	ctrl_pkt.ctrl_cmd = &ctrl_cmd;
671 
672 	rss_param->param.flags = 0xF;
673 
674 	if (rss_conf->rss_key) {
675 		rss_param->param.flags &= ~LIO_RSS_PARAM_HASH_KEY_UNCHANGED;
676 		rss_state->hash_key_size = LIO_RSS_MAX_KEY_SZ;
677 		rss_param->param.hashkeysize = LIO_RSS_MAX_KEY_SZ;
678 		memcpy(rss_state->hash_key, rss_conf->rss_key,
679 		       rss_state->hash_key_size);
680 		memcpy(rss_param->key, rss_state->hash_key,
681 		       rss_state->hash_key_size);
682 	}
683 
684 	if ((rss_conf->rss_hf & LIO_RSS_OFFLOAD_ALL) == 0) {
685 		/* Can't disable RSS through hash flags
686 		 * if it was enabled by default during init
687 		 */
688 		if (!rss_state->hash_disable)
689 			return -EINVAL;
690 
691 		/* This is for --disable-rss during testpmd launch */
692 		rss_param->param.flags |= LIO_RSS_PARAM_DISABLE_RSS;
693 	} else {
694 		uint32_t hashinfo = 0;
695 
696 		/* Can't enable RSS if it was disabled by default during init */
697 		if (rss_state->hash_disable)
698 			return -EINVAL;
699 
700 		if (rss_conf->rss_hf & ETH_RSS_IPV4) {
701 			hashinfo |= LIO_RSS_HASH_IPV4;
702 			rss_state->ip = 1;
703 		} else {
704 			rss_state->ip = 0;
705 		}
706 
707 		if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) {
708 			hashinfo |= LIO_RSS_HASH_TCP_IPV4;
709 			rss_state->tcp_hash = 1;
710 		} else {
711 			rss_state->tcp_hash = 0;
712 		}
713 
714 		if (rss_conf->rss_hf & ETH_RSS_IPV6) {
715 			hashinfo |= LIO_RSS_HASH_IPV6;
716 			rss_state->ipv6 = 1;
717 		} else {
718 			rss_state->ipv6 = 0;
719 		}
720 
721 		if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) {
722 			hashinfo |= LIO_RSS_HASH_TCP_IPV6;
723 			rss_state->ipv6_tcp_hash = 1;
724 		} else {
725 			rss_state->ipv6_tcp_hash = 0;
726 		}
727 
728 		if (rss_conf->rss_hf & ETH_RSS_IPV6_EX) {
729 			hashinfo |= LIO_RSS_HASH_IPV6_EX;
730 			rss_state->ipv6_ex = 1;
731 		} else {
732 			rss_state->ipv6_ex = 0;
733 		}
734 
735 		if (rss_conf->rss_hf & ETH_RSS_IPV6_TCP_EX) {
736 			hashinfo |= LIO_RSS_HASH_TCP_IPV6_EX;
737 			rss_state->ipv6_tcp_ex_hash = 1;
738 		} else {
739 			rss_state->ipv6_tcp_ex_hash = 0;
740 		}
741 
742 		rss_param->param.flags &= ~LIO_RSS_PARAM_HASH_INFO_UNCHANGED;
743 		rss_param->param.hashinfo = hashinfo;
744 	}
745 
746 	lio_swap_8B_data((uint64_t *)rss_param, LIO_RSS_PARAM_SIZE >> 3);
747 
748 	if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
749 		lio_dev_err(lio_dev, "Failed to set rss hash\n");
750 		return -1;
751 	}
752 
753 	if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) {
754 		lio_dev_err(lio_dev, "Set rss hash timed out\n");
755 		return -1;
756 	}
757 
758 	return 0;
759 }
760 
761 /**
762  * Add a VXLAN destination UDP port for an interface.
763  *
764  * @param eth_dev
765  *  Pointer to the structure rte_eth_dev
766  * @param udp_tnl
767  *  UDP tunnel configuration
768  *
769  * @return
770  *  On success return 0
771  *  On failure return -1
772  */
773 static int
774 lio_dev_udp_tunnel_add(struct rte_eth_dev *eth_dev,
775 		       struct rte_eth_udp_tunnel *udp_tnl)
776 {
777 	struct lio_device *lio_dev = LIO_DEV(eth_dev);
778 	struct lio_dev_ctrl_cmd ctrl_cmd;
779 	struct lio_ctrl_pkt ctrl_pkt;
780 
781 	if (udp_tnl == NULL)
782 		return -EINVAL;
783 
784 	if (udp_tnl->prot_type != RTE_TUNNEL_TYPE_VXLAN) {
785 		lio_dev_err(lio_dev, "Unsupported tunnel type\n");
786 		return -1;
787 	}
788 
789 	/* flush added to prevent cmd failure
790 	 * in case the queue is full
791 	 */
792 	lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);
793 
794 	memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
795 	memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));
796 
797 	ctrl_cmd.eth_dev = eth_dev;
798 	ctrl_cmd.cond = 0;
799 
800 	ctrl_pkt.ncmd.s.cmd = LIO_CMD_VXLAN_PORT_CONFIG;
801 	ctrl_pkt.ncmd.s.param1 = udp_tnl->udp_port;
802 	ctrl_pkt.ncmd.s.more = LIO_CMD_VXLAN_PORT_ADD;
803 	ctrl_pkt.ctrl_cmd = &ctrl_cmd;
804 
805 	if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
806 		lio_dev_err(lio_dev, "Failed to send VXLAN_PORT_ADD command\n");
807 		return -1;
808 	}
809 
810 	if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) {
811 		lio_dev_err(lio_dev, "VXLAN_PORT_ADD command timed out\n");
812 		return -1;
813 	}
814 
815 	return 0;
816 }
817 
818 /**
819  * Remove a VXLAN destination UDP port for an interface.
820  *
821  * @param eth_dev
822  *  Pointer to the structure rte_eth_dev
823  * @param udp_tnl
824  *  UDP tunnel configuration
825  *
826  * @return
827  *  On success return 0
828  *  On failure return -1
829  */
830 static int
831 lio_dev_udp_tunnel_del(struct rte_eth_dev *eth_dev,
832 		       struct rte_eth_udp_tunnel *udp_tnl)
833 {
834 	struct lio_device *lio_dev = LIO_DEV(eth_dev);
835 	struct lio_dev_ctrl_cmd ctrl_cmd;
836 	struct lio_ctrl_pkt ctrl_pkt;
837 
838 	if (udp_tnl == NULL)
839 		return -EINVAL;
840 
841 	if (udp_tnl->prot_type != RTE_TUNNEL_TYPE_VXLAN) {
842 		lio_dev_err(lio_dev, "Unsupported tunnel type\n");
843 		return -1;
844 	}
845 
846 	/* flush added to prevent cmd failure
847 	 * in case the queue is full
848 	 */
849 	lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);
850 
851 	memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
852 	memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));
853 
854 	ctrl_cmd.eth_dev = eth_dev;
855 	ctrl_cmd.cond = 0;
856 
857 	ctrl_pkt.ncmd.s.cmd = LIO_CMD_VXLAN_PORT_CONFIG;
858 	ctrl_pkt.ncmd.s.param1 = udp_tnl->udp_port;
859 	ctrl_pkt.ncmd.s.more = LIO_CMD_VXLAN_PORT_DEL;
860 	ctrl_pkt.ctrl_cmd = &ctrl_cmd;
861 
862 	if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
863 		lio_dev_err(lio_dev, "Failed to send VXLAN_PORT_DEL command\n");
864 		return -1;
865 	}
866 
867 	if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) {
868 		lio_dev_err(lio_dev, "VXLAN_PORT_DEL command timed out\n");
869 		return -1;
870 	}
871 
872 	return 0;
873 }
874 
875 static int
876 lio_dev_vlan_filter_set(struct rte_eth_dev *eth_dev, uint16_t vlan_id, int on)
877 {
878 	struct lio_device *lio_dev = LIO_DEV(eth_dev);
879 	struct lio_dev_ctrl_cmd ctrl_cmd;
880 	struct lio_ctrl_pkt ctrl_pkt;
881 
882 	if (lio_dev->linfo.vlan_is_admin_assigned)
883 		return -EPERM;
884 
885 	/* flush added to prevent cmd failure
886 	 * in case the queue is full
887 	 */
888 	lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);
889 
890 	memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
891 	memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));
892 
893 	ctrl_cmd.eth_dev = eth_dev;
894 	ctrl_cmd.cond = 0;
895 
896 	ctrl_pkt.ncmd.s.cmd = on ?
897 			LIO_CMD_ADD_VLAN_FILTER : LIO_CMD_DEL_VLAN_FILTER;
898 	ctrl_pkt.ncmd.s.param1 = vlan_id;
899 	ctrl_pkt.ctrl_cmd = &ctrl_cmd;
900 
901 	if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
902 		lio_dev_err(lio_dev, "Failed to %s VLAN port\n",
903 			    on ? "add" : "remove");
904 		return -1;
905 	}
906 
907 	if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) {
908 		lio_dev_err(lio_dev, "Command to %s VLAN port timed out\n",
909 			    on ? "add" : "remove");
910 		return -1;
911 	}
912 
913 	return 0;
914 }
915 
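/* Population count: sums the set bits of a 64-bit word with a SWAR
 * reduction (pairs of bits, then nibbles, then bytes), folding the
 * partial sums into the low byte of the result.
 */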
916 static uint64_t
917 lio_hweight64(uint64_t w)
918 {
919 	uint64_t res = w - ((w >> 1) & 0x5555555555555555ul);
920 
921 	res =
922 	    (res & 0x3333333333333333ul) + ((res >> 2) & 0x3333333333333333ul);
923 	res = (res + (res >> 4)) & 0x0F0F0F0F0F0F0F0Ful;
924 	res = res + (res >> 8);
925 	res = res + (res >> 16);
926 
927 	return (res + (res >> 32)) & 0x00000000000000FFul;
928 }
929 
930 static int
931 lio_dev_link_update(struct rte_eth_dev *eth_dev,
932 		    int wait_to_complete __rte_unused)
933 {
934 	struct lio_device *lio_dev = LIO_DEV(eth_dev);
935 	struct rte_eth_link link;
936 
937 	/* Initialize */
938 	memset(&link, 0, sizeof(link));
939 	link.link_status = ETH_LINK_DOWN;
940 	link.link_speed = ETH_SPEED_NUM_NONE;
941 	link.link_duplex = ETH_LINK_HALF_DUPLEX;
942 	link.link_autoneg = ETH_LINK_AUTONEG;
943 
944 	/* Return what we found */
945 	if (lio_dev->linfo.link.s.link_up == 0) {
946 		/* Interface is down */
947 		return rte_eth_linkstatus_set(eth_dev, &link);
948 	}
949 
950 	link.link_status = ETH_LINK_UP; /* Interface is up */
951 	link.link_duplex = ETH_LINK_FULL_DUPLEX;
952 	switch (lio_dev->linfo.link.s.speed) {
953 	case LIO_LINK_SPEED_10000:
954 		link.link_speed = ETH_SPEED_NUM_10G;
955 		break;
956 	case LIO_LINK_SPEED_25000:
957 		link.link_speed = ETH_SPEED_NUM_25G;
958 		break;
959 	default:
960 		link.link_speed = ETH_SPEED_NUM_NONE;
961 		link.link_duplex = ETH_LINK_HALF_DUPLEX;
962 	}
963 
964 	return rte_eth_linkstatus_set(eth_dev, &link);
965 }
966 
967 /**
968  * \brief Send the updated interface flags (promiscuous/allmulticast) to the firmware
969  * @param eth_dev Pointer to the structure rte_eth_dev
970  *
971  * @return
972  *  On success return 0
973  *  On failure return negative errno
974  */
975 static int
976 lio_change_dev_flag(struct rte_eth_dev *eth_dev)
977 {
978 	struct lio_device *lio_dev = LIO_DEV(eth_dev);
979 	struct lio_dev_ctrl_cmd ctrl_cmd;
980 	struct lio_ctrl_pkt ctrl_pkt;
981 
982 	/* flush added to prevent cmd failure
983 	 * in case the queue is full
984 	 */
985 	lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);
986 
987 	memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
988 	memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));
989 
990 	ctrl_cmd.eth_dev = eth_dev;
991 	ctrl_cmd.cond = 0;
992 
993 	/* Create a ctrl pkt command to be sent to core app. */
994 	ctrl_pkt.ncmd.s.cmd = LIO_CMD_CHANGE_DEVFLAGS;
995 	ctrl_pkt.ncmd.s.param1 = lio_dev->ifflags;
996 	ctrl_pkt.ctrl_cmd = &ctrl_cmd;
997 
998 	if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
999 		lio_dev_err(lio_dev, "Failed to send change flag message\n");
1000 		return -EAGAIN;
1001 	}
1002 
1003 	if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) {
1004 		lio_dev_err(lio_dev, "Change dev flag command timed out\n");
1005 		return -ETIMEDOUT;
1006 	}
1007 
1008 	return 0;
1009 }
1010 
1011 static int
1012 lio_dev_promiscuous_enable(struct rte_eth_dev *eth_dev)
1013 {
1014 	struct lio_device *lio_dev = LIO_DEV(eth_dev);
1015 
1016 	if (strcmp(lio_dev->firmware_version, LIO_VF_TRUST_MIN_VERSION) < 0) {
1017 		lio_dev_err(lio_dev, "Require firmware version >= %s\n",
1018 			    LIO_VF_TRUST_MIN_VERSION);
1019 		return -EAGAIN;
1020 	}
1021 
1022 	if (!lio_dev->intf_open) {
1023 		lio_dev_err(lio_dev, "Port %d down, can't enable promiscuous\n",
1024 			    lio_dev->port_id);
1025 		return -EAGAIN;
1026 	}
1027 
1028 	lio_dev->ifflags |= LIO_IFFLAG_PROMISC;
1029 	return lio_change_dev_flag(eth_dev);
1030 }
1031 
1032 static int
1033 lio_dev_promiscuous_disable(struct rte_eth_dev *eth_dev)
1034 {
1035 	struct lio_device *lio_dev = LIO_DEV(eth_dev);
1036 
1037 	if (strcmp(lio_dev->firmware_version, LIO_VF_TRUST_MIN_VERSION) < 0) {
1038 		lio_dev_err(lio_dev, "Require firmware version >= %s\n",
1039 			    LIO_VF_TRUST_MIN_VERSION);
1040 		return -EAGAIN;
1041 	}
1042 
1043 	if (!lio_dev->intf_open) {
1044 		lio_dev_err(lio_dev, "Port %d down, can't disable promiscuous\n",
1045 			    lio_dev->port_id);
1046 		return -EAGAIN;
1047 	}
1048 
1049 	lio_dev->ifflags &= ~LIO_IFFLAG_PROMISC;
1050 	return lio_change_dev_flag(eth_dev);
1051 }
1052 
1053 static int
1054 lio_dev_allmulticast_enable(struct rte_eth_dev *eth_dev)
1055 {
1056 	struct lio_device *lio_dev = LIO_DEV(eth_dev);
1057 
1058 	if (!lio_dev->intf_open) {
1059 		lio_dev_err(lio_dev, "Port %d down, can't enable multicast\n",
1060 			    lio_dev->port_id);
1061 		return -EAGAIN;
1062 	}
1063 
1064 	lio_dev->ifflags |= LIO_IFFLAG_ALLMULTI;
1065 	return lio_change_dev_flag(eth_dev);
1066 }
1067 
1068 static int
1069 lio_dev_allmulticast_disable(struct rte_eth_dev *eth_dev)
1070 {
1071 	struct lio_device *lio_dev = LIO_DEV(eth_dev);
1072 
1073 	if (!lio_dev->intf_open) {
1074 		lio_dev_err(lio_dev, "Port %d down, can't disable multicast\n",
1075 			    lio_dev->port_id);
1076 		return -EAGAIN;
1077 	}
1078 
1079 	lio_dev->ifflags &= ~LIO_IFFLAG_ALLMULTI;
1080 	return lio_change_dev_flag(eth_dev);
1081 }
1082 
1083 static void
1084 lio_dev_rss_configure(struct rte_eth_dev *eth_dev)
1085 {
1086 	struct lio_device *lio_dev = LIO_DEV(eth_dev);
1087 	struct lio_rss_ctx *rss_state = &lio_dev->rss_state;
1088 	struct rte_eth_rss_reta_entry64 reta_conf[8];
1089 	struct rte_eth_rss_conf rss_conf;
1090 	uint16_t i;
1091 
1092 	/* Configure the RSS key and the RSS protocols used to compute
1093 	 * the RSS hash of input packets.
1094 	 */
1095 	rss_conf = eth_dev->data->dev_conf.rx_adv_conf.rss_conf;
1096 	if ((rss_conf.rss_hf & LIO_RSS_OFFLOAD_ALL) == 0) {
1097 		rss_state->hash_disable = 1;
1098 		lio_dev_rss_hash_update(eth_dev, &rss_conf);
1099 		return;
1100 	}
1101 
1102 	if (rss_conf.rss_key == NULL)
1103 		rss_conf.rss_key = lio_rss_key; /* Default hash key */
1104 
1105 	lio_dev_rss_hash_update(eth_dev, &rss_conf);
1106 
1107 	memset(reta_conf, 0, sizeof(reta_conf));
1108 	for (i = 0; i < LIO_RSS_MAX_TABLE_SZ; i++) {
1109 		uint8_t q_idx, conf_idx, reta_idx;
1110 
1111 		q_idx = (uint8_t)((eth_dev->data->nb_rx_queues > 1) ?
1112 				  i % eth_dev->data->nb_rx_queues : 0);
1113 		conf_idx = i / RTE_RETA_GROUP_SIZE;
1114 		reta_idx = i % RTE_RETA_GROUP_SIZE;
1115 		reta_conf[conf_idx].reta[reta_idx] = q_idx;
1116 		reta_conf[conf_idx].mask |= ((uint64_t)1 << reta_idx);
1117 	}
1118 
1119 	lio_dev_rss_reta_update(eth_dev, reta_conf, LIO_RSS_MAX_TABLE_SZ);
1120 }
1121 
1122 static void
1123 lio_dev_mq_rx_configure(struct rte_eth_dev *eth_dev)
1124 {
1125 	struct lio_device *lio_dev = LIO_DEV(eth_dev);
1126 	struct lio_rss_ctx *rss_state = &lio_dev->rss_state;
1127 	struct rte_eth_rss_conf rss_conf;
1128 
1129 	switch (eth_dev->data->dev_conf.rxmode.mq_mode) {
1130 	case ETH_MQ_RX_RSS:
1131 		lio_dev_rss_configure(eth_dev);
1132 		break;
1133 	case ETH_MQ_RX_NONE:
1134 	/* If mq_mode is none, disable RSS. */
1135 	default:
1136 		memset(&rss_conf, 0, sizeof(rss_conf));
1137 		rss_state->hash_disable = 1;
1138 		lio_dev_rss_hash_update(eth_dev, &rss_conf);
1139 	}
1140 }
1141 
1142 /**
1143  * Setup our receive queue/ringbuffer. This is the
1144  * queue the Octeon uses to send us packets and
1145  * responses. We are given a memory pool for our
1146  * packet buffers that are used to populate the receive
1147  * queue.
1148  *
1149  * @param eth_dev
1150  *    Pointer to the structure rte_eth_dev
1151  * @param q_no
1152  *    Queue number
1153  * @param num_rx_descs
1154  *    Number of entries in the queue
1155  * @param socket_id
1156  *    Where to allocate memory
1157  * @param rx_conf
1158  *    Pointer to the structure rte_eth_rxconf
1159  * @param mp
1160  *    Pointer to the packet pool
1161  *
1162  * @return
1163  *    - On success, return 0
1164  *    - On failure, return -1
1165  */
1166 static int
1167 lio_dev_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t q_no,
1168 		       uint16_t num_rx_descs, unsigned int socket_id,
1169 		       const struct rte_eth_rxconf *rx_conf __rte_unused,
1170 		       struct rte_mempool *mp)
1171 {
1172 	struct lio_device *lio_dev = LIO_DEV(eth_dev);
1173 	struct rte_pktmbuf_pool_private *mbp_priv;
1174 	uint32_t fw_mapped_oq;
1175 	uint16_t buf_size;
1176 
1177 	if (q_no >= lio_dev->nb_rx_queues) {
1178 		lio_dev_err(lio_dev, "Invalid rx queue number %u\n", q_no);
1179 		return -EINVAL;
1180 	}
1181 
1182 	lio_dev_dbg(lio_dev, "setting up rx queue %u\n", q_no);
1183 
1184 	fw_mapped_oq = lio_dev->linfo.rxpciq[q_no].s.q_no;
1185 
1186 	/* Free previous allocation if any */
1187 	if (eth_dev->data->rx_queues[q_no] != NULL) {
1188 		lio_dev_rx_queue_release(eth_dev->data->rx_queues[q_no]);
1189 		eth_dev->data->rx_queues[q_no] = NULL;
1190 	}
1191 
1192 	mbp_priv = rte_mempool_get_priv(mp);
1193 	buf_size = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;
1194 
1195 	if (lio_setup_droq(lio_dev, fw_mapped_oq, num_rx_descs, buf_size, mp,
1196 			   socket_id)) {
1197 		lio_dev_err(lio_dev, "droq allocation failed\n");
1198 		return -1;
1199 	}
1200 
1201 	eth_dev->data->rx_queues[q_no] = lio_dev->droq[fw_mapped_oq];
1202 
1203 	return 0;
1204 }
1205 
1206 /**
1207  * Release the receive queue/ringbuffer. Called by
1208  * the upper layers.
1209  *
1210  * @param rxq
1211  *    Opaque pointer to the receive queue to release
1212  *
1213  * @return
1214  *    - nothing
1215  */
1216 void
1217 lio_dev_rx_queue_release(void *rxq)
1218 {
1219 	struct lio_droq *droq = rxq;
1220 	int oq_no;
1221 
1222 	if (droq) {
1223 		oq_no = droq->q_no;
1224 		lio_delete_droq_queue(droq->lio_dev, oq_no);
1225 	}
1226 }
1227 
1228 /**
1229  * Allocate and initialize SW ring. Initialize associated HW registers.
1230  *
1231  * @param eth_dev
1232  *   Pointer to structure rte_eth_dev
1233  *
1234  * @param q_no
1235  *   Queue number
1236  *
1237  * @param num_tx_descs
1238  *   Number of ringbuffer descriptors
1239  *
1240  * @param socket_id
1241  *   NUMA socket id, used for memory allocations
1242  *
1243  * @param tx_conf
1244  *   Pointer to the structure rte_eth_txconf
1245  *
1246  * @return
1247  *   - On success, return 0
1248  *   - On failure, return -errno value
1249  */
1250 static int
1251 lio_dev_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t q_no,
1252 		       uint16_t num_tx_descs, unsigned int socket_id,
1253 		       const struct rte_eth_txconf *tx_conf __rte_unused)
1254 {
1255 	struct lio_device *lio_dev = LIO_DEV(eth_dev);
1256 	int fw_mapped_iq = lio_dev->linfo.txpciq[q_no].s.q_no;
1257 	int retval;
1258 
1259 	if (q_no >= lio_dev->nb_tx_queues) {
1260 		lio_dev_err(lio_dev, "Invalid tx queue number %u\n", q_no);
1261 		return -EINVAL;
1262 	}
1263 
1264 	lio_dev_dbg(lio_dev, "setting up tx queue %u\n", q_no);
1265 
1266 	/* Free previous allocation if any */
1267 	if (eth_dev->data->tx_queues[q_no] != NULL) {
1268 		lio_dev_tx_queue_release(eth_dev->data->tx_queues[q_no]);
1269 		eth_dev->data->tx_queues[q_no] = NULL;
1270 	}
1271 
1272 	retval = lio_setup_iq(lio_dev, q_no, lio_dev->linfo.txpciq[q_no],
1273 			      num_tx_descs, lio_dev, socket_id);
1274 
1275 	if (retval) {
1276 		lio_dev_err(lio_dev, "Runtime IQ(TxQ) creation failed.\n");
1277 		return retval;
1278 	}
1279 
1280 	retval = lio_setup_sglists(lio_dev, q_no, fw_mapped_iq,
1281 				lio_dev->instr_queue[fw_mapped_iq]->nb_desc,
1282 				socket_id);
1283 
1284 	if (retval) {
1285 		lio_delete_instruction_queue(lio_dev, fw_mapped_iq);
1286 		return retval;
1287 	}
1288 
1289 	eth_dev->data->tx_queues[q_no] = lio_dev->instr_queue[fw_mapped_iq];
1290 
1291 	return 0;
1292 }
1293 
1294 /**
1295  * Release the transmit queue/ringbuffer. Called by
1296  * the upper layers.
1297  *
1298  * @param txq
1299  *    Opaque pointer to the transmit queue to release
1300  *
1301  * @return
1302  *    - nothing
1303  */
1304 void
1305 lio_dev_tx_queue_release(void *txq)
1306 {
1307 	struct lio_instr_queue *tq = txq;
1308 	uint32_t fw_mapped_iq_no;
1309 
1310 
1311 	if (tq) {
1312 		/* Free sg_list */
1313 		lio_delete_sglist(tq);
1314 
1315 		fw_mapped_iq_no = tq->txpciq.s.q_no;
1316 		lio_delete_instruction_queue(tq->lio_dev, fw_mapped_iq_no);
1317 	}
1318 }
1319 
1320 /**
1321  * API to check link state.
1322  */
1323 static void
1324 lio_dev_get_link_status(struct rte_eth_dev *eth_dev)
1325 {
1326 	struct lio_device *lio_dev = LIO_DEV(eth_dev);
1327 	uint16_t timeout = LIO_MAX_CMD_TIMEOUT;
1328 	struct lio_link_status_resp *resp;
1329 	union octeon_link_status *ls;
1330 	struct lio_soft_command *sc;
1331 	uint32_t resp_size;
1332 
1333 	if (!lio_dev->intf_open)
1334 		return;
1335 
1336 	resp_size = sizeof(struct lio_link_status_resp);
1337 	sc = lio_alloc_soft_command(lio_dev, 0, resp_size, 0);
1338 	if (sc == NULL)
1339 		return;
1340 
1341 	resp = (struct lio_link_status_resp *)sc->virtrptr;
1342 	lio_prepare_soft_command(lio_dev, sc, LIO_OPCODE,
1343 				 LIO_OPCODE_INFO, 0, 0, 0);
1344 
1345 	/* Setting wait time in seconds */
1346 	sc->wait_time = LIO_MAX_CMD_TIMEOUT / 1000;
1347 
1348 	if (lio_send_soft_command(lio_dev, sc) == LIO_IQ_SEND_FAILED)
1349 		goto get_status_fail;
1350 
1351 	while ((*sc->status_word == LIO_COMPLETION_WORD_INIT) && --timeout) {
1352 		lio_flush_iq(lio_dev, lio_dev->instr_queue[sc->iq_no]);
1353 		rte_delay_ms(1);
1354 	}
1355 
1356 	if (resp->status)
1357 		goto get_status_fail;
1358 
1359 	ls = &resp->link_info.link;
1360 
1361 	lio_swap_8B_data((uint64_t *)ls, sizeof(union octeon_link_status) >> 3);
1362 
1363 	if (lio_dev->linfo.link.link_status64 != ls->link_status64) {
1364 		if (ls->s.mtu < eth_dev->data->mtu) {
1365 			lio_dev_info(lio_dev, "Lowered VF MTU to %d as PF MTU dropped\n",
1366 				     ls->s.mtu);
1367 			eth_dev->data->mtu = ls->s.mtu;
1368 		}
1369 		lio_dev->linfo.link.link_status64 = ls->link_status64;
1370 		lio_dev_link_update(eth_dev, 0);
1371 	}
1372 
1373 	lio_free_soft_command(sc);
1374 
1375 	return;
1376 
1377 get_status_fail:
1378 	lio_free_soft_command(sc);
1379 }
1380 
1381 /* This function will be invoked every LIO_LSC_TIMEOUT us (100 ms)
1382  * and will update the link state if it has changed.
1383  */
1384 static void
1385 lio_sync_link_state_check(void *eth_dev)
1386 {
1387 	struct lio_device *lio_dev =
1388 		(((struct rte_eth_dev *)eth_dev)->data->dev_private);
1389 
1390 	if (lio_dev->port_configured)
1391 		lio_dev_get_link_status(eth_dev);
1392 
1393 	/* Schedule periodic link status check.
1394 	 * Stop the check when the interface is closed and start it again when it is opened.
1395 	 */
1396 	if (lio_dev->intf_open)
1397 		rte_eal_alarm_set(LIO_LSC_TIMEOUT, lio_sync_link_state_check,
1398 				  eth_dev);
1399 }
1400 
1401 static int
1402 lio_dev_start(struct rte_eth_dev *eth_dev)
1403 {
1404 	uint16_t mtu;
1405 	uint32_t frame_len = eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;
1406 	struct lio_device *lio_dev = LIO_DEV(eth_dev);
1407 	uint16_t timeout = LIO_MAX_CMD_TIMEOUT;
1408 	int ret = 0;
1409 
1410 	lio_dev_info(lio_dev, "Starting port %d\n", eth_dev->data->port_id);
1411 
1412 	if (lio_dev->fn_list.enable_io_queues(lio_dev))
1413 		return -1;
1414 
1415 	if (lio_send_rx_ctrl_cmd(eth_dev, 1))
1416 		return -1;
1417 
1418 	/* Ready for link status updates */
1419 	lio_dev->intf_open = 1;
1420 	rte_mb();
1421 
1422 	/* Configure RSS if the device is configured with multiple RX queues. */
1423 	lio_dev_mq_rx_configure(eth_dev);
1424 
1425 	/* Before updating the link info,
1426 	 * linfo.link.link_status64 must be set to 0.
1427 	 */
1428 	lio_dev->linfo.link.link_status64 = 0;
1429 
1430 	/* start polling for lsc */
1431 	ret = rte_eal_alarm_set(LIO_LSC_TIMEOUT,
1432 				lio_sync_link_state_check,
1433 				eth_dev);
1434 	if (ret) {
1435 		lio_dev_err(lio_dev,
1436 			    "link state check handler creation failed\n");
1437 		goto dev_lsc_handle_error;
1438 	}
1439 
1440 	while ((lio_dev->linfo.link.link_status64 == 0) && (--timeout))
1441 		rte_delay_ms(1);
1442 
1443 	if (lio_dev->linfo.link.link_status64 == 0) {
1444 		ret = -1;
1445 		goto dev_mtu_set_error;
1446 	}
1447 
1448 	mtu = (uint16_t)(frame_len - RTE_ETHER_HDR_LEN - RTE_ETHER_CRC_LEN);
1449 	if (mtu < RTE_ETHER_MIN_MTU)
1450 		mtu = RTE_ETHER_MIN_MTU;
1451 
1452 	if (eth_dev->data->mtu != mtu) {
1453 		ret = lio_dev_mtu_set(eth_dev, mtu);
1454 		if (ret)
1455 			goto dev_mtu_set_error;
1456 	}
1457 
1458 	return 0;
1459 
1460 dev_mtu_set_error:
1461 	rte_eal_alarm_cancel(lio_sync_link_state_check, eth_dev);
1462 
1463 dev_lsc_handle_error:
1464 	lio_dev->intf_open = 0;
1465 	lio_send_rx_ctrl_cmd(eth_dev, 0);
1466 
1467 	return ret;
1468 }
1469 
1470 /* Stop device and disable input/output functions */
1471 static void
1472 lio_dev_stop(struct rte_eth_dev *eth_dev)
1473 {
1474 	struct lio_device *lio_dev = LIO_DEV(eth_dev);
1475 
1476 	lio_dev_info(lio_dev, "Stopping port %d\n", eth_dev->data->port_id);
1477 	lio_dev->intf_open = 0;
1478 	rte_mb();
1479 
1480 	/* Cancel callback if still running. */
1481 	rte_eal_alarm_cancel(lio_sync_link_state_check, eth_dev);
1482 
1483 	lio_send_rx_ctrl_cmd(eth_dev, 0);
1484 
1485 	lio_wait_for_instr_fetch(lio_dev);
1486 
1487 	/* Clear recorded link status */
1488 	lio_dev->linfo.link.link_status64 = 0;
1489 }
1490 
1491 static int
1492 lio_dev_set_link_up(struct rte_eth_dev *eth_dev)
1493 {
1494 	struct lio_device *lio_dev = LIO_DEV(eth_dev);
1495 
1496 	if (!lio_dev->intf_open) {
1497 		lio_dev_info(lio_dev, "Port is stopped, Start the port first\n");
1498 		return 0;
1499 	}
1500 
1501 	if (lio_dev->linfo.link.s.link_up) {
1502 		lio_dev_info(lio_dev, "Link is already UP\n");
1503 		return 0;
1504 	}
1505 
1506 	if (lio_send_rx_ctrl_cmd(eth_dev, 1)) {
1507 		lio_dev_err(lio_dev, "Unable to set Link UP\n");
1508 		return -1;
1509 	}
1510 
1511 	lio_dev->linfo.link.s.link_up = 1;
1512 	eth_dev->data->dev_link.link_status = ETH_LINK_UP;
1513 
1514 	return 0;
1515 }
1516 
1517 static int
1518 lio_dev_set_link_down(struct rte_eth_dev *eth_dev)
1519 {
1520 	struct lio_device *lio_dev = LIO_DEV(eth_dev);
1521 
1522 	if (!lio_dev->intf_open) {
1523 		lio_dev_info(lio_dev, "Port is stopped, Start the port first\n");
1524 		return 0;
1525 	}
1526 
1527 	if (!lio_dev->linfo.link.s.link_up) {
1528 		lio_dev_info(lio_dev, "Link is already DOWN\n");
1529 		return 0;
1530 	}
1531 
1532 	lio_dev->linfo.link.s.link_up = 0;
1533 	eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
1534 
1535 	if (lio_send_rx_ctrl_cmd(eth_dev, 0)) {
1536 		lio_dev->linfo.link.s.link_up = 1;
1537 		eth_dev->data->dev_link.link_status = ETH_LINK_UP;
1538 		lio_dev_err(lio_dev, "Unable to set Link Down\n");
1539 		return -1;
1540 	}
1541 
1542 	return 0;
1543 }
1544 
1545 /**
1546  * Stop the device and release its resources (queues, mailbox,
1547  * gather lists). port_configured is cleared, and an FLR is
1548  * requested when the device is bound to igb_uio.
1549  *
1550  * @param eth_dev
1551  *    Pointer to the structure rte_eth_dev
1552  *
1553  * @return
1554  *    - nothing
1555  */
1556 static void
1557 lio_dev_close(struct rte_eth_dev *eth_dev)
1558 {
1559 	struct lio_device *lio_dev = LIO_DEV(eth_dev);
1560 
1561 	lio_dev_info(lio_dev, "closing port %d\n", eth_dev->data->port_id);
1562 
1563 	if (lio_dev->intf_open)
1564 		lio_dev_stop(eth_dev);
1565 
1566 	/* Reset ioq regs */
1567 	lio_dev->fn_list.setup_device_regs(lio_dev);
1568 
1569 	if (lio_dev->pci_dev->kdrv == RTE_KDRV_IGB_UIO) {
1570 		cn23xx_vf_ask_pf_to_do_flr(lio_dev);
1571 		rte_delay_ms(LIO_PCI_FLR_WAIT);
1572 	}
1573 
1574 	/* lio_free_mbox */
1575 	lio_dev->fn_list.free_mbox(lio_dev);
1576 
1577 	/* Free glist resources */
1578 	rte_free(lio_dev->glist_head);
1579 	rte_free(lio_dev->glist_lock);
1580 	lio_dev->glist_head = NULL;
1581 	lio_dev->glist_lock = NULL;
1582 
1583 	lio_dev->port_configured = 0;
1584 
1585 	 /* Delete all queues */
1586 	lio_dev_clear_queues(eth_dev);
1587 }
1588 
1589 /**
1590  * Enable tunnel rx checksum verification from firmware.
1591  */
1592 static void
1593 lio_enable_hw_tunnel_rx_checksum(struct rte_eth_dev *eth_dev)
1594 {
1595 	struct lio_device *lio_dev = LIO_DEV(eth_dev);
1596 	struct lio_dev_ctrl_cmd ctrl_cmd;
1597 	struct lio_ctrl_pkt ctrl_pkt;
1598 
1599 	/* flush added to prevent cmd failure
1600 	 * in case the queue is full
1601 	 */
1602 	lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);
1603 
1604 	memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
1605 	memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));
1606 
1607 	ctrl_cmd.eth_dev = eth_dev;
1608 	ctrl_cmd.cond = 0;
1609 
1610 	ctrl_pkt.ncmd.s.cmd = LIO_CMD_TNL_RX_CSUM_CTL;
1611 	ctrl_pkt.ncmd.s.param1 = LIO_CMD_RXCSUM_ENABLE;
1612 	ctrl_pkt.ctrl_cmd = &ctrl_cmd;
1613 
1614 	if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
1615 		lio_dev_err(lio_dev, "Failed to send TNL_RX_CSUM command\n");
1616 		return;
1617 	}
1618 
1619 	if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd))
1620 		lio_dev_err(lio_dev, "TNL_RX_CSUM command timed out\n");
1621 }
1622 
1623 /**
1624  * Enable checksum calculation for inner packet in a tunnel.
1625  */
1626 static void
1627 lio_enable_hw_tunnel_tx_checksum(struct rte_eth_dev *eth_dev)
1628 {
1629 	struct lio_device *lio_dev = LIO_DEV(eth_dev);
1630 	struct lio_dev_ctrl_cmd ctrl_cmd;
1631 	struct lio_ctrl_pkt ctrl_pkt;
1632 
1633 	/* flush added to prevent cmd failure
1634 	 * in case the queue is full
1635 	 */
1636 	lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);
1637 
1638 	memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
1639 	memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));
1640 
1641 	ctrl_cmd.eth_dev = eth_dev;
1642 	ctrl_cmd.cond = 0;
1643 
1644 	ctrl_pkt.ncmd.s.cmd = LIO_CMD_TNL_TX_CSUM_CTL;
1645 	ctrl_pkt.ncmd.s.param1 = LIO_CMD_TXCSUM_ENABLE;
1646 	ctrl_pkt.ctrl_cmd = &ctrl_cmd;
1647 
1648 	if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
1649 		lio_dev_err(lio_dev, "Failed to send TNL_TX_CSUM command\n");
1650 		return;
1651 	}
1652 
1653 	if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd))
1654 		lio_dev_err(lio_dev, "TNL_TX_CSUM command timed out\n");
1655 }
1656 
1657 static int
1658 lio_send_queue_count_update(struct rte_eth_dev *eth_dev, int num_txq,
1659 			    int num_rxq)
1660 {
1661 	struct lio_device *lio_dev = LIO_DEV(eth_dev);
1662 	struct lio_dev_ctrl_cmd ctrl_cmd;
1663 	struct lio_ctrl_pkt ctrl_pkt;
1664 
1665 	if (strcmp(lio_dev->firmware_version, LIO_Q_RECONF_MIN_VERSION) < 0) {
1666 		lio_dev_err(lio_dev, "Require firmware version >= %s\n",
1667 			    LIO_Q_RECONF_MIN_VERSION);
1668 		return -ENOTSUP;
1669 	}
1670 
1671 	/* flush added to prevent cmd failure
1672 	 * in case the queue is full
1673 	 */
1674 	lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);
1675 
1676 	memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
1677 	memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));
1678 
1679 	ctrl_cmd.eth_dev = eth_dev;
1680 	ctrl_cmd.cond = 0;
1681 
1682 	ctrl_pkt.ncmd.s.cmd = LIO_CMD_QUEUE_COUNT_CTL;
1683 	ctrl_pkt.ncmd.s.param1 = num_txq;
1684 	ctrl_pkt.ncmd.s.param2 = num_rxq;
1685 	ctrl_pkt.ctrl_cmd = &ctrl_cmd;
1686 
1687 	if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
1688 		lio_dev_err(lio_dev, "Failed to send queue count control command\n");
1689 		return -1;
1690 	}
1691 
1692 	if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) {
1693 		lio_dev_err(lio_dev, "Queue count control command timed out\n");
1694 		return -1;
1695 	}
1696 
1697 	return 0;
1698 }
1699 
1700 static int
1701 lio_reconf_queues(struct rte_eth_dev *eth_dev, int num_txq, int num_rxq)
1702 {
1703 	struct lio_device *lio_dev = LIO_DEV(eth_dev);
1704 
1705 	if (lio_dev->nb_rx_queues != num_rxq ||
1706 	    lio_dev->nb_tx_queues != num_txq) {
1707 		if (lio_send_queue_count_update(eth_dev, num_txq, num_rxq))
1708 			return -1;
1709 		lio_dev->nb_rx_queues = num_rxq;
1710 		lio_dev->nb_tx_queues = num_txq;
1711 	}
1712 
1713 	if (lio_dev->intf_open)
1714 		lio_dev_stop(eth_dev);
1715 
1716 	/* Reset ioq registers */
1717 	if (lio_dev->fn_list.setup_device_regs(lio_dev)) {
1718 		lio_dev_err(lio_dev, "Failed to configure device registers\n");
1719 		return -1;
1720 	}
1721 
1722 	return 0;
1723 }
1724 
1725 static int
1726 lio_dev_configure(struct rte_eth_dev *eth_dev)
1727 {
1728 	struct lio_device *lio_dev = LIO_DEV(eth_dev);
1729 	uint16_t timeout = LIO_MAX_CMD_TIMEOUT;
1730 	int retval, num_iqueues, num_oqueues;
1731 	uint8_t mac[RTE_ETHER_ADDR_LEN], i;
1732 	struct lio_if_cfg_resp *resp;
1733 	struct lio_soft_command *sc;
1734 	union lio_if_cfg if_cfg;
1735 	uint32_t resp_size;
1736 
1737 	PMD_INIT_FUNC_TRACE();
1738 
1739 	if (eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
1740 		eth_dev->data->dev_conf.rxmode.offloads |=
1741 			DEV_RX_OFFLOAD_RSS_HASH;
1742 
1743 	/* Inform firmware about change in number of queues to use.
1744 	 * Disable IO queues and reset registers for re-configuration.
1745 	 */
1746 	if (lio_dev->port_configured)
1747 		return lio_reconf_queues(eth_dev,
1748 					 eth_dev->data->nb_tx_queues,
1749 					 eth_dev->data->nb_rx_queues);
1750 
1751 	lio_dev->nb_rx_queues = eth_dev->data->nb_rx_queues;
1752 	lio_dev->nb_tx_queues = eth_dev->data->nb_tx_queues;
1753 
1754 	/* Set max number of queues which can be re-configured. */
1755 	lio_dev->max_rx_queues = eth_dev->data->nb_rx_queues;
1756 	lio_dev->max_tx_queues = eth_dev->data->nb_tx_queues;
1757 
1758 	resp_size = sizeof(struct lio_if_cfg_resp);
1759 	sc = lio_alloc_soft_command(lio_dev, 0, resp_size, 0);
1760 	if (sc == NULL)
1761 		return -ENOMEM;
1762 
1763 	resp = (struct lio_if_cfg_resp *)sc->virtrptr;
1764 
1765 	/* Firmware doesn't have the capability to reconfigure the queues,
1766 	 * so claim all queues and use as many as required.
1767 	 */
1768 	if_cfg.if_cfg64 = 0;
1769 	if_cfg.s.num_iqueues = lio_dev->nb_tx_queues;
1770 	if_cfg.s.num_oqueues = lio_dev->nb_rx_queues;
1771 	if_cfg.s.base_queue = 0;
1772 
1773 	if_cfg.s.gmx_port_id = lio_dev->pf_num;
1774 
1775 	lio_prepare_soft_command(lio_dev, sc, LIO_OPCODE,
1776 				 LIO_OPCODE_IF_CFG, 0,
1777 				 if_cfg.if_cfg64, 0);
1778 
1779 	/* Setting wait time in seconds */
1780 	sc->wait_time = LIO_MAX_CMD_TIMEOUT / 1000;
1781 
1782 	retval = lio_send_soft_command(lio_dev, sc);
1783 	if (retval == LIO_IQ_SEND_FAILED) {
1784 		lio_dev_err(lio_dev, "iq/oq config failed status: %x\n",
1785 			    retval);
1786 		/* Soft instr is freed by driver in case of failure. */
1787 		goto nic_config_fail;
1788 	}
1789 
1790 	/* Poll until the completion status word indicates that the
1791 	 * response arrived or the command timed out.
1792 	 */
1793 	while ((*sc->status_word == LIO_COMPLETION_WORD_INIT) && --timeout) {
1794 		lio_flush_iq(lio_dev, lio_dev->instr_queue[sc->iq_no]);
1795 		lio_process_ordered_list(lio_dev);
1796 		rte_delay_ms(1);
1797 	}
1798 
1799 	retval = resp->status;
1800 	if (retval) {
1801 		lio_dev_err(lio_dev, "iq/oq config failed\n");
1802 		goto nic_config_fail;
1803 	}
1804 
1805 	strlcpy(lio_dev->firmware_version,
1806 		resp->cfg_info.lio_firmware_version, LIO_FW_VERSION_LENGTH);
1807 
1808 	lio_swap_8B_data((uint64_t *)(&resp->cfg_info),
1809 			 sizeof(struct octeon_if_cfg_info) >> 3);
1810 
1811 	num_iqueues = lio_hweight64(resp->cfg_info.iqmask);
1812 	num_oqueues = lio_hweight64(resp->cfg_info.oqmask);
1813 
1814 	if (!(num_iqueues) || !(num_oqueues)) {
1815 		lio_dev_err(lio_dev,
1816 			    "Got bad iqueues (%016lx) or oqueues (%016lx) from firmware.\n",
1817 			    (unsigned long)resp->cfg_info.iqmask,
1818 			    (unsigned long)resp->cfg_info.oqmask);
1819 		goto nic_config_fail;
1820 	}
1821 
1822 	lio_dev_dbg(lio_dev,
1823 		    "interface %d, iqmask %016lx, oqmask %016lx, numiqueues %d, numoqueues %d\n",
1824 		    eth_dev->data->port_id,
1825 		    (unsigned long)resp->cfg_info.iqmask,
1826 		    (unsigned long)resp->cfg_info.oqmask,
1827 		    num_iqueues, num_oqueues);
1828 
1829 	lio_dev->linfo.num_rxpciq = num_oqueues;
1830 	lio_dev->linfo.num_txpciq = num_iqueues;
1831 
1832 	for (i = 0; i < num_oqueues; i++) {
1833 		lio_dev->linfo.rxpciq[i].rxpciq64 =
1834 		    resp->cfg_info.linfo.rxpciq[i].rxpciq64;
1835 		lio_dev_dbg(lio_dev, "index %d OQ %d\n",
1836 			    i, lio_dev->linfo.rxpciq[i].s.q_no);
1837 	}
1838 
1839 	for (i = 0; i < num_iqueues; i++) {
1840 		lio_dev->linfo.txpciq[i].txpciq64 =
1841 		    resp->cfg_info.linfo.txpciq[i].txpciq64;
1842 		lio_dev_dbg(lio_dev, "index %d IQ %d\n",
1843 			    i, lio_dev->linfo.txpciq[i].s.q_no);
1844 	}
1845 
1846 	lio_dev->linfo.hw_addr = resp->cfg_info.linfo.hw_addr;
1847 	lio_dev->linfo.gmxport = resp->cfg_info.linfo.gmxport;
1848 	lio_dev->linfo.link.link_status64 =
1849 			resp->cfg_info.linfo.link.link_status64;
1850 
1851 	/* 64-bit swap required on LE machines */
1852 	lio_swap_8B_data(&lio_dev->linfo.hw_addr, 1);
1853 	for (i = 0; i < RTE_ETHER_ADDR_LEN; i++)
1854 		mac[i] = *((uint8_t *)(((uint8_t *)&lio_dev->linfo.hw_addr) +
1855 				       2 + i));
1856 
1857 	/* Copy the permanent MAC address */
1858 	rte_ether_addr_copy((struct rte_ether_addr *)mac,
1859 			&eth_dev->data->mac_addrs[0]);
1860 
1861 	/* enable firmware checksum support for tunnel packets */
1862 	lio_enable_hw_tunnel_rx_checksum(eth_dev);
1863 	lio_enable_hw_tunnel_tx_checksum(eth_dev);
1864 
1865 	lio_dev->glist_lock =
1866 	    rte_zmalloc(NULL, sizeof(*lio_dev->glist_lock) * num_iqueues, 0);
1867 	if (lio_dev->glist_lock == NULL) {
		/* Release the completed soft command; it is not freed below */
		lio_free_soft_command(sc);
1868 		return -ENOMEM;
	}
1869 
1870 	lio_dev->glist_head =
1871 		rte_zmalloc(NULL, sizeof(*lio_dev->glist_head) * num_iqueues,
1872 			    0);
1873 	if (lio_dev->glist_head == NULL) {
1874 		rte_free(lio_dev->glist_lock);
1875 		lio_dev->glist_lock = NULL;
		lio_free_soft_command(sc);
1876 		return -ENOMEM;
1877 	}
1878 
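	/* Fetch the current link status without waiting */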
1879 	lio_dev_link_update(eth_dev, 0);
1880 
1881 	lio_dev->port_configured = 1;
1882 
1883 	lio_free_soft_command(sc);
1884 
1885 	/* Reset ioq regs */
1886 	lio_dev->fn_list.setup_device_regs(lio_dev);
1887 
1888 	/* Free iq_0 used during init */
1889 	lio_free_instr_queue0(lio_dev);
1890 
1891 	return 0;
1892 
1893 nic_config_fail:
1894 	lio_dev_err(lio_dev, "Failed retval %d\n", retval);
1895 	lio_free_soft_command(sc);
1896 	lio_free_instr_queue0(lio_dev);
1897 
1898 	return -ENODEV;
1899 }
1900 
1901 /* Ethernet device operations supported by the LiquidIO VF PMD */
1902 static const struct eth_dev_ops liovf_eth_dev_ops = {
1903 	.dev_configure		= lio_dev_configure,
1904 	.dev_start		= lio_dev_start,
1905 	.dev_stop		= lio_dev_stop,
1906 	.dev_set_link_up	= lio_dev_set_link_up,
1907 	.dev_set_link_down	= lio_dev_set_link_down,
1908 	.dev_close		= lio_dev_close,
1909 	.promiscuous_enable	= lio_dev_promiscuous_enable,
1910 	.promiscuous_disable	= lio_dev_promiscuous_disable,
1911 	.allmulticast_enable	= lio_dev_allmulticast_enable,
1912 	.allmulticast_disable	= lio_dev_allmulticast_disable,
1913 	.link_update		= lio_dev_link_update,
1914 	.stats_get		= lio_dev_stats_get,
1915 	.xstats_get		= lio_dev_xstats_get,
1916 	.xstats_get_names	= lio_dev_xstats_get_names,
1917 	.stats_reset		= lio_dev_stats_reset,
1918 	.xstats_reset		= lio_dev_xstats_reset,
1919 	.dev_infos_get		= lio_dev_info_get,
1920 	.vlan_filter_set	= lio_dev_vlan_filter_set,
1921 	.rx_queue_setup		= lio_dev_rx_queue_setup,
1922 	.rx_queue_release	= lio_dev_rx_queue_release,
1923 	.tx_queue_setup		= lio_dev_tx_queue_setup,
1924 	.tx_queue_release	= lio_dev_tx_queue_release,
1925 	.reta_update		= lio_dev_rss_reta_update,
1926 	.reta_query		= lio_dev_rss_reta_query,
1927 	.rss_hash_conf_get	= lio_dev_rss_hash_conf_get,
1928 	.rss_hash_update	= lio_dev_rss_hash_update,
1929 	.udp_tunnel_port_add	= lio_dev_udp_tunnel_add,
1930 	.udp_tunnel_port_del	= lio_dev_udp_tunnel_del,
1931 	.mtu_set		= lio_dev_mtu_set,
1932 };
1933 
1934 static void
1935 lio_check_pf_hs_response(void *lio_dev)
1936 {
1937 	struct lio_device *dev = lio_dev;
1938 
1939 	/* Return once the PF handshake response has arrived */
1940 	if (dev->pfvf_hsword.coproc_tics_per_us)
1941 		return;
1942 
1943 	cn23xx_vf_handle_mbox(dev);
1944 
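	/* Re-arm the alarm to poll again in one microsecond */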
1945 	rte_eal_alarm_set(1, lio_check_pf_hs_response, lio_dev);
1946 }
1947 
1948 /**
1949  * \brief Identify the LIO device and map the BAR address space
1950  * @param lio_dev lio device
1951  */
1952 static int
1953 lio_chip_specific_setup(struct lio_device *lio_dev)
1954 {
1955 	struct rte_pci_device *pdev = lio_dev->pci_dev;
1956 	uint32_t dev_id = pdev->id.device_id;
1957 	const char *s;
1958 	int ret = 1;
1959 
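	/* Only the CN23XX VF device is supported by this driver */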
1960 	switch (dev_id) {
1961 	case LIO_CN23XX_VF_VID:
1962 		lio_dev->chip_id = LIO_CN23XX_VF_VID;
1963 		ret = cn23xx_vf_setup_device(lio_dev);
1964 		s = "CN23XX VF";
1965 		break;
1966 	default:
1967 		s = "?";
1968 		lio_dev_err(lio_dev, "Unsupported Chip\n");
1969 	}
1970 
1971 	if (!ret)
1972 		lio_dev_info(lio_dev, "DEVICE : %s\n", s);
1973 
1974 	return ret;
1975 }
1976 
1977 static int
1978 lio_first_time_init(struct lio_device *lio_dev,
1979 		    struct rte_pci_device *pdev)
1980 {
1981 	int dpdk_queues;
1982 
1983 	PMD_INIT_FUNC_TRACE();
1984 
1985 	/* set dpdk specific pci device pointer */
1986 	lio_dev->pci_dev = pdev;
1987 
1988 	/* Identify the LIO type and set device ops */
1989 	if (lio_chip_specific_setup(lio_dev)) {
1990 		lio_dev_err(lio_dev, "Chip specific setup failed\n");
1991 		return -1;
1992 	}
1993 
1994 	/* Initialize soft command buffer pool */
1995 	if (lio_setup_sc_buffer_pool(lio_dev)) {
1996 		lio_dev_err(lio_dev, "sc buffer pool allocation failed\n");
1997 		return -1;
1998 	}
1999 
2000 	/* Initialize lists to manage the requests of different types that
2001 	 * arrive from applications for this lio device.
2002 	 */
2003 	lio_setup_response_list(lio_dev);
2004 
2005 	if (lio_dev->fn_list.setup_mbox(lio_dev)) {
2006 		lio_dev_err(lio_dev, "Mailbox setup failed\n");
2007 		goto error;
2008 	}
2009 
2010 	/* Start polling for the PF handshake response */
2011 	lio_check_pf_hs_response((void *)lio_dev);
2012 
2013 	/* Do handshake and exit if incompatible PF driver */
2014 	if (cn23xx_pfvf_handshake(lio_dev))
2015 		goto error;
2016 
2017 	/* Request and wait for device reset. */
2018 	if (pdev->kdrv == RTE_KDRV_IGB_UIO) {
2019 		cn23xx_vf_ask_pf_to_do_flr(lio_dev);
2020 		/* FLR wait time doubled as a precaution. */
2021 		rte_delay_ms(LIO_PCI_FLR_WAIT * 2);
2022 	}
2023 
2024 	if (lio_dev->fn_list.setup_device_regs(lio_dev)) {
2025 		lio_dev_err(lio_dev, "Failed to configure device registers\n");
2026 		goto error;
2027 	}
2028 
2029 	if (lio_setup_instr_queue0(lio_dev)) {
2030 		lio_dev_err(lio_dev, "Failed to setup instruction queue 0\n");
2031 		goto error;
2032 	}
2033 
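	/* The rings assigned to this VF cap both the Rx and Tx queue counts */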
2034 	dpdk_queues = (int)lio_dev->sriov_info.rings_per_vf;
2035 
2036 	lio_dev->max_tx_queues = dpdk_queues;
2037 	lio_dev->max_rx_queues = dpdk_queues;
2038 
2039 	/* Enable input and output queues for this device */
2040 	if (lio_dev->fn_list.enable_io_queues(lio_dev))
2041 		goto error;
2042 
2043 	return 0;
2044 
2045 error:
2046 	lio_free_sc_buffer_pool(lio_dev);
2047 	if (lio_dev->mbox[0])
2048 		lio_dev->fn_list.free_mbox(lio_dev);
2049 	if (lio_dev->instr_queue[0])
2050 		lio_free_instr_queue0(lio_dev);
2051 
2052 	return -1;
2053 }
2054 
2055 static int
2056 lio_eth_dev_uninit(struct rte_eth_dev *eth_dev)
2057 {
2058 	struct lio_device *lio_dev = LIO_DEV(eth_dev);
2059 
2060 	PMD_INIT_FUNC_TRACE();
2061 
2062 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
2063 		return 0;
2064 
2065 	/* Free the soft command buffer pool */
2066 	lio_free_sc_buffer_pool(lio_dev);
2067 
2068 	eth_dev->dev_ops = NULL;
2069 	eth_dev->rx_pkt_burst = NULL;
2070 	eth_dev->tx_pkt_burst = NULL;
2071 
2072 	return 0;
2073 }
2074 
2075 static int
2076 lio_eth_dev_init(struct rte_eth_dev *eth_dev)
2077 {
2078 	struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(eth_dev);
2079 	struct lio_device *lio_dev = LIO_DEV(eth_dev);
2080 
2081 	PMD_INIT_FUNC_TRACE();
2082 
2083 	eth_dev->rx_pkt_burst = &lio_dev_recv_pkts;
2084 	eth_dev->tx_pkt_burst = &lio_dev_xmit_pkts;
2085 
2086 	/* Primary does the initialization. */
2087 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
2088 		return 0;
2089 
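	/* Copy generic PCI device information into the ethdev */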
2090 	rte_eth_copy_pci_info(eth_dev, pdev);
2091 
2092 	if (pdev->mem_resource[0].addr) {
2093 		lio_dev->hw_addr = pdev->mem_resource[0].addr;
2094 	} else {
2095 		PMD_INIT_LOG(ERR, "ERROR: Failed to map BAR0\n");
2096 		return -ENODEV;
2097 	}
2098 
2099 	lio_dev->eth_dev = eth_dev;
2100 	/* set lio device print string */
2101 	snprintf(lio_dev->dev_string, sizeof(lio_dev->dev_string),
2102 		 "%s[%02x:%02x.%x]", pdev->driver->driver.name,
2103 		 pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
2104 
2105 	lio_dev->port_id = eth_dev->data->port_id;
2106 
2107 	if (lio_first_time_init(lio_dev, pdev)) {
2108 		lio_dev_err(lio_dev, "Device init failed\n");
2109 		return -EINVAL;
2110 	}
2111 
2112 	eth_dev->dev_ops = &liovf_eth_dev_ops;
2113 	eth_dev->data->mac_addrs = rte_zmalloc("lio", RTE_ETHER_ADDR_LEN, 0);
2114 	if (eth_dev->data->mac_addrs == NULL) {
2115 		lio_dev_err(lio_dev,
2116 			    "MAC addresses memory allocation failed\n");
2117 		eth_dev->dev_ops = NULL;
2118 		eth_dev->rx_pkt_burst = NULL;
2119 		eth_dev->tx_pkt_burst = NULL;
2120 		return -ENOMEM;
2121 	}
2122 
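	/* Mark the device running; rte_wmb() orders the store before later writes */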
2123 	rte_atomic64_set(&lio_dev->status, LIO_DEV_RUNNING);
2124 	rte_wmb();
2125 
2126 	lio_dev->port_configured = 0;
2127 	/* Always allow unicast packets */
2128 	lio_dev->ifflags |= LIO_IFFLAG_UNICAST;
2129 
2130 	return 0;
2131 }
2132 
2133 static int
2134 lio_eth_dev_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
2135 		      struct rte_pci_device *pci_dev)
2136 {
2137 	return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct lio_device),
2138 			lio_eth_dev_init);
2139 }
2140 
2141 static int
2142 lio_eth_dev_pci_remove(struct rte_pci_device *pci_dev)
2143 {
2144 	return rte_eth_dev_pci_generic_remove(pci_dev,
2145 					      lio_eth_dev_uninit);
2146 }
2147 
2148 /* Set of PCI devices this driver supports */
2149 static const struct rte_pci_id pci_id_liovf_map[] = {
2150 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, LIO_CN23XX_VF_VID) },
2151 	{ .vendor_id = 0, /* sentinel */ }
2152 };
2153 
2154 static struct rte_pci_driver rte_liovf_pmd = {
2155 	.id_table	= pci_id_liovf_map,
2156 	.drv_flags      = RTE_PCI_DRV_NEED_MAPPING,
2157 	.probe		= lio_eth_dev_pci_probe,
2158 	.remove		= lio_eth_dev_pci_remove,
2159 };
2160 
2161 RTE_PMD_REGISTER_PCI(net_liovf, rte_liovf_pmd);
2162 RTE_PMD_REGISTER_PCI_TABLE(net_liovf, pci_id_liovf_map);
2163 RTE_PMD_REGISTER_KMOD_DEP(net_liovf, "* igb_uio | vfio-pci");
2164 
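/* Register the driver log types; the default level is NOTICE */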
2165 RTE_INIT(lio_init_log)
2166 {
2167 	lio_logtype_init = rte_log_register("pmd.net.liquidio.init");
2168 	if (lio_logtype_init >= 0)
2169 		rte_log_set_level(lio_logtype_init, RTE_LOG_NOTICE);
2170 	lio_logtype_driver = rte_log_register("pmd.net.liquidio.driver");
2171 	if (lio_logtype_driver >= 0)
2172 		rte_log_set_level(lio_logtype_driver, RTE_LOG_NOTICE);
2173 }
2174